Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock | 13
-rw-r--r--  vendor/github.com/DreamItGetIT/statuscake/contactGroups.go | 149
-rw-r--r--  vendor/github.com/DreamItGetIT/statuscake/responses.go | 2
-rw-r--r--  vendor/github.com/DreamItGetIT/statuscake/ssl.go | 273
-rw-r--r--  vendor/github.com/DreamItGetIT/statuscake/tests.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go | 23
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go | 31
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go | 11
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/logger.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go | 9
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go | 6
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go | 15
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go | 3
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go | 97
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go | 65
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go | 34
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go | 29
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go | 650
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go | 17
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go | 11
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go | 45
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go | 15
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request.go | 92
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go | 5
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go | 258
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go | 59
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/session.go | 226
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go | 339
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 20
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/types.go | 20
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/version.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go | 6
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go | 296
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go | 250
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go | 77
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go | 20
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go | 6
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go | 19
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 590
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go | 3
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go | 3
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 64
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go | 34
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/api.go | 1126
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/doc.go | 76
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/errors.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go | 96
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/compare.go | 557
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/export_panic.go (renamed from vendor/github.com/google/go-cmp/cmp/unsafe_panic.go) | 6
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/export_unsafe.go (renamed from vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go) | 8
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go | 2
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go | 4
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | 31
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go | 9
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go | 10
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go | 10
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/function/func.go | 64
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/format.go | 277
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go | 23
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go | 26
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/sort.go | 9
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/zero.go | 45
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/options.go | 255
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/path.go | 339
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report.go | 51
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_compare.go | 296
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_reflect.go | 279
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_slices.go | 333
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_text.go | 382
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_value.go | 121
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/reporter.go | 53
-rw-r--r--  vendor/github.com/google/go-querystring/LICENSE | 27
-rw-r--r--  vendor/github.com/google/go-querystring/query/encode.go | 320
-rw-r--r--  vendor/github.com/hashicorp/go-getter/checksum.go | 30
-rw-r--r--  vendor/github.com/hashicorp/go-getter/detect_bitbucket.go | 2
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/client.go | 20
-rw-r--r--  vendor/github.com/hashicorp/go-plugin/server.go | 32
-rw-r--r--  vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go | 31
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go | 20
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go | 8
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/json/structure.go | 6
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/spec.md | 4
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/structure.go | 4
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl2/hclwrite/format.go | 35
-rw-r--r--  vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go | 12
-rw-r--r--  vendor/github.com/hashicorp/terraform/addrs/parse_ref.go | 8
-rw-r--r--  vendor/github.com/hashicorp/terraform/command/format/plan.go | 4
-rw-r--r--  vendor/github.com/hashicorp/terraform/command/format/state.go | 160
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/config.go | 29
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/interpolate_walk.go | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/loader.go | 15
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/module/versions.go | 34
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/providers.go | 42
-rw-r--r--  vendor/github.com/hashicorp/terraform/config/raw_config.go | 6
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/config_build.go | 1
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/configload/getter.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go | 10
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go | 10
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go | 14
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go | 21
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/resource.go | 32
-rw-r--r--  vendor/github.com/hashicorp/terraform/configs/version_constraint.go | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go | 78
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go | 78
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go | 3
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go | 3
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go | 4
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource.go | 22
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go | 4
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/schema.go | 75
-rw-r--r--  vendor/github.com/hashicorp/terraform/helper/schema/shims.go | 4
-rw-r--r--  vendor/github.com/hashicorp/terraform/internal/initwd/getter.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go | 668
-rw-r--r--  vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto | 351
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go | 9
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/data.go | 1
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/eval.go | 10
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/funcs/collection.go | 30
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go | 40
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go | 15
-rw-r--r--  vendor/github.com/hashicorp/terraform/lang/functions.go | 6
-rw-r--r--  vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go | 12
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/discovery/get.go | 3
-rw-r--r--  vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/providers/provider.go | 8
-rw-r--r--  vendor/github.com/hashicorp/terraform/states/state_deepcopy.go | 13
-rw-r--r--  vendor/github.com/hashicorp/terraform/states/statefile/version2.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go | 27
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/diff.go | 13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_apply.go | 6
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_diff.go | 26
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go | 85
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go | 14
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_state.go | 14
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_validate.go | 52
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/eval_variable.go | 3
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/evaluate.go | 53
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/interpolate.go | 13
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go | 12
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go | 40
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go | 15
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go | 11
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go | 15
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go | 7
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go | 12
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/provider_mock.go | 3
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/resource_address.go | 2
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/state.go | 4
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go | 31
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go | 21
-rw-r--r--  vendor/github.com/hashicorp/terraform/terraform/util.go | 6
-rw-r--r--  vendor/github.com/hashicorp/terraform/version/version.go | 2
-rw-r--r--  vendor/github.com/satori/go.uuid/.travis.yml | 23
-rw-r--r--  vendor/github.com/satori/go.uuid/LICENSE | 20
-rw-r--r--  vendor/github.com/satori/go.uuid/README.md | 65
-rw-r--r--  vendor/github.com/satori/go.uuid/codec.go | 206
-rw-r--r--  vendor/github.com/satori/go.uuid/generator.go | 239
-rw-r--r--  vendor/github.com/satori/go.uuid/sql.go | 78
-rw-r--r--  vendor/github.com/satori/go.uuid/uuid.go | 161
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/.travis.yml | 5
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md | 10
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/LICENSE | 201
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml | 31
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/NOTICE | 20
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/apic.go | 739
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/converter.go | 69
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go | 57
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/decode.go | 261
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/emitterc.go | 1685
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/encode.go | 189
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/error.go | 97
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/go.mod | 3
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/go.sum | 18
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/implied_type.go | 268
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/parserc.go | 1095
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/readerc.go | 412
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/resolve.go | 288
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/scannerc.go | 2696
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/writerc.go | 26
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/yaml.go | 215
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/yamlh.go | 738
-rw-r--r--  vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go | 173
-rw-r--r--  vendor/github.com/zclconf/go-cty/cty/path.go | 42
-rw-r--r--  vendor/golang.org/x/crypto/openpgp/keys.go | 14
-rw-r--r--  vendor/golang.org/x/crypto/openpgp/packet/private_key.go | 26
-rw-r--r--  vendor/modules.txt | 31
199 files changed, 18358 insertions, 3159 deletions
diff --git a/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock b/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
index c5b189e..b433daf 100644
--- a/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
+++ b/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
@@ -18,6 +18,14 @@
   version = "v1.1.1"
 
 [[projects]]
+  digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690"
+  name = "github.com/google/go-querystring"
+  packages = ["query"]
+  pruneopts = "UT"
+  revision = "44c6ddd0a2342c386950e880b658017258da92fc"
+  version = "v1.0.0"
+
+[[projects]]
   digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
   name = "github.com/pmezard/go-difflib"
   packages = ["difflib"]
@@ -30,7 +38,7 @@
   name = "github.com/stretchr/testify"
   packages = [
     "assert",
-    "require",
+    "require"
   ]
   pruneopts = "UT"
   revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
@@ -41,8 +49,9 @@
   analyzer-version = 1
   input-imports = [
     "github.com/DreamItGetIT/statuscake",
+    "github.com/google/go-querystring/query",
     "github.com/stretchr/testify/assert",
-    "github.com/stretchr/testify/require",
+    "github.com/stretchr/testify/require"
   ]
   solver-name = "gps-cdcl"
   solver-version = 1
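
The go-querystring module pinned above is what the statuscake changes below rely on to serialize request structs into url.Values via `url` struct tags. A minimal sketch of that mechanism, assuming the pinned v1.0.0 API (the option struct here is illustrative, not part of either library):

    package main

    import (
    	"fmt"

    	"github.com/google/go-querystring/query"
    )

    // sslOptions is a hypothetical struct mirroring how the vendored code
    // tags fields for query-string encoding.
    type sslOptions struct {
    	Domain    string `url:"domain"`
    	Checkrate string `url:"checkrate,omitempty"`
    }

    func main() {
    	v, err := query.Values(sslOptions{Domain: "example.com"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(v.Encode()) // domain=example.com
    }
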
diff --git a/vendor/github.com/DreamItGetIT/statuscake/contactGroups.go b/vendor/github.com/DreamItGetIT/statuscake/contactGroups.go
new file mode 100644
index 0000000..437fe37
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/contactGroups.go
@@ -0,0 +1,149 @@
+package statuscake
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/google/go-querystring/query"
+)
+
+// ContactGroup represents the data received from the API with GET
+type ContactGroup struct {
+	GroupName    string   `json:"GroupName" url:"GroupName,omitempty"`
+	Emails       []string `json:"Emails"`
+	EmailsPut    string   `url:"Email,omitempty"`
+	Mobiles      string   `json:"Mobiles" url:"Mobile,omitempty"`
+	Boxcar       string   `json:"Boxcar" url:"Boxcar,omitempty"`
+	Pushover     string   `json:"Pushover" url:"Pushover,omitempty"`
+	ContactID    int      `json:"ContactID" url:"ContactID,omitempty"`
+	DesktopAlert string   `json:"DesktopAlert" url:"DesktopAlert,omitempty"`
+	PingURL      string   `json:"PingURL" url:"PingURL,omitempty"`
+}
+
+type Response struct {
+	Success  bool   `json:"Success"`
+	Message  string `json:"Message"`
+	InsertID int    `json:"InsertID"`
+}
+
+// ContactGroups represents the operations the API exposes for contact groups
+type ContactGroups interface {
+	All() ([]*ContactGroup, error)
+	Detail(int) (*ContactGroup, error)
+	Update(*ContactGroup) (*ContactGroup, error)
+	Delete(int) error
+	Create(*ContactGroup) (*ContactGroup, error)
+}
+
+func findContactGroup(responses []*ContactGroup, id int) (*ContactGroup, error) {
+	var response *ContactGroup
+	for _, elem := range responses {
+		if elem.ContactID == id {
+			return elem, nil
+		}
+	}
+	return response, fmt.Errorf("%d not found", id)
+}
+
+type contactGroups struct {
+	client apiClient
+}
+
+// NewContactGroups returns a new ContactGroups
+func NewContactGroups(c apiClient) ContactGroups {
+	return &contactGroups{
+		client: c,
+	}
+}
+
+// All returns a list of all the ContactGroups from the API
+func (tt *contactGroups) All() ([]*ContactGroup, error) {
+	rawResponse, err := tt.client.get("/ContactGroups", nil)
+	if err != nil {
+		return nil, fmt.Errorf("Error getting StatusCake contactGroups: %s", err.Error())
+	}
+	var getResponse []*ContactGroup
+	err = json.NewDecoder(rawResponse.Body).Decode(&getResponse)
+	if err != nil {
+		return nil, err
+	}
+	return getResponse, err
+}
+
+// Detail returns the ContactGroup corresponding to the given id
+func (tt *contactGroups) Detail(id int) (*ContactGroup, error) {
+	responses, err := tt.All()
+	if err != nil {
+		return nil, err
+	}
+	myContactGroup, errF := findContactGroup(responses, id)
+	if errF != nil {
+		return nil, errF
+	}
+	return myContactGroup, nil
+}
+
+// Update updates the API with cg (creating it when cg.ContactID is 0)
+// and returns the corresponding ContactGroup
+func (tt *contactGroups) Update(cg *ContactGroup) (*ContactGroup, error) {
+	if cg.ContactID == 0 {
+		return tt.Create(cg)
+	}
+	cg.EmailsPut = strings.Join(cg.Emails, ",")
+	var v url.Values
+
+	v, _ = query.Values(*cg)
+
+	rawResponse, err := tt.client.put("/ContactGroups/Update", v)
+	if err != nil {
+		return nil, fmt.Errorf("Error updating StatusCake ContactGroup: %s", err.Error())
+	}
+
+	var response Response
+	err = json.NewDecoder(rawResponse.Body).Decode(&response)
+	if err != nil {
+		return nil, err
+	}
+
+	if !response.Success {
+		return nil, fmt.Errorf("%s", response.Message)
+	}
+
+	return cg, nil
+}
+
+// Delete deletes the ContactGroup whose ID is id
+func (tt *contactGroups) Delete(id int) error {
+	_, err := tt.client.delete("/ContactGroups/Update", url.Values{"ContactID": {fmt.Sprint(id)}})
+	return err
+}
+
+// Create creates the ContactGroup with the data in cg and returns the created ContactGroup
+func (tt *contactGroups) Create(cg *ContactGroup) (*ContactGroup, error) {
+	cg.ContactID = 0
+	cg.EmailsPut = strings.Join(cg.Emails, ",")
+	var v url.Values
+	v, _ = query.Values(*cg)
+
+	rawResponse, err := tt.client.put("/ContactGroups/Update", v)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating StatusCake ContactGroup: %s", err.Error())
+	}
+
+	var response Response
+	err = json.NewDecoder(rawResponse.Body).Decode(&response)
+	if err != nil {
+		return nil, err
+	}
+
+	if !response.Success {
+		return nil, fmt.Errorf("%s", response.Message)
+	}
+
+	cg.ContactID = response.InsertID
+
+	return cg, nil
+}
+
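
For orientation, a sketch of how the new ContactGroups API is driven. It assumes the library's existing statuscake.New and statuscake.Auth (defined elsewhere in the package, not in this diff) to construct the apiClient; the credentials are placeholders:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/DreamItGetIT/statuscake"
    )

    func main() {
    	c, err := statuscake.New(statuscake.Auth{Username: "user", Apikey: "key"})
    	if err != nil {
    		log.Fatal(err)
    	}

    	cgs := statuscake.NewContactGroups(c)

    	// A zero ContactID makes Update fall through to Create.
    	cg, err := cgs.Update(&statuscake.ContactGroup{
    		GroupName: "on-call",
    		Emails:    []string{"ops@example.com"},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(cg.ContactID)
    }
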
diff --git a/vendor/github.com/DreamItGetIT/statuscake/responses.go b/vendor/github.com/DreamItGetIT/statuscake/responses.go
index ec74a63..553cb95 100644
--- a/vendor/github.com/DreamItGetIT/statuscake/responses.go
+++ b/vendor/github.com/DreamItGetIT/statuscake/responses.go
@@ -64,6 +64,7 @@ type detailResponse struct {
 	EnableSSLWarning bool     `json:"EnableSSLWarning"`
 	FollowRedirect   bool     `json:"FollowRedirect"`
 	StatusCodes      []string `json:"StatusCodes"`
+	Tags             []string `json:"Tags"`
 }
 
 func (d *detailResponse) test() *Test {
@@ -100,5 +101,6 @@ func (d *detailResponse) test() *Test {
 		EnableSSLAlert: d.EnableSSLWarning,
 		FollowRedirect: d.FollowRedirect,
 		StatusCodes:    strings.Join(d.StatusCodes[:], ","),
+		TestTags:       d.Tags,
 	}
 }
diff --git a/vendor/github.com/DreamItGetIT/statuscake/ssl.go b/vendor/github.com/DreamItGetIT/statuscake/ssl.go
new file mode 100644
index 0000000..3f73d8d
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/ssl.go
@@ -0,0 +1,273 @@
+package statuscake
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/google/go-querystring/query"
+)
+
+// Ssl represents the data received from the API with GET
+type Ssl struct {
+	ID             string              `json:"id" url:"id,omitempty"`
+	Domain         string              `json:"domain" url:"domain,omitempty"`
+	Checkrate      int                 `json:"checkrate" url:"checkrate,omitempty"`
+	ContactGroupsC string              `url:"contact_groups,omitempty"`
+	AlertAt        string              `json:"alert_at" url:"alert_at,omitempty"`
+	AlertReminder  bool                `json:"alert_reminder" url:"alert_reminder,omitempty"`
+	AlertExpiry    bool                `json:"alert_expiry" url:"alert_expiry,omitempty"`
+	AlertBroken    bool                `json:"alert_broken" url:"alert_broken,omitempty"`
+	AlertMixed     bool                `json:"alert_mixed" url:"alert_mixed,omitempty"`
+	Paused         bool                `json:"paused"`
+	IssuerCn       string              `json:"issuer_cn"`
+	CertScore      string              `json:"cert_score"`
+	CipherScore    string              `json:"cipher_score"`
+	CertStatus     string              `json:"cert_status"`
+	Cipher         string              `json:"cipher"`
+	ValidFromUtc   string              `json:"valid_from_utc"`
+	ValidUntilUtc  string              `json:"valid_until_utc"`
+	MixedContent   []map[string]string `json:"mixed_content"`
+	Flags          map[string]bool     `json:"flags"`
+	ContactGroups  []string            `json:"contact_groups"`
+	LastReminder   int                 `json:"last_reminder"`
+	LastUpdatedUtc string              `json:"last_updated_utc"`
+}
+
+// PartialSsl represents an SSL test creation or modification
+type PartialSsl struct {
+	ID             int
+	Domain         string
+	Checkrate      string
+	ContactGroupsC string
+	AlertAt        string
+	AlertExpiry    bool
+	AlertReminder  bool
+	AlertBroken    bool
+	AlertMixed     bool
+}
+
+type createSsl struct {
+	ID             int    `url:"id,omitempty"`
+	Domain         string `url:"domain" json:"domain"`
+	Checkrate      string `url:"checkrate" json:"checkrate"`
+	ContactGroupsC string `url:"contact_groups" json:"contact_groups"`
+	AlertAt        string `url:"alert_at" json:"alert_at"`
+	AlertExpiry    bool   `url:"alert_expiry" json:"alert_expiry"`
+	AlertReminder  bool   `url:"alert_reminder" json:"alert_reminder"`
+	AlertBroken    bool   `url:"alert_broken" json:"alert_broken"`
+	AlertMixed     bool   `url:"alert_mixed" json:"alert_mixed"`
+}
+
+type updateSsl struct {
+	ID             int    `url:"id"`
+	Domain         string `url:"domain" json:"domain"`
+	Checkrate      string `url:"checkrate" json:"checkrate"`
+	ContactGroupsC string `url:"contact_groups" json:"contact_groups"`
+	AlertAt        string `url:"alert_at" json:"alert_at"`
+	AlertExpiry    bool   `url:"alert_expiry" json:"alert_expiry"`
+	AlertReminder  bool   `url:"alert_reminder" json:"alert_reminder"`
+	AlertBroken    bool   `url:"alert_broken" json:"alert_broken"`
+	AlertMixed     bool   `url:"alert_mixed" json:"alert_mixed"`
+}
+
+
+type sslUpdateResponse struct {
+	Success bool        `json:"Success"`
+	Message interface{} `json:"Message"`
+}
+
+type sslCreateResponse struct {
+	Success bool        `json:"Success"`
+	Message interface{} `json:"Message"`
+	Input   createSsl   `json:"Input"`
+}
+
+// Ssls represents the operations the API exposes for SSL tests
+type Ssls interface {
+	All() ([]*Ssl, error)
+	completeSsl(*PartialSsl) (*Ssl, error)
+	Detail(string) (*Ssl, error)
+	Update(*PartialSsl) (*Ssl, error)
+	UpdatePartial(*PartialSsl) (*PartialSsl, error)
+	Delete(ID string) error
+	CreatePartial(*PartialSsl) (*PartialSsl, error)
+	Create(*PartialSsl) (*Ssl, error)
+}
+
+func consolidateSsl(s *Ssl) {
+	s.ContactGroupsC = strings.Trim(strings.Join(strings.Fields(fmt.Sprint(s.ContactGroups)), ","), "[]")
+}
+
+func findSsl(responses []*Ssl, id string) (*Ssl, error) {
+	var response *Ssl
+	for _, elem := range responses {
+		if elem.ID == id {
+			return elem, nil
+		}
+	}
+	return response, fmt.Errorf("%s not found", id)
+}
+
+func (tt *ssls) completeSsl(s *PartialSsl) (*Ssl, error) {
+	full, err := tt.Detail(strconv.Itoa(s.ID))
+	if err != nil {
+		return nil, err
+	}
+	full.ContactGroups = strings.Split(s.ContactGroupsC, ",")
+	return full, nil
+}
+
+// Partial returns the PartialSsl corresponding to the given Ssl
+func Partial(s *Ssl) (*PartialSsl, error) {
+	if s == nil {
+		return nil, fmt.Errorf("s is nil")
+	}
+	id, err := strconv.Atoi(s.ID)
+	if err != nil {
+		return nil, err
+	}
+	return &PartialSsl{
+		ID:             id,
+		Domain:         s.Domain,
+		Checkrate:      strconv.Itoa(s.Checkrate),
+		ContactGroupsC: s.ContactGroupsC,
+		AlertReminder:  s.AlertReminder,
+		AlertExpiry:    s.AlertExpiry,
+		AlertBroken:    s.AlertBroken,
+		AlertMixed:     s.AlertMixed,
+		AlertAt:        s.AlertAt,
+	}, nil
+
+}
+
+type ssls struct {
+	client apiClient
+}
+
+// NewSsls returns a new Ssls
+func NewSsls(c apiClient) Ssls {
+	return &ssls{
+		client: c,
+	}
+}
+
+// All returns a list of all the SSL tests from the API
+func (tt *ssls) All() ([]*Ssl, error) {
+	rawResponse, err := tt.client.get("/SSL", nil)
+	if err != nil {
+		return nil, fmt.Errorf("Error getting StatusCake Ssl: %s", err.Error())
+	}
+	var getResponse []*Ssl
+	err = json.NewDecoder(rawResponse.Body).Decode(&getResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	for ssl := range getResponse {
+		consolidateSsl(getResponse[ssl])
+	}
+
+	return getResponse, err
+}
+
+// Detail returns the SSL test corresponding to the given id
+func (tt *ssls) Detail(id string) (*Ssl, error) {
+	responses, err := tt.All()
+	if err != nil {
+		return nil, err
+	}
+	mySsl, errF := findSsl(responses, id)
+	if errF != nil {
+		return nil, errF
+	}
+	return mySsl, nil
+}
+
+// Update updates the API with s (creating it when s.ID is 0) and returns the corresponding Ssl
+func (tt *ssls) Update(s *PartialSsl) (*Ssl, error) {
+	var err error
+	s, err = tt.UpdatePartial(s)
+	if err != nil {
+		return nil, err
+	}
+	return tt.completeSsl(s)
+}
+
+// UpdatePartial updates the API with s (creating it when s.ID is 0) and returns the corresponding PartialSsl
+func (tt *ssls) UpdatePartial(s *PartialSsl) (*PartialSsl, error) {
+
+	if s.ID == 0 {
+		return tt.CreatePartial(s)
+	}
+	var v url.Values
+
+	v, _ = query.Values(updateSsl(*s))
+
+	rawResponse, err := tt.client.put("/SSL/Update", v)
+	if err != nil {
+		return nil, fmt.Errorf("Error updating StatusCake Ssl: %s", err.Error())
+	}
+
+	var updateResponse sslUpdateResponse
+	err = json.NewDecoder(rawResponse.Body).Decode(&updateResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if !updateResponse.Success {
+		return nil, fmt.Errorf("%s", updateResponse.Message.(string))
+	}
+
+
+	return s, nil
+}
+
+// Delete deletes the SSL test whose ID is id
+func (tt *ssls) Delete(id string) error {
+	_, err := tt.client.delete("/SSL/Update", url.Values{"id": {fmt.Sprint(id)}})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Create creates the SSL test with the data in s and returns the corresponding Ssl
+func (tt *ssls) Create(s *PartialSsl) (*Ssl, error) {
+	var err error
+	s, err = tt.CreatePartial(s)
+	if err != nil {
+		return nil, err
+	}
+	return tt.completeSsl(s)
+}
+
+// CreatePartial creates the SSL test with the data in s and returns the created PartialSsl
+func (tt *ssls) CreatePartial(s *PartialSsl) (*PartialSsl, error) {
+	s.ID = 0
+	var v url.Values
+	v, _ = query.Values(createSsl(*s))
+
+	rawResponse, err := tt.client.put("/SSL/Update", v)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating StatusCake Ssl: %s", err.Error())
+	}
+
+	var createResponse sslCreateResponse
+	err = json.NewDecoder(rawResponse.Body).Decode(&createResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	if !createResponse.Success {
+		return nil, fmt.Errorf("%s", createResponse.Message.(string))
+	}
+	*s = PartialSsl(createResponse.Input)
+	s.ID = int(createResponse.Message.(float64))
+
+	return s, nil
+}
+
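
Similarly, a sketch of the Ssls API added above, under the same assumption that statuscake.New and statuscake.Auth (from the rest of the library) provide the apiClient; the values are placeholders:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/DreamItGetIT/statuscake"
    )

    func main() {
    	c, err := statuscake.New(statuscake.Auth{Username: "user", Apikey: "key"})
    	if err != nil {
    		log.Fatal(err)
    	}

    	ssls := statuscake.NewSsls(c)

    	// Create calls CreatePartial and then fetches the full Ssl.
    	ssl, err := ssls.Create(&statuscake.PartialSsl{
    		Domain:    "https://example.com",
    		Checkrate: "86400",
    		AlertAt:   "7,14,30",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(ssl.ID, ssl.CertScore)
    }
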
diff --git a/vendor/github.com/DreamItGetIT/statuscake/tests.go b/vendor/github.com/DreamItGetIT/statuscake/tests.go
index 2a2383d..f92d29f 100644
--- a/vendor/github.com/DreamItGetIT/statuscake/tests.go
+++ b/vendor/github.com/DreamItGetIT/statuscake/tests.go
@@ -99,7 +99,7 @@ type Test struct {
 	TestTags []string `json:"TestTags" querystring:"TestTags"`
 
 	// Comma Seperated List of StatusCodes to Trigger Error on (on Update will replace, so send full list each time)
-	StatusCodes string `json:"StatusCodes" querystring:"StatusCodes"`
+	StatusCodes string `json:"StatusCodes" querystring:"StatusCodes" querystringoptions:"omitempty"`
 
 	// Set to 1 to enable the Cookie Jar. Required for some redirects.
 	UseJar int `json:"UseJar" querystring:"UseJar"`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
index 56fdfc2..99849c0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -138,8 +138,27 @@ type RequestFailure interface {
 	RequestID() string
 }
 
-// NewRequestFailure returns a new request error wrapper for the given Error
-// provided.
+// NewRequestFailure returns a wrapped error with additional information for
+// the request status code, and service requestID.
+//
+// Should be used to wrap all requests which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
 func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
 	return newRequestError(err, statusCode, reqID)
 }
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+	awsError
+	Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that failed to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+	return &unmarshalError{
+		awsError: New("UnmarshalError", msg, err),
+		bytes:    bytes,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
index 0202a00..9cf7eaf 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -1,6 +1,9 @@
 package awserr
 
-import "fmt"
+import (
+	"encoding/hex"
+	"fmt"
+)
 
 // SprintError returns a string of the formatted error code.
 //
@@ -119,6 +122,7 @@ type requestError struct {
 	awsError
 	statusCode int
 	requestID  string
+	bytes      []byte
 }
 
 // newRequestError returns a wrapped error with additional information for
@@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error {
 	return []error{r.OrigErr()}
 }
 
+type unmarshalError struct {
+	awsError
+	bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+	extra := hex.Dump(e.bytes)
+	return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+	return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+	return e.bytes
+}
+
 // An error list that satisfies the golang interface
 type errorList []error
 
@@ -181,7 +208,7 @@ func (e errorList) Error() string {
 	// How do we want to handle the array size being zero
 	if size := len(e); size > 0 {
 		for i := 0; i < size; i++ {
-			msg += fmt.Sprintf("%s", e[i].Error())
+			msg += e[i].Error()
 			// We check the next index to see if it is within the slice.
 			// If it is, then we append a newline. We do this, because unit tests
 			// could be broken with the additional '\n'
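
A quick sketch of what the UnmarshalError machinery added above surfaces; the failing payload is hex-dumped into the message (the inputs here are illustrative):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/awserr"
    )

    func main() {
    	err := awserr.NewUnmarshalError(
    		errors.New("unexpected end of JSON input"),
    		"failed decoding error response",
    		[]byte(`{"Code":"Oops"`),
    	)
    	// Error() appends a hex dump of the offending bytes as the "extra"
    	// portion of the formatted message.
    	fmt.Println(err.Error())
    	fmt.Println(len(err.Bytes()))
    }
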
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
index 11c52c3..285e54d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
 // SetValueAtPath sets a value at the case insensitive lexical path inside
 // of a structure.
 func SetValueAtPath(i interface{}, path string, v interface{}) {
-	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
-		for _, rval := range rvals {
-			if rval.Kind() == reflect.Ptr && rval.IsNil() {
-				continue
-			}
-			setValue(rval, v)
+	rvals := rValuesAtPath(i, path, true, false, v == nil)
+	for _, rval := range rvals {
+		if rval.Kind() == reflect.Ptr && rval.IsNil() {
+			continue
 		}
+		setValue(rval, v)
 	}
 }
 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
index 7b5e127..8958c32 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -67,10 +67,14 @@ func logRequest(r *request.Request) {
 	if !bodySeekable {
 		r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
 	}
-	// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
-	// Body as a NoOpCloser and will not be reset after read by the HTTP
-	// client reader.
-	r.ResetBody()
+	// Reset the request body because dumpRequest will re-wrap the
+	// r.HTTPRequest's Body as a NoOpCloser and will not be reset after
+	// read by the HTTP client reader.
+	if err := r.Error; err != nil {
+		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, err))
+		return
+	}
 }
 
 r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index 894bbc7..4af5921 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -50,9 +50,10 @@ package credentials
 
 import (
 	"fmt"
-	"github.com/aws/aws-sdk-go/aws/awserr"
 	"sync"
 	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
 // AnonymousCredentials is an empty Credential object that can be used as
@@ -83,6 +84,12 @@ type Value struct {
 	ProviderName string
 }
 
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+	return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
 // A Provider is the interface for any component which will provide credentials
 // Value. A provider is required to manage its own Expired state, and what to
 // be expired means.
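
The new HasKeys helper removes the need to compare the two key fields by hand; a minimal sketch (the static credentials are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
    	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
    	v, err := creds.Get()
    	if err != nil {
    		panic(err)
    	}
    	// true: both AccessKeyID and SecretAccessKey are non-empty.
    	fmt.Println(v.HasKeys())
    }
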
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
index 0ed791b..43d4ed3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -11,6 +11,7 @@ import (
11 "github.com/aws/aws-sdk-go/aws/client" 11 "github.com/aws/aws-sdk-go/aws/client"
12 "github.com/aws/aws-sdk-go/aws/credentials" 12 "github.com/aws/aws-sdk-go/aws/credentials"
13 "github.com/aws/aws-sdk-go/aws/ec2metadata" 13 "github.com/aws/aws-sdk-go/aws/ec2metadata"
14 "github.com/aws/aws-sdk-go/aws/request"
14 "github.com/aws/aws-sdk-go/internal/sdkuri" 15 "github.com/aws/aws-sdk-go/internal/sdkuri"
15) 16)
16 17
@@ -142,7 +143,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
 	}
 
 	if err := s.Err(); err != nil {
-		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+		return nil, awserr.New(request.ErrCodeSerialization,
+			"failed to read EC2 instance role from metadata service", err)
 	}
 
 	return credsList, nil
@@ -164,7 +166,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
 	respCreds := ec2RoleCredRespBody{}
 	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
 		return ec2RoleCredRespBody{},
-			awserr.New("SerializationError",
+			awserr.New(request.ErrCodeSerialization,
 				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
 				err)
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
index ace5131..c2b2c5d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -39,6 +39,7 @@ import (
39 "github.com/aws/aws-sdk-go/aws/client/metadata" 39 "github.com/aws/aws-sdk-go/aws/client/metadata"
40 "github.com/aws/aws-sdk-go/aws/credentials" 40 "github.com/aws/aws-sdk-go/aws/credentials"
41 "github.com/aws/aws-sdk-go/aws/request" 41 "github.com/aws/aws-sdk-go/aws/request"
42 "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
42) 43)
43 44
44// ProviderName is the name of the credentials provider. 45// ProviderName is the name of the credentials provider.
@@ -174,7 +175,7 @@ func unmarshalHandler(r *request.Request) {
 
 	out := r.Data.(*getCredentialsOutput)
 	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
-		r.Error = awserr.New("SerializationError",
+		r.Error = awserr.New(request.ErrCodeSerialization,
 			"failed to decode endpoint credentials",
 			err,
 		)
@@ -185,11 +186,15 @@ func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 
 	var errOut errorOutput
-	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
-		r.Error = awserr.New("SerializationError",
-			"failed to decode endpoint credentials",
-			err,
+	err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to decode error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
 		)
+		return
 	}
 
 	// Response body format is not consistent between metadata endpoints.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index b6dbfd2..2e528d1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -200,7 +200,7 @@ type AssumeRoleProvider struct {
 	// by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must
 	// have a value between 0 and 1. Any other value may lead to expected behavior.
 	// With a MaxJitterFrac value of 0, default) will no jitter will be used.
-	// 
+	//
 	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
 	// AssumeRole call will be made with an arbitrary Duration between 27m and
 	// 30m.
@@ -258,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
 
 // Retrieve generates a new set of temporary credentials using STS.
 func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
-
 	// Apply defaults where parameters are not set.
 	if p.RoleSessionName == "" {
 		// Try to work out a role name that will hopefully end up unique.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 0000000..20510d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,97 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be used to easily test and
+// compare test values.
+var now = time.Now
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+
+	client       stsiface.STSAPI
+	ExpiryWindow time.Duration
+
+	tokenFilePath   string
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return &WebIdentityRoleProvider{
+		client:          svc,
+		tokenFilePath:   path,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+}
+
+// Retrieve will attempt to assume a role from a token which is located at
+// 'WebIdentityTokenFilePath' specified destination and if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	b, err := ioutil.ReadFile(p.tokenFilePath)
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+	resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	})
+	if err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
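
A sketch of wiring the new provider into an API client; the role ARN and token path are illustrative (the token file is typically projected by an OIDC-integrated platform such as EKS):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	creds := stscreds.NewWebIdentityCredentials(sess,
    		"arn:aws:iam::123456789012:role/my-role",
    		"my-session",
    		"/var/run/secrets/eks.amazonaws.com/serviceaccount/token")

    	// API clients built with these credentials assume the role lazily,
    	// re-reading the token when the credentials expire.
    	svc := s3.New(sess, &aws.Config{Credentials: creds})
    	fmt.Println(svc.ClientInfo.ServiceName)
    }
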
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
index 152d785..25a66d1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -1,30 +1,61 @@
-// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
-// via UDP connection. Using the Start function will enable the reporting of
-// metrics on a given port. If Start is called, with different parameters, again,
-// a panic will occur.
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
 //
-// Pause can be called to pause any metrics publishing on a given port. Sessions
-// that have had their handlers modified via InjectHandlers may still be used.
-// However, the handlers will act as a no-op meaning no metrics will be published.
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//     * AWS_CSM_PORT=<port number>
+//       The port number the CSM agent will receive metrics on.
+//
+//     * AWS_CSM_HOST=<hostname or ip>
+//       The hostname, or IP address the CSM agent will receive metrics on.
+//       Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with different ClientID or address it will panic.
 //
-// Example:
 //     r, err := csm.Start("clientID", ":31000")
 //     if err != nil {
 //         panic(fmt.Errorf("failed starting CSM: %v", err))
 //     }
 //
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
 //     sess, err := session.NewSession(&aws.Config{})
 //     if err != nil {
 //         panic(fmt.Errorf("failed loading session: %v", err))
 //     }
 //
+//     // Add CSM client's metric publishing request handlers to the SDK's
+//     // Session Configuration.
 //     r.InjectHandlers(&sess.Handlers)
 //
-//     client := s3.New(sess)
-//     resp, err := client.GetObject(&s3.GetObjectInput{
-//         Bucket: aws.String("bucket"),
-//         Key: aws.String("key"),
-//     })
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If Get function is called before the reporter is enabled with the
+// Start function or via SDK's Session configuration nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//     // Get the CSM client Reporter.
+//     r := csm.Get()
 //
 //     // Will pause monitoring
 //     r.Pause()
@@ -35,12 +66,4 @@
 //
 //     // Resume monitoring
 //     r.Continue()
-//
-// Start returns a Reporter that is used to enable or disable monitoring. If
-// access to the Reporter is required later, calling Get will return the Reporter
-// singleton.
-//
-// Example:
-//     r := csm.Get()
-//     r.Continue()
 package csm
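
Condensing the rewritten package comment into one runnable flow (the client ID and address are illustrative):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/csm"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	// Manually start the CSM client publishing to a local agent.
    	r, err := csm.Start("clientID", ":31000")
    	if err != nil {
    		panic(fmt.Errorf("failed starting CSM: %v", err))
    	}

    	sess := session.Must(session.NewSession())

    	// Inject the metric-publishing handlers so API clients built from
    	// this session report metrics.
    	r.InjectHandlers(&sess.Handlers)

    	// The same Reporter can later be fetched and paused/resumed.
    	csm.Get().Pause()
    	csm.Get().Continue()
    }
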
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
index 2f0c6ea..4b19e28 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -2,6 +2,7 @@ package csm
 
 import (
 	"fmt"
+	"strings"
 	"sync"
 )
 
@@ -9,19 +10,40 @@ var (
 	lock sync.Mutex
 )
 
-// Client side metric handler names
 const (
-	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
-	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
 )
 
-// Start will start the a long running go routine to capture
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only IP6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long running go routine to capture
 // client side metrics. Calling start multiple time will only
 // start the metric listener once and will panic if a different
 // client ID or port is passed in.
 //
-// Example:
-//     r, err := csm.Start("clientID", "127.0.0.1:8094")
+//     r, err := csm.Start("clientID", "127.0.0.1:31000")
 //     if err != nil {
 //         panic(fmt.Errorf("expected no error, but received %v", err))
 //     }
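
The defaulting rules of the new AddressWithDefaults helper, shown directly:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/csm"
    )

    func main() {
    	fmt.Println(csm.AddressWithDefaults("", ""))          // 127.0.0.1:31000
    	fmt.Println(csm.AddressWithDefaults("localhost", "")) // 127.0.0.1:31000
    	fmt.Println(csm.AddressWithDefaults("::1", "4000"))   // [::1]:4000
    }
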
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
index 0b5571a..c7008d8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -10,11 +10,6 @@ import (
10 "github.com/aws/aws-sdk-go/aws/request" 10 "github.com/aws/aws-sdk-go/aws/request"
11) 11)
12 12
13const (
14 // DefaultPort is used when no port is specified
15 DefaultPort = "31000"
16)
17
18// Reporter will gather metrics of API requests made and 13// Reporter will gather metrics of API requests made and
19// send those metrics to the CSM endpoint. 14// send those metrics to the CSM endpoint.
20type Reporter struct { 15type Reporter struct {
@@ -96,7 +91,7 @@ func getMetricException(err awserr.Error) metricException {
 
 	switch code {
 	case "RequestError",
-		"SerializationError",
+		request.ErrCodeSerialization,
 		request.CanceledErrorCode:
 		return sdkException{
 			requestException{exception: code, message: msg},
@@ -123,7 +118,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
 		Type:               aws.String("ApiCall"),
 		AttemptCount:       aws.Int(r.RetryCount + 1),
 		Region:             r.Config.Region,
-		Latency:            aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+		Latency:            aws.Int(int(time.Since(r.Time) / time.Millisecond)),
 		XAmzRequestID:      aws.String(r.RequestID),
 		MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
 	}
@@ -190,8 +185,9 @@ func (rep *Reporter) start() {
190 } 185 }
191} 186}
192 187
193// Pause will pause the metric channel preventing any new metrics from 188// Pause will pause the metric channel preventing any new metrics from being
194// being added. 189// added. It is safe to call concurrently with other calls to Pause, but if
 190// called concurrently with Continue it can lead to unexpected state.
195func (rep *Reporter) Pause() { 191func (rep *Reporter) Pause() {
196 lock.Lock() 192 lock.Lock()
197 defer lock.Unlock() 193 defer lock.Unlock()
@@ -203,8 +199,9 @@ func (rep *Reporter) Pause() {
203 rep.close() 199 rep.close()
204} 200}
205 201
206// Continue will reopen the metric channel and allow for monitoring 202// Continue will reopen the metric channel and allow for monitoring to be
207// to be resumed. 203// resumed. It is safe to call concurrently with other calls to Continue, but
 204// if called concurrently with Pause it can lead to unexpected state.
208func (rep *Reporter) Continue() { 205func (rep *Reporter) Continue() {
209 lock.Lock() 206 lock.Lock()
210 defer lock.Unlock() 207 defer lock.Unlock()
@@ -219,10 +216,18 @@ func (rep *Reporter) Continue() {
219 rep.metricsCh.Continue() 216 rep.metricsCh.Continue()
220} 217}
221 218
219// Client side metric handler names
220const (
221 APICallMetricHandlerName = "awscsm.SendAPICallMetric"
222 APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
223)
224
 222// InjectHandlers will enable client side metrics and inject the proper 225// InjectHandlers will enable client side metrics and inject the proper
223// handlers to handle how metrics are sent. 226// handlers to handle how metrics are sent.
224// 227//
225// Example: 228// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
 229// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
230//
226// // Start must be called in order to inject the correct handlers 231// // Start must be called in order to inject the correct handlers
227// r, err := csm.Start("clientID", "127.0.0.1:8094") 232// r, err := csm.Start("clientID", "127.0.0.1:8094")
228// if err != nil { 233// if err != nil {
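
A hedged end-to-end sketch of the Start/InjectHandlers flow described by the comments updated in this hunk (error handling abbreviated; assumes the standard aws-sdk-go session package):

package main

import (
	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Start the metric listener once per process; a second call with a
	// different client ID or address panics, per the Start doc comment.
	r, err := csm.Start("clientID", csm.AddressWithDefaults("", ""))
	if err != nil {
		panic(err)
	}

	// InjectHandlers is not safe to call concurrently, and calling it
	// repeatedly on the same handler list can produce duplicate metrics.
	sess := session.Must(session.NewSession())
	r.InjectHandlers(&sess.Handlers)

	// Pause and Continue are each safe to call concurrently with
	// themselves, but interleaving the two can leave the reporter in an
	// unexpected state (see the updated comments above).
	r.Pause()
	r.Continue()
}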
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index d57a1af..2c8d5f5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -82,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument
82 doc := EC2InstanceIdentityDocument{} 82 doc := EC2InstanceIdentityDocument{}
83 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { 83 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
84 return EC2InstanceIdentityDocument{}, 84 return EC2InstanceIdentityDocument{},
85 awserr.New("SerializationError", 85 awserr.New(request.ErrCodeSerialization,
86 "failed to decode EC2 instance identity document", err) 86 "failed to decode EC2 instance identity document", err)
87 } 87 }
88 88
@@ -101,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
101 info := EC2IAMInfo{} 101 info := EC2IAMInfo{}
102 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { 102 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
103 return EC2IAMInfo{}, 103 return EC2IAMInfo{},
104 awserr.New("SerializationError", 104 awserr.New(request.ErrCodeSerialization,
105 "failed to decode EC2 IAM info", err) 105 "failed to decode EC2 IAM info", err)
106 } 106 }
107 107
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index f4438ea..f0c1d31 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) {
123 defer r.HTTPResponse.Body.Close() 123 defer r.HTTPResponse.Body.Close()
124 b := &bytes.Buffer{} 124 b := &bytes.Buffer{}
125 if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { 125 if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
 126 r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err) 126 r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
127 return 127 return
128 } 128 }
129 129
@@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) {
136 defer r.HTTPResponse.Body.Close() 136 defer r.HTTPResponse.Body.Close()
137 b := &bytes.Buffer{} 137 b := &bytes.Buffer{}
138 if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { 138 if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
 139 r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err) 139 r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
140 return 140 return
141 } 141 }
142 142
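
The practical effect of replacing the "SerializationError" string literal with request.ErrCodeSerialization is that callers can match the typed constant. A sketch under the assumption of code running on EC2 with a default session:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.Must(session.NewSession()))

	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		// Compare against the constant rather than a hard-coded string.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.ErrCodeSerialization {
			fmt.Println("could not decode identity document:", aerr.Message())
			return
		}
		panic(err)
	}
	fmt.Println("region:", doc.Region)
}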
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 50e170e..2e7bd7a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -27,6 +27,7 @@ const (
27 EuWest1RegionID = "eu-west-1" // EU (Ireland). 27 EuWest1RegionID = "eu-west-1" // EU (Ireland).
28 EuWest2RegionID = "eu-west-2" // EU (London). 28 EuWest2RegionID = "eu-west-2" // EU (London).
29 EuWest3RegionID = "eu-west-3" // EU (Paris). 29 EuWest3RegionID = "eu-west-3" // EU (Paris).
30 MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
30 SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). 31 SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
31 UsEast1RegionID = "us-east-1" // US East (N. Virginia). 32 UsEast1RegionID = "us-east-1" // US East (N. Virginia).
32 UsEast2RegionID = "us-east-2" // US East (Ohio). 33 UsEast2RegionID = "us-east-2" // US East (Ohio).
@@ -128,6 +129,9 @@ var awsPartition = partition{
128 "eu-west-3": region{ 129 "eu-west-3": region{
129 Description: "EU (Paris)", 130 Description: "EU (Paris)",
130 }, 131 },
132 "me-south-1": region{
133 Description: "Middle East (Bahrain)",
134 },
131 "sa-east-1": region{ 135 "sa-east-1": region{
132 Description: "South America (Sao Paulo)", 136 Description: "South America (Sao Paulo)",
133 }, 137 },
@@ -166,6 +170,7 @@ var awsPartition = partition{
166 "eu-west-1": endpoint{}, 170 "eu-west-1": endpoint{},
167 "eu-west-2": endpoint{}, 171 "eu-west-2": endpoint{},
168 "eu-west-3": endpoint{}, 172 "eu-west-3": endpoint{},
173 "me-south-1": endpoint{},
169 "sa-east-1": endpoint{}, 174 "sa-east-1": endpoint{},
170 "us-east-1": endpoint{}, 175 "us-east-1": endpoint{},
171 "us-east-2": endpoint{}, 176 "us-east-2": endpoint{},
@@ -178,6 +183,7 @@ var awsPartition = partition{
178 Protocols: []string{"https"}, 183 Protocols: []string{"https"},
179 }, 184 },
180 Endpoints: endpoints{ 185 Endpoints: endpoints{
186 "ap-east-1": endpoint{},
181 "ap-northeast-1": endpoint{}, 187 "ap-northeast-1": endpoint{},
182 "ap-northeast-2": endpoint{}, 188 "ap-northeast-2": endpoint{},
183 "ap-south-1": endpoint{}, 189 "ap-south-1": endpoint{},
@@ -270,6 +276,12 @@ var awsPartition = partition{
270 Region: "eu-west-3", 276 Region: "eu-west-3",
271 }, 277 },
272 }, 278 },
279 "me-south-1": endpoint{
280 Hostname: "api.ecr.me-south-1.amazonaws.com",
281 CredentialScope: credentialScope{
282 Region: "me-south-1",
283 },
284 },
273 "sa-east-1": endpoint{ 285 "sa-east-1": endpoint{
274 Hostname: "api.ecr.sa-east-1.amazonaws.com", 286 Hostname: "api.ecr.sa-east-1.amazonaws.com",
275 CredentialScope: credentialScope{ 287 CredentialScope: credentialScope{
@@ -381,6 +393,7 @@ var awsPartition = partition{
381 "eu-west-1": endpoint{}, 393 "eu-west-1": endpoint{},
382 "eu-west-2": endpoint{}, 394 "eu-west-2": endpoint{},
383 "eu-west-3": endpoint{}, 395 "eu-west-3": endpoint{},
396 "me-south-1": endpoint{},
384 "sa-east-1": endpoint{}, 397 "sa-east-1": endpoint{},
385 "us-east-1": endpoint{}, 398 "us-east-1": endpoint{},
386 "us-east-2": endpoint{}, 399 "us-east-2": endpoint{},
@@ -409,6 +422,7 @@ var awsPartition = partition{
409 "eu-west-1": endpoint{}, 422 "eu-west-1": endpoint{},
410 "eu-west-2": endpoint{}, 423 "eu-west-2": endpoint{},
411 "eu-west-3": endpoint{}, 424 "eu-west-3": endpoint{},
425 "me-south-1": endpoint{},
412 "sa-east-1": endpoint{}, 426 "sa-east-1": endpoint{},
413 "us-east-1": endpoint{}, 427 "us-east-1": endpoint{},
414 "us-east-2": endpoint{}, 428 "us-east-2": endpoint{},
@@ -416,6 +430,24 @@ var awsPartition = partition{
416 "us-west-2": endpoint{}, 430 "us-west-2": endpoint{},
417 }, 431 },
418 }, 432 },
433 "appmesh": service{
434
435 Endpoints: endpoints{
436 "ap-northeast-1": endpoint{},
437 "ap-northeast-2": endpoint{},
438 "ap-south-1": endpoint{},
439 "ap-southeast-1": endpoint{},
440 "ap-southeast-2": endpoint{},
441 "ca-central-1": endpoint{},
442 "eu-central-1": endpoint{},
443 "eu-west-1": endpoint{},
444 "eu-west-2": endpoint{},
445 "us-east-1": endpoint{},
446 "us-east-2": endpoint{},
447 "us-west-1": endpoint{},
448 "us-west-2": endpoint{},
449 },
450 },
419 "appstream2": service{ 451 "appstream2": service{
420 Defaults: endpoint{ 452 Defaults: endpoint{
421 Protocols: []string{"https"}, 453 Protocols: []string{"https"},
@@ -460,6 +492,7 @@ var awsPartition = partition{
460 "ap-southeast-2": endpoint{}, 492 "ap-southeast-2": endpoint{},
461 "ca-central-1": endpoint{}, 493 "ca-central-1": endpoint{},
462 "eu-central-1": endpoint{}, 494 "eu-central-1": endpoint{},
495 "eu-north-1": endpoint{},
463 "eu-west-1": endpoint{}, 496 "eu-west-1": endpoint{},
464 "eu-west-2": endpoint{}, 497 "eu-west-2": endpoint{},
465 "us-east-1": endpoint{}, 498 "us-east-1": endpoint{},
@@ -484,6 +517,7 @@ var awsPartition = partition{
484 "eu-west-1": endpoint{}, 517 "eu-west-1": endpoint{},
485 "eu-west-2": endpoint{}, 518 "eu-west-2": endpoint{},
486 "eu-west-3": endpoint{}, 519 "eu-west-3": endpoint{},
520 "me-south-1": endpoint{},
487 "sa-east-1": endpoint{}, 521 "sa-east-1": endpoint{},
488 "us-east-1": endpoint{}, 522 "us-east-1": endpoint{},
489 "us-east-2": endpoint{}, 523 "us-east-2": endpoint{},
@@ -515,9 +549,27 @@ var awsPartition = partition{
515 "us-west-2": endpoint{}, 549 "us-west-2": endpoint{},
516 }, 550 },
517 }, 551 },
552 "backup": service{
553
554 Endpoints: endpoints{
555 "ap-northeast-1": endpoint{},
556 "ap-northeast-2": endpoint{},
557 "ap-southeast-1": endpoint{},
558 "ap-southeast-2": endpoint{},
559 "ca-central-1": endpoint{},
560 "eu-central-1": endpoint{},
561 "eu-west-1": endpoint{},
562 "eu-west-2": endpoint{},
563 "us-east-1": endpoint{},
564 "us-east-2": endpoint{},
565 "us-west-1": endpoint{},
566 "us-west-2": endpoint{},
567 },
568 },
518 "batch": service{ 569 "batch": service{
519 570
520 Endpoints: endpoints{ 571 Endpoints: endpoints{
572 "ap-east-1": endpoint{},
521 "ap-northeast-1": endpoint{}, 573 "ap-northeast-1": endpoint{},
522 "ap-northeast-2": endpoint{}, 574 "ap-northeast-2": endpoint{},
523 "ap-south-1": endpoint{}, 575 "ap-south-1": endpoint{},
@@ -584,6 +636,7 @@ var awsPartition = partition{
584 Endpoints: endpoints{ 636 Endpoints: endpoints{
585 "ap-northeast-1": endpoint{}, 637 "ap-northeast-1": endpoint{},
586 "ap-southeast-1": endpoint{}, 638 "ap-southeast-1": endpoint{},
639 "eu-central-1": endpoint{},
587 "eu-west-1": endpoint{}, 640 "eu-west-1": endpoint{},
588 "us-east-1": endpoint{}, 641 "us-east-1": endpoint{},
589 "us-east-2": endpoint{}, 642 "us-east-2": endpoint{},
@@ -619,6 +672,7 @@ var awsPartition = partition{
619 "eu-west-1": endpoint{}, 672 "eu-west-1": endpoint{},
620 "eu-west-2": endpoint{}, 673 "eu-west-2": endpoint{},
621 "eu-west-3": endpoint{}, 674 "eu-west-3": endpoint{},
675 "me-south-1": endpoint{},
622 "sa-east-1": endpoint{}, 676 "sa-east-1": endpoint{},
623 "us-east-1": endpoint{}, 677 "us-east-1": endpoint{},
624 "us-east-2": endpoint{}, 678 "us-east-2": endpoint{},
@@ -662,6 +716,7 @@ var awsPartition = partition{
662 }, 716 },
663 }, 717 },
664 Endpoints: endpoints{ 718 Endpoints: endpoints{
719 "ap-east-1": endpoint{},
665 "ap-northeast-1": endpoint{}, 720 "ap-northeast-1": endpoint{},
666 "ap-northeast-2": endpoint{}, 721 "ap-northeast-2": endpoint{},
667 "ap-south-1": endpoint{}, 722 "ap-south-1": endpoint{},
@@ -709,6 +764,7 @@ var awsPartition = partition{
709 "eu-west-1": endpoint{}, 764 "eu-west-1": endpoint{},
710 "eu-west-2": endpoint{}, 765 "eu-west-2": endpoint{},
711 "eu-west-3": endpoint{}, 766 "eu-west-3": endpoint{},
767 "me-south-1": endpoint{},
712 "sa-east-1": endpoint{}, 768 "sa-east-1": endpoint{},
713 "us-east-1": endpoint{}, 769 "us-east-1": endpoint{},
714 "us-east-2": endpoint{}, 770 "us-east-2": endpoint{},
@@ -726,6 +782,7 @@ var awsPartition = partition{
726 "ap-southeast-2": endpoint{}, 782 "ap-southeast-2": endpoint{},
727 "ca-central-1": endpoint{}, 783 "ca-central-1": endpoint{},
728 "eu-central-1": endpoint{}, 784 "eu-central-1": endpoint{},
785 "eu-north-1": endpoint{},
729 "eu-west-1": endpoint{}, 786 "eu-west-1": endpoint{},
730 "eu-west-2": endpoint{}, 787 "eu-west-2": endpoint{},
731 "eu-west-3": endpoint{}, 788 "eu-west-3": endpoint{},
@@ -789,6 +846,7 @@ var awsPartition = partition{
789 "codedeploy": service{ 846 "codedeploy": service{
790 847
791 Endpoints: endpoints{ 848 Endpoints: endpoints{
849 "ap-east-1": endpoint{},
792 "ap-northeast-1": endpoint{}, 850 "ap-northeast-1": endpoint{},
793 "ap-northeast-2": endpoint{}, 851 "ap-northeast-2": endpoint{},
794 "ap-south-1": endpoint{}, 852 "ap-south-1": endpoint{},
@@ -800,6 +858,7 @@ var awsPartition = partition{
800 "eu-west-1": endpoint{}, 858 "eu-west-1": endpoint{},
801 "eu-west-2": endpoint{}, 859 "eu-west-2": endpoint{},
802 "eu-west-3": endpoint{}, 860 "eu-west-3": endpoint{},
861 "me-south-1": endpoint{},
803 "sa-east-1": endpoint{}, 862 "sa-east-1": endpoint{},
804 "us-east-1": endpoint{}, 863 "us-east-1": endpoint{},
805 "us-east-1-fips": endpoint{ 864 "us-east-1-fips": endpoint{
@@ -937,10 +996,13 @@ var awsPartition = partition{
937 "comprehendmedical": service{ 996 "comprehendmedical": service{
938 997
939 Endpoints: endpoints{ 998 Endpoints: endpoints{
940 "eu-west-1": endpoint{}, 999 "ap-southeast-2": endpoint{},
941 "us-east-1": endpoint{}, 1000 "ca-central-1": endpoint{},
942 "us-east-2": endpoint{}, 1001 "eu-west-1": endpoint{},
943 "us-west-2": endpoint{}, 1002 "eu-west-2": endpoint{},
1003 "us-east-1": endpoint{},
1004 "us-east-2": endpoint{},
1005 "us-west-2": endpoint{},
944 }, 1006 },
945 }, 1007 },
946 "config": service{ 1008 "config": service{
@@ -958,6 +1020,7 @@ var awsPartition = partition{
958 "eu-west-1": endpoint{}, 1020 "eu-west-1": endpoint{},
959 "eu-west-2": endpoint{}, 1021 "eu-west-2": endpoint{},
960 "eu-west-3": endpoint{}, 1022 "eu-west-3": endpoint{},
1023 "me-south-1": endpoint{},
961 "sa-east-1": endpoint{}, 1024 "sa-east-1": endpoint{},
962 "us-east-1": endpoint{}, 1025 "us-east-1": endpoint{},
963 "us-east-2": endpoint{}, 1026 "us-east-2": endpoint{},
@@ -971,6 +1034,19 @@ var awsPartition = partition{
971 "us-east-1": endpoint{}, 1034 "us-east-1": endpoint{},
972 }, 1035 },
973 }, 1036 },
1037 "data.mediastore": service{
1038
1039 Endpoints: endpoints{
1040 "ap-northeast-1": endpoint{},
1041 "ap-northeast-2": endpoint{},
1042 "ap-southeast-2": endpoint{},
1043 "eu-central-1": endpoint{},
1044 "eu-north-1": endpoint{},
1045 "eu-west-1": endpoint{},
1046 "us-east-1": endpoint{},
1047 "us-west-2": endpoint{},
1048 },
1049 },
974 "datapipeline": service{ 1050 "datapipeline": service{
975 1051
976 Endpoints: endpoints{ 1052 Endpoints: endpoints{
@@ -1032,6 +1108,7 @@ var awsPartition = partition{
1032 "eu-west-1": endpoint{}, 1108 "eu-west-1": endpoint{},
1033 "eu-west-2": endpoint{}, 1109 "eu-west-2": endpoint{},
1034 "eu-west-3": endpoint{}, 1110 "eu-west-3": endpoint{},
1111 "me-south-1": endpoint{},
1035 "sa-east-1": endpoint{}, 1112 "sa-east-1": endpoint{},
1036 "us-east-1": endpoint{}, 1113 "us-east-1": endpoint{},
1037 "us-east-2": endpoint{}, 1114 "us-east-2": endpoint{},
@@ -1060,6 +1137,7 @@ var awsPartition = partition{
1060 "eu-west-1": endpoint{}, 1137 "eu-west-1": endpoint{},
1061 "eu-west-2": endpoint{}, 1138 "eu-west-2": endpoint{},
1062 "eu-west-3": endpoint{}, 1139 "eu-west-3": endpoint{},
1140 "me-south-1": endpoint{},
1063 "sa-east-1": endpoint{}, 1141 "sa-east-1": endpoint{},
1064 "us-east-1": endpoint{}, 1142 "us-east-1": endpoint{},
1065 "us-east-2": endpoint{}, 1143 "us-east-2": endpoint{},
@@ -1070,6 +1148,24 @@ var awsPartition = partition{
1070 "docdb": service{ 1148 "docdb": service{
1071 1149
1072 Endpoints: endpoints{ 1150 Endpoints: endpoints{
1151 "ap-northeast-1": endpoint{
1152 Hostname: "rds.ap-northeast-1.amazonaws.com",
1153 CredentialScope: credentialScope{
1154 Region: "ap-northeast-1",
1155 },
1156 },
1157 "ap-northeast-2": endpoint{
1158 Hostname: "rds.ap-northeast-2.amazonaws.com",
1159 CredentialScope: credentialScope{
1160 Region: "ap-northeast-2",
1161 },
1162 },
1163 "ap-southeast-2": endpoint{
1164 Hostname: "rds.ap-southeast-2.amazonaws.com",
1165 CredentialScope: credentialScope{
1166 Region: "ap-southeast-2",
1167 },
1168 },
1073 "eu-central-1": endpoint{ 1169 "eu-central-1": endpoint{
1074 Hostname: "rds.eu-central-1.amazonaws.com", 1170 Hostname: "rds.eu-central-1.amazonaws.com",
1075 CredentialScope: credentialScope{ 1171 CredentialScope: credentialScope{
@@ -1082,6 +1178,12 @@ var awsPartition = partition{
1082 Region: "eu-west-1", 1178 Region: "eu-west-1",
1083 }, 1179 },
1084 }, 1180 },
1181 "eu-west-2": endpoint{
1182 Hostname: "rds.eu-west-2.amazonaws.com",
1183 CredentialScope: credentialScope{
1184 Region: "eu-west-2",
1185 },
1186 },
1085 "us-east-1": endpoint{ 1187 "us-east-1": endpoint{
1086 Hostname: "rds.us-east-1.amazonaws.com", 1188 Hostname: "rds.us-east-1.amazonaws.com",
1087 CredentialScope: credentialScope{ 1189 CredentialScope: credentialScope{
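
The docdb entries above illustrate endpoints that resolve to another service's hostname (rds.*) while pinning the signing region via credentialScope. A hedged sketch of the resulting resolution:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// DocumentDB shares the RDS endpoint, so the URL points at rds.*,
	// while the credential scope keeps signing in the requested region.
	ep, err := endpoints.DefaultResolver().EndpointFor("docdb", "eu-west-2")
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URL)           // https://rds.eu-west-2.amazonaws.com
	fmt.Println(ep.SigningRegion) // eu-west-2
}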
@@ -1112,6 +1214,7 @@ var awsPartition = partition{
1112 "ap-southeast-2": endpoint{}, 1214 "ap-southeast-2": endpoint{},
1113 "ca-central-1": endpoint{}, 1215 "ca-central-1": endpoint{},
1114 "eu-central-1": endpoint{}, 1216 "eu-central-1": endpoint{},
1217 "eu-north-1": endpoint{},
1115 "eu-west-1": endpoint{}, 1218 "eu-west-1": endpoint{},
1116 "eu-west-2": endpoint{}, 1219 "eu-west-2": endpoint{},
1117 "sa-east-1": endpoint{}, 1220 "sa-east-1": endpoint{},
@@ -1133,11 +1236,17 @@ var awsPartition = partition{
1133 "ap-southeast-1": endpoint{}, 1236 "ap-southeast-1": endpoint{},
1134 "ap-southeast-2": endpoint{}, 1237 "ap-southeast-2": endpoint{},
1135 "ca-central-1": endpoint{}, 1238 "ca-central-1": endpoint{},
1136 "eu-central-1": endpoint{}, 1239 "ca-central-1-fips": endpoint{
1137 "eu-north-1": endpoint{}, 1240 Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
1138 "eu-west-1": endpoint{}, 1241 CredentialScope: credentialScope{
1139 "eu-west-2": endpoint{}, 1242 Region: "ca-central-1",
1140 "eu-west-3": endpoint{}, 1243 },
1244 },
1245 "eu-central-1": endpoint{},
1246 "eu-north-1": endpoint{},
1247 "eu-west-1": endpoint{},
1248 "eu-west-2": endpoint{},
1249 "eu-west-3": endpoint{},
1141 "local": endpoint{ 1250 "local": endpoint{
1142 Hostname: "localhost:8000", 1251 Hostname: "localhost:8000",
1143 Protocols: []string{"http"}, 1252 Protocols: []string{"http"},
@@ -1145,11 +1254,36 @@ var awsPartition = partition{
1145 Region: "us-east-1", 1254 Region: "us-east-1",
1146 }, 1255 },
1147 }, 1256 },
1148 "sa-east-1": endpoint{}, 1257 "me-south-1": endpoint{},
1149 "us-east-1": endpoint{}, 1258 "sa-east-1": endpoint{},
1259 "us-east-1": endpoint{},
1260 "us-east-1-fips": endpoint{
1261 Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
1262 CredentialScope: credentialScope{
1263 Region: "us-east-1",
1264 },
1265 },
1150 "us-east-2": endpoint{}, 1266 "us-east-2": endpoint{},
1267 "us-east-2-fips": endpoint{
1268 Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
1269 CredentialScope: credentialScope{
1270 Region: "us-east-2",
1271 },
1272 },
1151 "us-west-1": endpoint{}, 1273 "us-west-1": endpoint{},
1274 "us-west-1-fips": endpoint{
1275 Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
1276 CredentialScope: credentialScope{
1277 Region: "us-west-1",
1278 },
1279 },
1152 "us-west-2": endpoint{}, 1280 "us-west-2": endpoint{},
1281 "us-west-2-fips": endpoint{
1282 Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
1283 CredentialScope: credentialScope{
1284 Region: "us-west-2",
1285 },
1286 },
1153 }, 1287 },
1154 }, 1288 },
1155 "ec2": service{ 1289 "ec2": service{
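
The new *-fips keys added to dynamodb above are modeled as pseudo-regions with dedicated hostnames and a credential scope pinned to the real region. A sketch of resolving them, and the long-standing "local" key, through the endpoints package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()

	// FIPS pseudo-region: its own hostname, signing stays in us-east-1.
	fips, err := resolver.EndpointFor("dynamodb", "us-east-1-fips")
	if err != nil {
		panic(err)
	}
	fmt.Println(fips.URL)           // https://dynamodb-fips.us-east-1.amazonaws.com
	fmt.Println(fips.SigningRegion) // us-east-1

	// The "local" key targets DynamoDB Local over plain HTTP.
	local, err := resolver.EndpointFor("dynamodb", "local")
	if err != nil {
		panic(err)
	}
	fmt.Println(local.URL) // http://localhost:8000
}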
@@ -1169,6 +1303,7 @@ var awsPartition = partition{
1169 "eu-west-1": endpoint{}, 1303 "eu-west-1": endpoint{},
1170 "eu-west-2": endpoint{}, 1304 "eu-west-2": endpoint{},
1171 "eu-west-3": endpoint{}, 1305 "eu-west-3": endpoint{},
1306 "me-south-1": endpoint{},
1172 "sa-east-1": endpoint{}, 1307 "sa-east-1": endpoint{},
1173 "us-east-1": endpoint{}, 1308 "us-east-1": endpoint{},
1174 "us-east-2": endpoint{}, 1309 "us-east-2": endpoint{},
@@ -1202,6 +1337,7 @@ var awsPartition = partition{
1202 "eu-west-1": endpoint{}, 1337 "eu-west-1": endpoint{},
1203 "eu-west-2": endpoint{}, 1338 "eu-west-2": endpoint{},
1204 "eu-west-3": endpoint{}, 1339 "eu-west-3": endpoint{},
1340 "me-south-1": endpoint{},
1205 "sa-east-1": endpoint{}, 1341 "sa-east-1": endpoint{},
1206 "us-east-1": endpoint{}, 1342 "us-east-1": endpoint{},
1207 "us-east-2": endpoint{}, 1343 "us-east-2": endpoint{},
@@ -1230,16 +1366,18 @@ var awsPartition = partition{
1230 Region: "us-west-1", 1366 Region: "us-west-1",
1231 }, 1367 },
1232 }, 1368 },
1233 "sa-east-1": endpoint{}, 1369 "me-south-1": endpoint{},
1234 "us-east-1": endpoint{}, 1370 "sa-east-1": endpoint{},
1235 "us-east-2": endpoint{}, 1371 "us-east-1": endpoint{},
1236 "us-west-1": endpoint{}, 1372 "us-east-2": endpoint{},
1237 "us-west-2": endpoint{}, 1373 "us-west-1": endpoint{},
1374 "us-west-2": endpoint{},
1238 }, 1375 },
1239 }, 1376 },
1240 "elasticbeanstalk": service{ 1377 "elasticbeanstalk": service{
1241 1378
1242 Endpoints: endpoints{ 1379 Endpoints: endpoints{
1380 "ap-east-1": endpoint{},
1243 "ap-northeast-1": endpoint{}, 1381 "ap-northeast-1": endpoint{},
1244 "ap-northeast-2": endpoint{}, 1382 "ap-northeast-2": endpoint{},
1245 "ap-south-1": endpoint{}, 1383 "ap-south-1": endpoint{},
@@ -1251,6 +1389,7 @@ var awsPartition = partition{
1251 "eu-west-1": endpoint{}, 1389 "eu-west-1": endpoint{},
1252 "eu-west-2": endpoint{}, 1390 "eu-west-2": endpoint{},
1253 "eu-west-3": endpoint{}, 1391 "eu-west-3": endpoint{},
1392 "me-south-1": endpoint{},
1254 "sa-east-1": endpoint{}, 1393 "sa-east-1": endpoint{},
1255 "us-east-1": endpoint{}, 1394 "us-east-1": endpoint{},
1256 "us-east-2": endpoint{}, 1395 "us-east-2": endpoint{},
@@ -1263,11 +1402,14 @@ var awsPartition = partition{
1263 Endpoints: endpoints{ 1402 Endpoints: endpoints{
1264 "ap-northeast-1": endpoint{}, 1403 "ap-northeast-1": endpoint{},
1265 "ap-northeast-2": endpoint{}, 1404 "ap-northeast-2": endpoint{},
1405 "ap-south-1": endpoint{},
1266 "ap-southeast-1": endpoint{}, 1406 "ap-southeast-1": endpoint{},
1267 "ap-southeast-2": endpoint{}, 1407 "ap-southeast-2": endpoint{},
1408 "ca-central-1": endpoint{},
1268 "eu-central-1": endpoint{}, 1409 "eu-central-1": endpoint{},
1269 "eu-west-1": endpoint{}, 1410 "eu-west-1": endpoint{},
1270 "eu-west-2": endpoint{}, 1411 "eu-west-2": endpoint{},
1412 "eu-west-3": endpoint{},
1271 "us-east-1": endpoint{}, 1413 "us-east-1": endpoint{},
1272 "us-east-2": endpoint{}, 1414 "us-east-2": endpoint{},
1273 "us-west-1": endpoint{}, 1415 "us-west-1": endpoint{},
@@ -1291,6 +1433,7 @@ var awsPartition = partition{
1291 "eu-west-1": endpoint{}, 1433 "eu-west-1": endpoint{},
1292 "eu-west-2": endpoint{}, 1434 "eu-west-2": endpoint{},
1293 "eu-west-3": endpoint{}, 1435 "eu-west-3": endpoint{},
1436 "me-south-1": endpoint{},
1294 "sa-east-1": endpoint{}, 1437 "sa-east-1": endpoint{},
1295 "us-east-1": endpoint{}, 1438 "us-east-1": endpoint{},
1296 "us-east-2": endpoint{}, 1439 "us-east-2": endpoint{},
@@ -1318,6 +1461,7 @@ var awsPartition = partition{
1318 "eu-west-1": endpoint{}, 1461 "eu-west-1": endpoint{},
1319 "eu-west-2": endpoint{}, 1462 "eu-west-2": endpoint{},
1320 "eu-west-3": endpoint{}, 1463 "eu-west-3": endpoint{},
1464 "me-south-1": endpoint{},
1321 "sa-east-1": endpoint{}, 1465 "sa-east-1": endpoint{},
1322 "us-east-1": endpoint{ 1466 "us-east-1": endpoint{
1323 SSLCommonName: "{service}.{region}.{dnsSuffix}", 1467 SSLCommonName: "{service}.{region}.{dnsSuffix}",
@@ -1343,10 +1487,12 @@ var awsPartition = partition{
1343 "email": service{ 1487 "email": service{
1344 1488
1345 Endpoints: endpoints{ 1489 Endpoints: endpoints{
1346 "eu-central-1": endpoint{}, 1490 "ap-south-1": endpoint{},
1347 "eu-west-1": endpoint{}, 1491 "ap-southeast-2": endpoint{},
1348 "us-east-1": endpoint{}, 1492 "eu-central-1": endpoint{},
1349 "us-west-2": endpoint{}, 1493 "eu-west-1": endpoint{},
1494 "us-east-1": endpoint{},
1495 "us-west-2": endpoint{},
1350 }, 1496 },
1351 }, 1497 },
1352 "entitlement.marketplace": service{ 1498 "entitlement.marketplace": service{
@@ -1402,6 +1548,7 @@ var awsPartition = partition{
1402 "eu-west-1": endpoint{}, 1548 "eu-west-1": endpoint{},
1403 "eu-west-2": endpoint{}, 1549 "eu-west-2": endpoint{},
1404 "eu-west-3": endpoint{}, 1550 "eu-west-3": endpoint{},
1551 "me-south-1": endpoint{},
1405 "sa-east-1": endpoint{}, 1552 "sa-east-1": endpoint{},
1406 "us-east-1": endpoint{}, 1553 "us-east-1": endpoint{},
1407 "us-east-2": endpoint{}, 1554 "us-east-2": endpoint{},
@@ -1419,6 +1566,7 @@ var awsPartition = partition{
1419 "ap-southeast-2": endpoint{}, 1566 "ap-southeast-2": endpoint{},
1420 "ca-central-1": endpoint{}, 1567 "ca-central-1": endpoint{},
1421 "eu-central-1": endpoint{}, 1568 "eu-central-1": endpoint{},
1569 "eu-north-1": endpoint{},
1422 "eu-west-1": endpoint{}, 1570 "eu-west-1": endpoint{},
1423 "eu-west-2": endpoint{}, 1571 "eu-west-2": endpoint{},
1424 "eu-west-3": endpoint{}, 1572 "eu-west-3": endpoint{},
@@ -1435,11 +1583,15 @@ var awsPartition = partition{
1435 }, 1583 },
1436 Endpoints: endpoints{ 1584 Endpoints: endpoints{
1437 "ap-northeast-1": endpoint{}, 1585 "ap-northeast-1": endpoint{},
1586 "ap-northeast-2": endpoint{},
1587 "ap-southeast-1": endpoint{},
1438 "ap-southeast-2": endpoint{}, 1588 "ap-southeast-2": endpoint{},
1439 "eu-central-1": endpoint{}, 1589 "eu-central-1": endpoint{},
1440 "eu-west-1": endpoint{}, 1590 "eu-west-1": endpoint{},
1591 "eu-west-2": endpoint{},
1441 "us-east-1": endpoint{}, 1592 "us-east-1": endpoint{},
1442 "us-east-2": endpoint{}, 1593 "us-east-2": endpoint{},
1594 "us-west-1": endpoint{},
1443 "us-west-2": endpoint{}, 1595 "us-west-2": endpoint{},
1444 }, 1596 },
1445 }, 1597 },
@@ -1447,10 +1599,14 @@ var awsPartition = partition{
1447 1599
1448 Endpoints: endpoints{ 1600 Endpoints: endpoints{
1449 "ap-northeast-1": endpoint{}, 1601 "ap-northeast-1": endpoint{},
1602 "ap-southeast-1": endpoint{},
1450 "ap-southeast-2": endpoint{}, 1603 "ap-southeast-2": endpoint{},
1604 "eu-central-1": endpoint{},
1451 "eu-west-1": endpoint{}, 1605 "eu-west-1": endpoint{},
1606 "eu-west-2": endpoint{},
1452 "us-east-1": endpoint{}, 1607 "us-east-1": endpoint{},
1453 "us-east-2": endpoint{}, 1608 "us-east-2": endpoint{},
1609 "us-west-1": endpoint{},
1454 "us-west-2": endpoint{}, 1610 "us-west-2": endpoint{},
1455 }, 1611 },
1456 }, 1612 },
@@ -1490,6 +1646,7 @@ var awsPartition = partition{
1490 "eu-west-1": endpoint{}, 1646 "eu-west-1": endpoint{},
1491 "eu-west-2": endpoint{}, 1647 "eu-west-2": endpoint{},
1492 "eu-west-3": endpoint{}, 1648 "eu-west-3": endpoint{},
1649 "me-south-1": endpoint{},
1493 "sa-east-1": endpoint{}, 1650 "sa-east-1": endpoint{},
1494 "us-east-1": endpoint{}, 1651 "us-east-1": endpoint{},
1495 "us-east-2": endpoint{}, 1652 "us-east-2": endpoint{},
@@ -1500,6 +1657,7 @@ var awsPartition = partition{
1500 "glue": service{ 1657 "glue": service{
1501 1658
1502 Endpoints: endpoints{ 1659 Endpoints: endpoints{
1660 "ap-east-1": endpoint{},
1503 "ap-northeast-1": endpoint{}, 1661 "ap-northeast-1": endpoint{},
1504 "ap-northeast-2": endpoint{}, 1662 "ap-northeast-2": endpoint{},
1505 "ap-south-1": endpoint{}, 1663 "ap-south-1": endpoint{},
@@ -1507,9 +1665,11 @@ var awsPartition = partition{
1507 "ap-southeast-2": endpoint{}, 1665 "ap-southeast-2": endpoint{},
1508 "ca-central-1": endpoint{}, 1666 "ca-central-1": endpoint{},
1509 "eu-central-1": endpoint{}, 1667 "eu-central-1": endpoint{},
1668 "eu-north-1": endpoint{},
1510 "eu-west-1": endpoint{}, 1669 "eu-west-1": endpoint{},
1511 "eu-west-2": endpoint{}, 1670 "eu-west-2": endpoint{},
1512 "eu-west-3": endpoint{}, 1671 "eu-west-3": endpoint{},
1672 "sa-east-1": endpoint{},
1513 "us-east-1": endpoint{}, 1673 "us-east-1": endpoint{},
1514 "us-east-2": endpoint{}, 1674 "us-east-2": endpoint{},
1515 "us-west-1": endpoint{}, 1675 "us-west-1": endpoint{},
@@ -1523,19 +1683,32 @@ var awsPartition = partition{
1523 }, 1683 },
1524 Endpoints: endpoints{ 1684 Endpoints: endpoints{
1525 "ap-northeast-1": endpoint{}, 1685 "ap-northeast-1": endpoint{},
1686 "ap-northeast-2": endpoint{},
1687 "ap-south-1": endpoint{},
1688 "ap-southeast-1": endpoint{},
1526 "ap-southeast-2": endpoint{}, 1689 "ap-southeast-2": endpoint{},
1527 "eu-central-1": endpoint{}, 1690 "eu-central-1": endpoint{},
1528 "eu-west-1": endpoint{}, 1691 "eu-west-1": endpoint{},
1692 "eu-west-2": endpoint{},
1529 "us-east-1": endpoint{}, 1693 "us-east-1": endpoint{},
1694 "us-east-2": endpoint{},
1530 "us-west-2": endpoint{}, 1695 "us-west-2": endpoint{},
1531 }, 1696 },
1532 }, 1697 },
1698 "groundstation": service{
1699
1700 Endpoints: endpoints{
1701 "us-east-2": endpoint{},
1702 "us-west-2": endpoint{},
1703 },
1704 },
1533 "guardduty": service{ 1705 "guardduty": service{
1534 IsRegionalized: boxedTrue, 1706 IsRegionalized: boxedTrue,
1535 Defaults: endpoint{ 1707 Defaults: endpoint{
1536 Protocols: []string{"https"}, 1708 Protocols: []string{"https"},
1537 }, 1709 },
1538 Endpoints: endpoints{ 1710 Endpoints: endpoints{
1711 "ap-east-1": endpoint{},
1539 "ap-northeast-1": endpoint{}, 1712 "ap-northeast-1": endpoint{},
1540 "ap-northeast-2": endpoint{}, 1713 "ap-northeast-2": endpoint{},
1541 "ap-south-1": endpoint{}, 1714 "ap-south-1": endpoint{},
@@ -1543,6 +1716,7 @@ var awsPartition = partition{
1543 "ap-southeast-2": endpoint{}, 1716 "ap-southeast-2": endpoint{},
1544 "ca-central-1": endpoint{}, 1717 "ca-central-1": endpoint{},
1545 "eu-central-1": endpoint{}, 1718 "eu-central-1": endpoint{},
1719 "eu-north-1": endpoint{},
1546 "eu-west-1": endpoint{}, 1720 "eu-west-1": endpoint{},
1547 "eu-west-2": endpoint{}, 1721 "eu-west-2": endpoint{},
1548 "eu-west-3": endpoint{}, 1722 "eu-west-3": endpoint{},
@@ -1595,7 +1769,9 @@ var awsPartition = partition{
1595 "ap-south-1": endpoint{}, 1769 "ap-south-1": endpoint{},
1596 "ap-southeast-2": endpoint{}, 1770 "ap-southeast-2": endpoint{},
1597 "eu-central-1": endpoint{}, 1771 "eu-central-1": endpoint{},
1772 "eu-north-1": endpoint{},
1598 "eu-west-1": endpoint{}, 1773 "eu-west-1": endpoint{},
1774 "eu-west-2": endpoint{},
1599 "us-east-1": endpoint{}, 1775 "us-east-1": endpoint{},
1600 "us-east-2": endpoint{}, 1776 "us-east-2": endpoint{},
1601 "us-west-1": endpoint{}, 1777 "us-west-1": endpoint{},
@@ -1614,11 +1790,16 @@ var awsPartition = partition{
1614 "ap-south-1": endpoint{}, 1790 "ap-south-1": endpoint{},
1615 "ap-southeast-1": endpoint{}, 1791 "ap-southeast-1": endpoint{},
1616 "ap-southeast-2": endpoint{}, 1792 "ap-southeast-2": endpoint{},
1793 "ca-central-1": endpoint{},
1617 "eu-central-1": endpoint{}, 1794 "eu-central-1": endpoint{},
1795 "eu-north-1": endpoint{},
1618 "eu-west-1": endpoint{}, 1796 "eu-west-1": endpoint{},
1619 "eu-west-2": endpoint{}, 1797 "eu-west-2": endpoint{},
1798 "eu-west-3": endpoint{},
1799 "sa-east-1": endpoint{},
1620 "us-east-1": endpoint{}, 1800 "us-east-1": endpoint{},
1621 "us-east-2": endpoint{}, 1801 "us-east-2": endpoint{},
1802 "us-west-1": endpoint{},
1622 "us-west-2": endpoint{}, 1803 "us-west-2": endpoint{},
1623 }, 1804 },
1624 }, 1805 },
@@ -1633,6 +1814,95 @@ var awsPartition = partition{
1633 "us-west-2": endpoint{}, 1814 "us-west-2": endpoint{},
1634 }, 1815 },
1635 }, 1816 },
1817 "iotevents": service{
1818
1819 Endpoints: endpoints{
1820 "ap-northeast-1": endpoint{},
1821 "ap-southeast-2": endpoint{},
1822 "eu-central-1": endpoint{},
1823 "eu-west-1": endpoint{},
1824 "us-east-1": endpoint{},
1825 "us-east-2": endpoint{},
1826 "us-west-2": endpoint{},
1827 },
1828 },
1829 "ioteventsdata": service{
1830
1831 Endpoints: endpoints{
1832 "ap-northeast-1": endpoint{
1833 Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
1834 CredentialScope: credentialScope{
1835 Region: "ap-northeast-1",
1836 },
1837 },
1838 "ap-southeast-2": endpoint{
1839 Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
1840 CredentialScope: credentialScope{
1841 Region: "ap-southeast-2",
1842 },
1843 },
1844 "eu-central-1": endpoint{
1845 Hostname: "data.iotevents.eu-central-1.amazonaws.com",
1846 CredentialScope: credentialScope{
1847 Region: "eu-central-1",
1848 },
1849 },
1850 "eu-west-1": endpoint{
1851 Hostname: "data.iotevents.eu-west-1.amazonaws.com",
1852 CredentialScope: credentialScope{
1853 Region: "eu-west-1",
1854 },
1855 },
1856 "us-east-1": endpoint{
1857 Hostname: "data.iotevents.us-east-1.amazonaws.com",
1858 CredentialScope: credentialScope{
1859 Region: "us-east-1",
1860 },
1861 },
1862 "us-east-2": endpoint{
1863 Hostname: "data.iotevents.us-east-2.amazonaws.com",
1864 CredentialScope: credentialScope{
1865 Region: "us-east-2",
1866 },
1867 },
1868 "us-west-2": endpoint{
1869 Hostname: "data.iotevents.us-west-2.amazonaws.com",
1870 CredentialScope: credentialScope{
1871 Region: "us-west-2",
1872 },
1873 },
1874 },
1875 },
1876 "iotthingsgraph": service{
1877 Defaults: endpoint{
1878 CredentialScope: credentialScope{
1879 Service: "iotthingsgraph",
1880 },
1881 },
1882 Endpoints: endpoints{
1883 "ap-northeast-1": endpoint{},
1884 "ap-southeast-2": endpoint{},
1885 "eu-west-1": endpoint{},
1886 "us-east-1": endpoint{},
1887 "us-west-2": endpoint{},
1888 },
1889 },
1890 "kafka": service{
1891
1892 Endpoints: endpoints{
1893 "ap-northeast-1": endpoint{},
1894 "ap-southeast-1": endpoint{},
1895 "ap-southeast-2": endpoint{},
1896 "eu-central-1": endpoint{},
1897 "eu-north-1": endpoint{},
1898 "eu-west-1": endpoint{},
1899 "eu-west-2": endpoint{},
1900 "eu-west-3": endpoint{},
1901 "us-east-1": endpoint{},
1902 "us-east-2": endpoint{},
1903 "us-west-2": endpoint{},
1904 },
1905 },
1636 "kinesis": service{ 1906 "kinesis": service{
1637 1907
1638 Endpoints: endpoints{ 1908 Endpoints: endpoints{
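
The new iotthingsgraph entry is unusual in that its Defaults set a credential-scope service (rather than region) override. A sketch of the effect, assuming the resolver reports explicitly scoped signing names as non-derived via SigningNameDerived:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("iotthingsgraph", "us-east-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URL)                // https://iotthingsgraph.us-east-1.amazonaws.com
	fmt.Println(ep.SigningName)        // iotthingsgraph (from the credentialScope Service)
	fmt.Println(ep.SigningNameDerived) // false: the name is explicit, not derived
}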
@@ -1648,6 +1918,7 @@ var awsPartition = partition{
1648 "eu-west-1": endpoint{}, 1918 "eu-west-1": endpoint{},
1649 "eu-west-2": endpoint{}, 1919 "eu-west-2": endpoint{},
1650 "eu-west-3": endpoint{}, 1920 "eu-west-3": endpoint{},
1921 "me-south-1": endpoint{},
1651 "sa-east-1": endpoint{}, 1922 "sa-east-1": endpoint{},
1652 "us-east-1": endpoint{}, 1923 "us-east-1": endpoint{},
1653 "us-east-2": endpoint{}, 1924 "us-east-2": endpoint{},
@@ -1658,11 +1929,16 @@ var awsPartition = partition{
1658 "kinesisanalytics": service{ 1929 "kinesisanalytics": service{
1659 1930
1660 Endpoints: endpoints{ 1931 Endpoints: endpoints{
1661 "eu-central-1": endpoint{}, 1932 "ap-northeast-1": endpoint{},
1662 "eu-west-1": endpoint{}, 1933 "ap-northeast-2": endpoint{},
1663 "us-east-1": endpoint{}, 1934 "ap-southeast-1": endpoint{},
1664 "us-east-2": endpoint{}, 1935 "ap-southeast-2": endpoint{},
1665 "us-west-2": endpoint{}, 1936 "eu-central-1": endpoint{},
1937 "eu-west-1": endpoint{},
1938 "eu-west-2": endpoint{},
1939 "us-east-1": endpoint{},
1940 "us-east-2": endpoint{},
1941 "us-west-2": endpoint{},
1666 }, 1942 },
1667 }, 1943 },
1668 "kinesisvideo": service{ 1944 "kinesisvideo": service{
@@ -1679,12 +1955,6 @@ var awsPartition = partition{
1679 "kms": service{ 1955 "kms": service{
1680 1956
1681 Endpoints: endpoints{ 1957 Endpoints: endpoints{
1682 "ProdFips": endpoint{
1683 Hostname: "kms-fips.ca-central-1.amazonaws.com",
1684 CredentialScope: credentialScope{
1685 Region: "ca-central-1",
1686 },
1687 },
1688 "ap-east-1": endpoint{}, 1958 "ap-east-1": endpoint{},
1689 "ap-northeast-1": endpoint{}, 1959 "ap-northeast-1": endpoint{},
1690 "ap-northeast-2": endpoint{}, 1960 "ap-northeast-2": endpoint{},
@@ -1697,6 +1967,7 @@ var awsPartition = partition{
1697 "eu-west-1": endpoint{}, 1967 "eu-west-1": endpoint{},
1698 "eu-west-2": endpoint{}, 1968 "eu-west-2": endpoint{},
1699 "eu-west-3": endpoint{}, 1969 "eu-west-3": endpoint{},
1970 "me-south-1": endpoint{},
1700 "sa-east-1": endpoint{}, 1971 "sa-east-1": endpoint{},
1701 "us-east-1": endpoint{}, 1972 "us-east-1": endpoint{},
1702 "us-east-2": endpoint{}, 1973 "us-east-2": endpoint{},
@@ -1719,6 +1990,7 @@ var awsPartition = partition{
1719 "eu-west-1": endpoint{}, 1990 "eu-west-1": endpoint{},
1720 "eu-west-2": endpoint{}, 1991 "eu-west-2": endpoint{},
1721 "eu-west-3": endpoint{}, 1992 "eu-west-3": endpoint{},
1993 "me-south-1": endpoint{},
1722 "sa-east-1": endpoint{}, 1994 "sa-east-1": endpoint{},
1723 "us-east-1": endpoint{}, 1995 "us-east-1": endpoint{},
1724 "us-east-2": endpoint{}, 1996 "us-east-2": endpoint{},
@@ -1729,16 +2001,22 @@ var awsPartition = partition{
1729 "license-manager": service{ 2001 "license-manager": service{
1730 2002
1731 Endpoints: endpoints{ 2003 Endpoints: endpoints{
2004 "ap-east-1": endpoint{},
1732 "ap-northeast-1": endpoint{}, 2005 "ap-northeast-1": endpoint{},
1733 "ap-northeast-2": endpoint{}, 2006 "ap-northeast-2": endpoint{},
1734 "ap-south-1": endpoint{}, 2007 "ap-south-1": endpoint{},
1735 "ap-southeast-1": endpoint{}, 2008 "ap-southeast-1": endpoint{},
1736 "ap-southeast-2": endpoint{}, 2009 "ap-southeast-2": endpoint{},
2010 "ca-central-1": endpoint{},
1737 "eu-central-1": endpoint{}, 2011 "eu-central-1": endpoint{},
2012 "eu-north-1": endpoint{},
1738 "eu-west-1": endpoint{}, 2013 "eu-west-1": endpoint{},
1739 "eu-west-2": endpoint{}, 2014 "eu-west-2": endpoint{},
2015 "eu-west-3": endpoint{},
2016 "sa-east-1": endpoint{},
1740 "us-east-1": endpoint{}, 2017 "us-east-1": endpoint{},
1741 "us-east-2": endpoint{}, 2018 "us-east-2": endpoint{},
2019 "us-west-1": endpoint{},
1742 "us-west-2": endpoint{}, 2020 "us-west-2": endpoint{},
1743 }, 2021 },
1744 }, 2022 },
@@ -1775,6 +2053,7 @@ var awsPartition = partition{
1775 "eu-west-1": endpoint{}, 2053 "eu-west-1": endpoint{},
1776 "eu-west-2": endpoint{}, 2054 "eu-west-2": endpoint{},
1777 "eu-west-3": endpoint{}, 2055 "eu-west-3": endpoint{},
2056 "me-south-1": endpoint{},
1778 "sa-east-1": endpoint{}, 2057 "sa-east-1": endpoint{},
1779 "us-east-1": endpoint{}, 2058 "us-east-1": endpoint{},
1780 "us-east-2": endpoint{}, 2059 "us-east-2": endpoint{},
@@ -1843,6 +2122,7 @@ var awsPartition = partition{
1843 "ap-southeast-1": endpoint{}, 2122 "ap-southeast-1": endpoint{},
1844 "ap-southeast-2": endpoint{}, 2123 "ap-southeast-2": endpoint{},
1845 "eu-central-1": endpoint{}, 2124 "eu-central-1": endpoint{},
2125 "eu-north-1": endpoint{},
1846 "eu-west-1": endpoint{}, 2126 "eu-west-1": endpoint{},
1847 "sa-east-1": endpoint{}, 2127 "sa-east-1": endpoint{},
1848 "us-east-1": endpoint{}, 2128 "us-east-1": endpoint{},
@@ -1873,6 +2153,7 @@ var awsPartition = partition{
1873 "ap-northeast-2": endpoint{}, 2153 "ap-northeast-2": endpoint{},
1874 "ap-southeast-2": endpoint{}, 2154 "ap-southeast-2": endpoint{},
1875 "eu-central-1": endpoint{}, 2155 "eu-central-1": endpoint{},
2156 "eu-north-1": endpoint{},
1876 "eu-west-1": endpoint{}, 2157 "eu-west-1": endpoint{},
1877 "us-east-1": endpoint{}, 2158 "us-east-1": endpoint{},
1878 "us-west-2": endpoint{}, 2159 "us-west-2": endpoint{},
@@ -1945,6 +2226,7 @@ var awsPartition = partition{
1945 "eu-west-1": endpoint{}, 2226 "eu-west-1": endpoint{},
1946 "eu-west-2": endpoint{}, 2227 "eu-west-2": endpoint{},
1947 "eu-west-3": endpoint{}, 2228 "eu-west-3": endpoint{},
2229 "me-south-1": endpoint{},
1948 "sa-east-1": endpoint{}, 2230 "sa-east-1": endpoint{},
1949 "us-east-1": endpoint{}, 2231 "us-east-1": endpoint{},
1950 "us-east-2": endpoint{}, 2232 "us-east-2": endpoint{},
@@ -1957,11 +2239,14 @@ var awsPartition = partition{
1957 Endpoints: endpoints{ 2239 Endpoints: endpoints{
1958 "ap-northeast-1": endpoint{}, 2240 "ap-northeast-1": endpoint{},
1959 "ap-northeast-2": endpoint{}, 2241 "ap-northeast-2": endpoint{},
2242 "ap-south-1": endpoint{},
1960 "ap-southeast-1": endpoint{}, 2243 "ap-southeast-1": endpoint{},
1961 "ap-southeast-2": endpoint{}, 2244 "ap-southeast-2": endpoint{},
2245 "ca-central-1": endpoint{},
1962 "eu-central-1": endpoint{}, 2246 "eu-central-1": endpoint{},
1963 "eu-west-1": endpoint{}, 2247 "eu-west-1": endpoint{},
1964 "eu-west-2": endpoint{}, 2248 "eu-west-2": endpoint{},
2249 "eu-west-3": endpoint{},
1965 "us-east-1": endpoint{}, 2250 "us-east-1": endpoint{},
1966 "us-east-2": endpoint{}, 2251 "us-east-2": endpoint{},
1967 "us-west-1": endpoint{}, 2252 "us-west-1": endpoint{},
@@ -1987,6 +2272,12 @@ var awsPartition = partition{
1987 Region: "ap-northeast-1", 2272 Region: "ap-northeast-1",
1988 }, 2273 },
1989 }, 2274 },
2275 "ap-northeast-2": endpoint{
2276 Hostname: "rds.ap-northeast-2.amazonaws.com",
2277 CredentialScope: credentialScope{
2278 Region: "ap-northeast-2",
2279 },
2280 },
1990 "ap-south-1": endpoint{ 2281 "ap-south-1": endpoint{
1991 Hostname: "rds.ap-south-1.amazonaws.com", 2282 Hostname: "rds.ap-south-1.amazonaws.com",
1992 CredentialScope: credentialScope{ 2283 CredentialScope: credentialScope{
@@ -2011,6 +2302,12 @@ var awsPartition = partition{
2011 Region: "eu-central-1", 2302 Region: "eu-central-1",
2012 }, 2303 },
2013 }, 2304 },
2305 "eu-north-1": endpoint{
2306 Hostname: "rds.eu-north-1.amazonaws.com",
2307 CredentialScope: credentialScope{
2308 Region: "eu-north-1",
2309 },
2310 },
2014 "eu-west-1": endpoint{ 2311 "eu-west-1": endpoint{
2015 Hostname: "rds.eu-west-1.amazonaws.com", 2312 Hostname: "rds.eu-west-1.amazonaws.com",
2016 CredentialScope: credentialScope{ 2313 CredentialScope: credentialScope{
@@ -2126,6 +2423,38 @@ var awsPartition = partition{
2126 "us-west-2": endpoint{}, 2423 "us-west-2": endpoint{},
2127 }, 2424 },
2128 }, 2425 },
2426 "projects.iot1click": service{
2427
2428 Endpoints: endpoints{
2429 "ap-northeast-1": endpoint{},
2430 "eu-central-1": endpoint{},
2431 "eu-west-1": endpoint{},
2432 "eu-west-2": endpoint{},
2433 "us-east-1": endpoint{},
2434 "us-east-2": endpoint{},
2435 "us-west-2": endpoint{},
2436 },
2437 },
2438 "ram": service{
2439
2440 Endpoints: endpoints{
2441 "ap-northeast-1": endpoint{},
2442 "ap-northeast-2": endpoint{},
2443 "ap-south-1": endpoint{},
2444 "ap-southeast-1": endpoint{},
2445 "ap-southeast-2": endpoint{},
2446 "ca-central-1": endpoint{},
2447 "eu-central-1": endpoint{},
2448 "eu-north-1": endpoint{},
2449 "eu-west-1": endpoint{},
2450 "eu-west-2": endpoint{},
2451 "eu-west-3": endpoint{},
2452 "us-east-1": endpoint{},
2453 "us-east-2": endpoint{},
2454 "us-west-1": endpoint{},
2455 "us-west-2": endpoint{},
2456 },
2457 },
2129 "rds": service{ 2458 "rds": service{
2130 2459
2131 Endpoints: endpoints{ 2460 Endpoints: endpoints{
@@ -2165,6 +2494,7 @@ var awsPartition = partition{
2165 "eu-west-1": endpoint{}, 2494 "eu-west-1": endpoint{},
2166 "eu-west-2": endpoint{}, 2495 "eu-west-2": endpoint{},
2167 "eu-west-3": endpoint{}, 2496 "eu-west-3": endpoint{},
2497 "me-south-1": endpoint{},
2168 "sa-east-1": endpoint{}, 2498 "sa-east-1": endpoint{},
2169 "us-east-1": endpoint{}, 2499 "us-east-1": endpoint{},
2170 "us-east-2": endpoint{}, 2500 "us-east-2": endpoint{},
@@ -2178,10 +2508,14 @@ var awsPartition = partition{
2178 "ap-northeast-1": endpoint{}, 2508 "ap-northeast-1": endpoint{},
2179 "ap-northeast-2": endpoint{}, 2509 "ap-northeast-2": endpoint{},
2180 "ap-south-1": endpoint{}, 2510 "ap-south-1": endpoint{},
2511 "ap-southeast-1": endpoint{},
2181 "ap-southeast-2": endpoint{}, 2512 "ap-southeast-2": endpoint{},
2513 "eu-central-1": endpoint{},
2182 "eu-west-1": endpoint{}, 2514 "eu-west-1": endpoint{},
2515 "eu-west-2": endpoint{},
2183 "us-east-1": endpoint{}, 2516 "us-east-1": endpoint{},
2184 "us-east-2": endpoint{}, 2517 "us-east-2": endpoint{},
2518 "us-west-1": endpoint{},
2185 "us-west-2": endpoint{}, 2519 "us-west-2": endpoint{},
2186 }, 2520 },
2187 }, 2521 },
@@ -2200,6 +2534,7 @@ var awsPartition = partition{
2200 "eu-west-1": endpoint{}, 2534 "eu-west-1": endpoint{},
2201 "eu-west-2": endpoint{}, 2535 "eu-west-2": endpoint{},
2202 "eu-west-3": endpoint{}, 2536 "eu-west-3": endpoint{},
2537 "me-south-1": endpoint{},
2203 "sa-east-1": endpoint{}, 2538 "sa-east-1": endpoint{},
2204 "us-east-1": endpoint{}, 2539 "us-east-1": endpoint{},
2205 "us-east-2": endpoint{}, 2540 "us-east-2": endpoint{},
@@ -2211,8 +2546,11 @@ var awsPartition = partition{
2211 2546
2212 Endpoints: endpoints{ 2547 Endpoints: endpoints{
2213 "ap-northeast-1": endpoint{}, 2548 "ap-northeast-1": endpoint{},
2549 "ap-southeast-1": endpoint{},
2550 "eu-central-1": endpoint{},
2214 "eu-west-1": endpoint{}, 2551 "eu-west-1": endpoint{},
2215 "us-east-1": endpoint{}, 2552 "us-east-1": endpoint{},
2553 "us-east-2": endpoint{},
2216 "us-west-2": endpoint{}, 2554 "us-west-2": endpoint{},
2217 }, 2555 },
2218 }, 2556 },
@@ -2281,9 +2619,33 @@ var awsPartition = partition{
2281 "eu-west-1": endpoint{}, 2619 "eu-west-1": endpoint{},
2282 "eu-west-2": endpoint{}, 2620 "eu-west-2": endpoint{},
2283 "us-east-1": endpoint{}, 2621 "us-east-1": endpoint{},
2284 "us-east-2": endpoint{}, 2622 "us-east-1-fips": endpoint{
2285 "us-west-1": endpoint{}, 2623 Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
2286 "us-west-2": endpoint{}, 2624 CredentialScope: credentialScope{
2625 Region: "us-east-1",
2626 },
2627 },
2628 "us-east-2": endpoint{},
2629 "us-east-2-fips": endpoint{
2630 Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
2631 CredentialScope: credentialScope{
2632 Region: "us-east-2",
2633 },
2634 },
2635 "us-west-1": endpoint{},
2636 "us-west-1-fips": endpoint{
2637 Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
2638 CredentialScope: credentialScope{
2639 Region: "us-west-1",
2640 },
2641 },
2642 "us-west-2": endpoint{},
2643 "us-west-2-fips": endpoint{
2644 Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
2645 CredentialScope: credentialScope{
2646 Region: "us-west-2",
2647 },
2648 },
2287 }, 2649 },
2288 }, 2650 },
2289 "s3": service{ 2651 "s3": service{
@@ -2319,8 +2681,9 @@ var awsPartition = partition{
2319 Hostname: "s3.eu-west-1.amazonaws.com", 2681 Hostname: "s3.eu-west-1.amazonaws.com",
2320 SignatureVersions: []string{"s3", "s3v4"}, 2682 SignatureVersions: []string{"s3", "s3v4"},
2321 }, 2683 },
2322 "eu-west-2": endpoint{}, 2684 "eu-west-2": endpoint{},
2323 "eu-west-3": endpoint{}, 2685 "eu-west-3": endpoint{},
2686 "me-south-1": endpoint{},
2324 "s3-external-1": endpoint{ 2687 "s3-external-1": endpoint{
2325 Hostname: "s3-external-1.amazonaws.com", 2688 Hostname: "s3-external-1.amazonaws.com",
2326 SignatureVersions: []string{"s3", "s3v4"}, 2689 SignatureVersions: []string{"s3", "s3v4"},
@@ -2571,6 +2934,7 @@ var awsPartition = partition{
2571 "ap-southeast-2": endpoint{}, 2934 "ap-southeast-2": endpoint{},
2572 "ca-central-1": endpoint{}, 2935 "ca-central-1": endpoint{},
2573 "eu-central-1": endpoint{}, 2936 "eu-central-1": endpoint{},
2937 "eu-north-1": endpoint{},
2574 "eu-west-1": endpoint{}, 2938 "eu-west-1": endpoint{},
2575 "eu-west-2": endpoint{}, 2939 "eu-west-2": endpoint{},
2576 "eu-west-3": endpoint{}, 2940 "eu-west-3": endpoint{},
@@ -2714,6 +3078,7 @@ var awsPartition = partition{
2714 "sms": service{ 3078 "sms": service{
2715 3079
2716 Endpoints: endpoints{ 3080 Endpoints: endpoints{
3081 "ap-east-1": endpoint{},
2717 "ap-northeast-1": endpoint{}, 3082 "ap-northeast-1": endpoint{},
2718 "ap-northeast-2": endpoint{}, 3083 "ap-northeast-2": endpoint{},
2719 "ap-south-1": endpoint{}, 3084 "ap-south-1": endpoint{},
@@ -2736,6 +3101,7 @@ var awsPartition = partition{
2736 3101
2737 Endpoints: endpoints{ 3102 Endpoints: endpoints{
2738 "ap-northeast-1": endpoint{}, 3103 "ap-northeast-1": endpoint{},
3104 "ap-northeast-2": endpoint{},
2739 "ap-south-1": endpoint{}, 3105 "ap-south-1": endpoint{},
2740 "ap-southeast-1": endpoint{}, 3106 "ap-southeast-1": endpoint{},
2741 "ap-southeast-2": endpoint{}, 3107 "ap-southeast-2": endpoint{},
@@ -2768,6 +3134,7 @@ var awsPartition = partition{
2768 "eu-west-1": endpoint{}, 3134 "eu-west-1": endpoint{},
2769 "eu-west-2": endpoint{}, 3135 "eu-west-2": endpoint{},
2770 "eu-west-3": endpoint{}, 3136 "eu-west-3": endpoint{},
3137 "me-south-1": endpoint{},
2771 "sa-east-1": endpoint{}, 3138 "sa-east-1": endpoint{},
2772 "us-east-1": endpoint{}, 3139 "us-east-1": endpoint{},
2773 "us-east-2": endpoint{}, 3140 "us-east-2": endpoint{},
@@ -2817,7 +3184,8 @@ var awsPartition = partition{
2817 Region: "us-west-2", 3184 Region: "us-west-2",
2818 }, 3185 },
2819 }, 3186 },
2820 "sa-east-1": endpoint{}, 3187 "me-south-1": endpoint{},
3188 "sa-east-1": endpoint{},
2821 "us-east-1": endpoint{ 3189 "us-east-1": endpoint{
2822 SSLCommonName: "queue.{dnsSuffix}", 3190 SSLCommonName: "queue.{dnsSuffix}",
2823 }, 3191 },
@@ -2841,6 +3209,7 @@ var awsPartition = partition{
2841 "eu-west-1": endpoint{}, 3209 "eu-west-1": endpoint{},
2842 "eu-west-2": endpoint{}, 3210 "eu-west-2": endpoint{},
2843 "eu-west-3": endpoint{}, 3211 "eu-west-3": endpoint{},
3212 "me-south-1": endpoint{},
2844 "sa-east-1": endpoint{}, 3213 "sa-east-1": endpoint{},
2845 "us-east-1": endpoint{}, 3214 "us-east-1": endpoint{},
2846 "us-east-2": endpoint{}, 3215 "us-east-2": endpoint{},
@@ -2863,6 +3232,7 @@ var awsPartition = partition{
2863 "eu-west-1": endpoint{}, 3232 "eu-west-1": endpoint{},
2864 "eu-west-2": endpoint{}, 3233 "eu-west-2": endpoint{},
2865 "eu-west-3": endpoint{}, 3234 "eu-west-3": endpoint{},
3235 "me-south-1": endpoint{},
2866 "sa-east-1": endpoint{}, 3236 "sa-east-1": endpoint{},
2867 "us-east-1": endpoint{}, 3237 "us-east-1": endpoint{},
2868 "us-east-2": endpoint{}, 3238 "us-east-2": endpoint{},
@@ -2884,6 +3254,7 @@ var awsPartition = partition{
2884 "eu-west-1": endpoint{}, 3254 "eu-west-1": endpoint{},
2885 "eu-west-2": endpoint{}, 3255 "eu-west-2": endpoint{},
2886 "eu-west-3": endpoint{}, 3256 "eu-west-3": endpoint{},
3257 "me-south-1": endpoint{},
2887 "sa-east-1": endpoint{}, 3258 "sa-east-1": endpoint{},
2888 "us-east-1": endpoint{}, 3259 "us-east-1": endpoint{},
2889 "us-east-2": endpoint{}, 3260 "us-east-2": endpoint{},
@@ -2905,11 +3276,17 @@ var awsPartition = partition{
2905 "ap-southeast-1": endpoint{}, 3276 "ap-southeast-1": endpoint{},
2906 "ap-southeast-2": endpoint{}, 3277 "ap-southeast-2": endpoint{},
2907 "ca-central-1": endpoint{}, 3278 "ca-central-1": endpoint{},
2908 "eu-central-1": endpoint{}, 3279 "ca-central-1-fips": endpoint{
2909 "eu-north-1": endpoint{}, 3280 Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
2910 "eu-west-1": endpoint{}, 3281 CredentialScope: credentialScope{
2911 "eu-west-2": endpoint{}, 3282 Region: "ca-central-1",
2912 "eu-west-3": endpoint{}, 3283 },
3284 },
3285 "eu-central-1": endpoint{},
3286 "eu-north-1": endpoint{},
3287 "eu-west-1": endpoint{},
3288 "eu-west-2": endpoint{},
3289 "eu-west-3": endpoint{},
2913 "local": endpoint{ 3290 "local": endpoint{
2914 Hostname: "localhost:8000", 3291 Hostname: "localhost:8000",
2915 Protocols: []string{"http"}, 3292 Protocols: []string{"http"},
@@ -2917,11 +3294,36 @@ var awsPartition = partition{
2917 Region: "us-east-1", 3294 Region: "us-east-1",
2918 }, 3295 },
2919 }, 3296 },
2920 "sa-east-1": endpoint{}, 3297 "me-south-1": endpoint{},
2921 "us-east-1": endpoint{}, 3298 "sa-east-1": endpoint{},
3299 "us-east-1": endpoint{},
3300 "us-east-1-fips": endpoint{
3301 Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
3302 CredentialScope: credentialScope{
3303 Region: "us-east-1",
3304 },
3305 },
2922 "us-east-2": endpoint{}, 3306 "us-east-2": endpoint{},
3307 "us-east-2-fips": endpoint{
3308 Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
3309 CredentialScope: credentialScope{
3310 Region: "us-east-2",
3311 },
3312 },
2923 "us-west-1": endpoint{}, 3313 "us-west-1": endpoint{},
3314 "us-west-1-fips": endpoint{
3315 Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
3316 CredentialScope: credentialScope{
3317 Region: "us-west-1",
3318 },
3319 },
2924 "us-west-2": endpoint{}, 3320 "us-west-2": endpoint{},
3321 "us-west-2-fips": endpoint{
3322 Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
3323 CredentialScope: credentialScope{
3324 Region: "us-west-2",
3325 },
3326 },
2925 }, 3327 },
2926 }, 3328 },
2927 "sts": service{ 3329 "sts": service{
@@ -2956,8 +3358,14 @@ var awsPartition = partition{
2956 "eu-west-1": endpoint{}, 3358 "eu-west-1": endpoint{},
2957 "eu-west-2": endpoint{}, 3359 "eu-west-2": endpoint{},
2958 "eu-west-3": endpoint{}, 3360 "eu-west-3": endpoint{},
2959 "sa-east-1": endpoint{}, 3361 "me-south-1": endpoint{
2960 "us-east-1": endpoint{}, 3362 Hostname: "sts.me-south-1.amazonaws.com",
3363 CredentialScope: credentialScope{
3364 Region: "me-south-1",
3365 },
3366 },
3367 "sa-east-1": endpoint{},
3368 "us-east-1": endpoint{},
2961 "us-east-1-fips": endpoint{ 3369 "us-east-1-fips": endpoint{
2962 Hostname: "sts-fips.us-east-1.amazonaws.com", 3370 Hostname: "sts-fips.us-east-1.amazonaws.com",
2963 CredentialScope: credentialScope{ 3371 CredentialScope: credentialScope{
@@ -2988,9 +3396,15 @@ var awsPartition = partition{
2988 }, 3396 },
2989 }, 3397 },
2990 "support": service{ 3398 "support": service{
3399 PartitionEndpoint: "aws-global",
2991 3400
2992 Endpoints: endpoints{ 3401 Endpoints: endpoints{
2993 "us-east-1": endpoint{}, 3402 "aws-global": endpoint{
3403 Hostname: "support.us-east-1.amazonaws.com",
3404 CredentialScope: credentialScope{
3405 Region: "us-east-1",
3406 },
3407 },
2994 }, 3408 },
2995 }, 3409 },
2996 "swf": service{ 3410 "swf": service{
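
With the change above, support becomes a partition-global service: a single aws-global endpoint whose credential scope pins signing to us-east-1. A hedged resolution sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// No per-region entries exist; resolve the "aws-global" key directly.
	ep, err := endpoints.DefaultResolver().EndpointFor("support", "aws-global")
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URL)           // https://support.us-east-1.amazonaws.com
	fmt.Println(ep.SigningRegion) // us-east-1
}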
@@ -3008,6 +3422,7 @@ var awsPartition = partition{
3008 "eu-west-1": endpoint{}, 3422 "eu-west-1": endpoint{},
3009 "eu-west-2": endpoint{}, 3423 "eu-west-2": endpoint{},
3010 "eu-west-3": endpoint{}, 3424 "eu-west-3": endpoint{},
3425 "me-south-1": endpoint{},
3011 "sa-east-1": endpoint{}, 3426 "sa-east-1": endpoint{},
3012 "us-east-1": endpoint{}, 3427 "us-east-1": endpoint{},
3013 "us-east-2": endpoint{}, 3428 "us-east-2": endpoint{},
@@ -3030,6 +3445,7 @@ var awsPartition = partition{
3030 "eu-west-1": endpoint{}, 3445 "eu-west-1": endpoint{},
3031 "eu-west-2": endpoint{}, 3446 "eu-west-2": endpoint{},
3032 "eu-west-3": endpoint{}, 3447 "eu-west-3": endpoint{},
3448 "me-south-1": endpoint{},
3033 "sa-east-1": endpoint{}, 3449 "sa-east-1": endpoint{},
3034 "us-east-1": endpoint{}, 3450 "us-east-1": endpoint{},
3035 "us-east-2": endpoint{}, 3451 "us-east-2": endpoint{},
@@ -3061,7 +3477,11 @@ var awsPartition = partition{
3061 Protocols: []string{"https"}, 3477 Protocols: []string{"https"},
3062 }, 3478 },
3063 Endpoints: endpoints{ 3479 Endpoints: endpoints{
3480 "ap-northeast-1": endpoint{},
3064 "ap-northeast-2": endpoint{}, 3481 "ap-northeast-2": endpoint{},
3482 "ap-south-1": endpoint{},
3483 "ap-southeast-1": endpoint{},
3484 "ca-central-1": endpoint{},
3065 "eu-central-1": endpoint{}, 3485 "eu-central-1": endpoint{},
3066 "eu-west-1": endpoint{}, 3486 "eu-west-1": endpoint{},
3067 "us-east-1": endpoint{}, 3487 "us-east-1": endpoint{},
@@ -3105,12 +3525,16 @@ var awsPartition = partition{
3105 Endpoints: endpoints{ 3525 Endpoints: endpoints{
3106 "ap-northeast-1": endpoint{}, 3526 "ap-northeast-1": endpoint{},
3107 "ap-northeast-2": endpoint{}, 3527 "ap-northeast-2": endpoint{},
3528 "ap-south-1": endpoint{},
3108 "ap-southeast-1": endpoint{}, 3529 "ap-southeast-1": endpoint{},
3109 "ap-southeast-2": endpoint{}, 3530 "ap-southeast-2": endpoint{},
3531 "ca-central-1": endpoint{},
3110 "eu-central-1": endpoint{}, 3532 "eu-central-1": endpoint{},
3111 "eu-north-1": endpoint{}, 3533 "eu-north-1": endpoint{},
3112 "eu-west-1": endpoint{}, 3534 "eu-west-1": endpoint{},
3113 "eu-west-2": endpoint{}, 3535 "eu-west-2": endpoint{},
3536 "eu-west-3": endpoint{},
3537 "sa-east-1": endpoint{},
3114 "us-east-1": endpoint{}, 3538 "us-east-1": endpoint{},
3115 "us-east-2": endpoint{}, 3539 "us-east-2": endpoint{},
3116 "us-west-1": endpoint{}, 3540 "us-west-1": endpoint{},
@@ -3157,6 +3581,7 @@ var awsPartition = partition{
3157 "xray": service{ 3581 "xray": service{
3158 3582
3159 Endpoints: endpoints{ 3583 Endpoints: endpoints{
3584 "ap-east-1": endpoint{},
3160 "ap-northeast-1": endpoint{}, 3585 "ap-northeast-1": endpoint{},
3161 "ap-northeast-2": endpoint{}, 3586 "ap-northeast-2": endpoint{},
3162 "ap-south-1": endpoint{}, 3587 "ap-south-1": endpoint{},
@@ -3433,6 +3858,15 @@ var awscnPartition = partition{
3433 "cn-northwest-1": endpoint{}, 3858 "cn-northwest-1": endpoint{},
3434 }, 3859 },
3435 }, 3860 },
3861 "greengrass": service{
3862 IsRegionalized: boxedTrue,
3863 Defaults: endpoint{
3864 Protocols: []string{"https"},
3865 },
3866 Endpoints: endpoints{
3867 "cn-north-1": endpoint{},
3868 },
3869 },
3436 "iam": service{ 3870 "iam": service{
3437 PartitionEndpoint: "aws-cn-global", 3871 PartitionEndpoint: "aws-cn-global",
3438 IsRegionalized: boxedFalse, 3872 IsRegionalized: boxedFalse,
@@ -3463,6 +3897,13 @@ var awscnPartition = partition{
3463 "cn-northwest-1": endpoint{}, 3897 "cn-northwest-1": endpoint{},
3464 }, 3898 },
3465 }, 3899 },
3900 "kms": service{
3901
3902 Endpoints: endpoints{
3903 "cn-north-1": endpoint{},
3904 "cn-northwest-1": endpoint{},
3905 },
3906 },
3466 "lambda": service{ 3907 "lambda": service{
3467 3908
3468 Endpoints: endpoints{ 3909 Endpoints: endpoints{
@@ -3470,6 +3911,13 @@ var awscnPartition = partition{
3470 "cn-northwest-1": endpoint{}, 3911 "cn-northwest-1": endpoint{},
3471 }, 3912 },
3472 }, 3913 },
3914 "license-manager": service{
3915
3916 Endpoints: endpoints{
3917 "cn-north-1": endpoint{},
3918 "cn-northwest-1": endpoint{},
3919 },
3920 },
3473 "logs": service{ 3921 "logs": service{
3474 3922
3475 Endpoints: endpoints{ 3923 Endpoints: endpoints{
@@ -3480,7 +3928,12 @@ var awscnPartition = partition{
3480 "mediaconvert": service{ 3928 "mediaconvert": service{
3481 3929
3482 Endpoints: endpoints{ 3930 Endpoints: endpoints{
3483 "cn-northwest-1": endpoint{}, 3931 "cn-northwest-1": endpoint{
3932 Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
3933 CredentialScope: credentialScope{
3934 Region: "cn-northwest-1",
3935 },
3936 },
3484 }, 3937 },
3485 }, 3938 },
3486 "monitoring": service{ 3939 "monitoring": service{
@@ -3615,6 +4068,18 @@ var awscnPartition = partition{
3615 "cn-northwest-1": endpoint{}, 4068 "cn-northwest-1": endpoint{},
3616 }, 4069 },
3617 }, 4070 },
4071 "support": service{
4072 PartitionEndpoint: "aws-cn-global",
4073
4074 Endpoints: endpoints{
4075 "aws-cn-global": endpoint{
4076 Hostname: "support.cn-north-1.amazonaws.com",
4077 CredentialScope: credentialScope{
4078 Region: "cn-north-1",
4079 },
4080 },
4081 },
4082 },
3618 "swf": service{ 4083 "swf": service{
3619 4084
3620 Endpoints: endpoints{ 4085 Endpoints: endpoints{
@@ -3668,6 +4133,15 @@ var awsusgovPartition = partition{
3668 "us-gov-west-1": endpoint{}, 4133 "us-gov-west-1": endpoint{},
3669 }, 4134 },
3670 }, 4135 },
4136 "acm-pca": service{
4137 Defaults: endpoint{
4138 Protocols: []string{"https"},
4139 },
4140 Endpoints: endpoints{
4141 "us-gov-east-1": endpoint{},
4142 "us-gov-west-1": endpoint{},
4143 },
4144 },
3671 "api.ecr": service{ 4145 "api.ecr": service{
3672 4146
3673 Endpoints: endpoints{ 4147 Endpoints: endpoints{
@@ -3713,6 +4187,7 @@ var awsusgovPartition = partition{
3713 "athena": service{ 4187 "athena": service{
3714 4188
3715 Endpoints: endpoints{ 4189 Endpoints: endpoints{
4190 "us-gov-east-1": endpoint{},
3716 "us-gov-west-1": endpoint{}, 4191 "us-gov-west-1": endpoint{},
3717 }, 4192 },
3718 }, 4193 },
@@ -3762,9 +4237,17 @@ var awsusgovPartition = partition{
3762 "us-gov-west-1": endpoint{}, 4237 "us-gov-west-1": endpoint{},
3763 }, 4238 },
3764 }, 4239 },
4240 "codebuild": service{
4241
4242 Endpoints: endpoints{
4243 "us-gov-east-1": endpoint{},
4244 "us-gov-west-1": endpoint{},
4245 },
4246 },
3765 "codecommit": service{ 4247 "codecommit": service{
3766 4248
3767 Endpoints: endpoints{ 4249 Endpoints: endpoints{
4250 "us-gov-east-1": endpoint{},
3768 "us-gov-west-1": endpoint{}, 4251 "us-gov-west-1": endpoint{},
3769 }, 4252 },
3770 }, 4253 },
@@ -3802,6 +4285,12 @@ var awsusgovPartition = partition{
3802 "us-gov-west-1": endpoint{}, 4285 "us-gov-west-1": endpoint{},
3803 }, 4286 },
3804 }, 4287 },
4288 "datasync": service{
4289
4290 Endpoints: endpoints{
4291 "us-gov-west-1": endpoint{},
4292 },
4293 },
3805 "directconnect": service{ 4294 "directconnect": service{
3806 4295
3807 Endpoints: endpoints{ 4296 Endpoints: endpoints{
@@ -3819,6 +4308,7 @@ var awsusgovPartition = partition{
3819 "ds": service{ 4308 "ds": service{
3820 4309
3821 Endpoints: endpoints{ 4310 Endpoints: endpoints{
4311 "us-gov-east-1": endpoint{},
3822 "us-gov-west-1": endpoint{}, 4312 "us-gov-west-1": endpoint{},
3823 }, 4313 },
3824 }, 4314 },
@@ -3826,6 +4316,12 @@ var awsusgovPartition = partition{
3826 4316
3827 Endpoints: endpoints{ 4317 Endpoints: endpoints{
3828 "us-gov-east-1": endpoint{}, 4318 "us-gov-east-1": endpoint{},
4319 "us-gov-east-1-fips": endpoint{
4320 Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
4321 CredentialScope: credentialScope{
4322 Region: "us-gov-east-1",
4323 },
4324 },
3829 "us-gov-west-1": endpoint{}, 4325 "us-gov-west-1": endpoint{},
3830 "us-gov-west-1-fips": endpoint{ 4326 "us-gov-west-1-fips": endpoint{
3831 Hostname: "dynamodb.us-gov-west-1.amazonaws.com", 4327 Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
@@ -3927,6 +4423,7 @@ var awsusgovPartition = partition{
3927 "firehose": service{ 4423 "firehose": service{
3928 4424
3929 Endpoints: endpoints{ 4425 Endpoints: endpoints{
4426 "us-gov-east-1": endpoint{},
3930 "us-gov-west-1": endpoint{}, 4427 "us-gov-west-1": endpoint{},
3931 }, 4428 },
3932 }, 4429 },
@@ -3942,6 +4439,16 @@ var awsusgovPartition = partition{
3942 "glue": service{ 4439 "glue": service{
3943 4440
3944 Endpoints: endpoints{ 4441 Endpoints: endpoints{
4442 "us-gov-east-1": endpoint{},
4443 "us-gov-west-1": endpoint{},
4444 },
4445 },
4446 "greengrass": service{
4447 IsRegionalized: boxedTrue,
4448 Defaults: endpoint{
4449 Protocols: []string{"https"},
4450 },
4451 Endpoints: endpoints{
3945 "us-gov-west-1": endpoint{}, 4452 "us-gov-west-1": endpoint{},
3946 }, 4453 },
3947 }, 4454 },
@@ -4048,12 +4555,31 @@ var awsusgovPartition = partition{
4048 "us-gov-west-1": endpoint{}, 4555 "us-gov-west-1": endpoint{},
4049 }, 4556 },
4050 }, 4557 },
4558 "organizations": service{
4559 PartitionEndpoint: "aws-us-gov-global",
4560 IsRegionalized: boxedFalse,
4561
4562 Endpoints: endpoints{
4563 "aws-us-gov-global": endpoint{
4564 Hostname: "organizations.us-gov-west-1.amazonaws.com",
4565 CredentialScope: credentialScope{
4566 Region: "us-gov-west-1",
4567 },
4568 },
4569 },
4570 },
4051 "polly": service{ 4571 "polly": service{
4052 4572
4053 Endpoints: endpoints{ 4573 Endpoints: endpoints{
4054 "us-gov-west-1": endpoint{}, 4574 "us-gov-west-1": endpoint{},
4055 }, 4575 },
4056 }, 4576 },
4577 "ram": service{
4578
4579 Endpoints: endpoints{
4580 "us-gov-west-1": endpoint{},
4581 },
4582 },
4057 "rds": service{ 4583 "rds": service{
4058 4584
4059 Endpoints: endpoints{ 4585 Endpoints: endpoints{
@@ -4137,6 +4663,28 @@ var awsusgovPartition = partition{
4137 }, 4663 },
4138 }, 4664 },
4139 }, 4665 },
4666 "secretsmanager": service{
4667
4668 Endpoints: endpoints{
4669 "us-gov-west-1": endpoint{},
4670 "us-gov-west-1-fips": endpoint{
4671 Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
4672 CredentialScope: credentialScope{
4673 Region: "us-gov-west-1",
4674 },
4675 },
4676 },
4677 },
4678 "serverlessrepo": service{
4679 Defaults: endpoint{
4680 Protocols: []string{"https"},
4681 },
4682 Endpoints: endpoints{
4683 "us-gov-west-1": endpoint{
4684 Protocols: []string{"https"},
4685 },
4686 },
4687 },
4140 "sms": service{ 4688 "sms": service{
4141 4689
4142 Endpoints: endpoints{ 4690 Endpoints: endpoints{
@@ -4198,6 +4746,12 @@ var awsusgovPartition = partition{
4198 }, 4746 },
4199 Endpoints: endpoints{ 4747 Endpoints: endpoints{
4200 "us-gov-east-1": endpoint{}, 4748 "us-gov-east-1": endpoint{},
4749 "us-gov-east-1-fips": endpoint{
4750 Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
4751 CredentialScope: credentialScope{
4752 Region: "us-gov-east-1",
4753 },
4754 },
4201 "us-gov-west-1": endpoint{}, 4755 "us-gov-west-1": endpoint{},
4202 "us-gov-west-1-fips": endpoint{ 4756 "us-gov-west-1-fips": endpoint{
4203 Hostname: "dynamodb.us-gov-west-1.amazonaws.com", 4757 Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
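The endpoint tables above feed the SDK's default resolver, so the new rows become resolvable immediately. A minimal sketch of querying the updated data with the real endpoints API; the service and region strings match entries added in this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()

	// "sts"/"me-south-1" corresponds to one of the rows added above.
	ep, err := resolver.EndpointFor("sts", "me-south-1")
	if err != nil {
		fmt.Println("resolve:", err)
		return
	}
	fmt.Println(ep.URL, ep.SigningRegion)
}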
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
index 000dd79..ca8fc82 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
@@ -2,7 +2,7 @@ package endpoints
2 2
3// Service identifiers 3// Service identifiers
4// 4//
5// Deprecated: Use client package's EndpointID value instead of these 5// Deprecated: Use client package's EndpointsID value instead of these
6// ServiceIDs. These IDs are not maintained, and are out of date. 6// ServiceIDs. These IDs are not maintained, and are out of date.
7const ( 7const (
8 A4bServiceID = "a4b" // A4b. 8 A4bServiceID = "a4b" // A4b.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
index 271da43..d9b37f4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -1,18 +1,17 @@
1// +build !appengine,!plan9
2
3package request 1package request
4 2
5import ( 3import (
6 "net" 4 "strings"
7 "os"
8 "syscall"
9) 5)
10 6
11func isErrConnectionReset(err error) bool { 7func isErrConnectionReset(err error) bool {
12 if opErr, ok := err.(*net.OpError); ok { 8 if strings.Contains(err.Error(), "read: connection reset") {
13 if sysErr, ok := opErr.Err.(*os.SyscallError); ok { 9 return false
14 return sysErr.Err == syscall.ECONNRESET 10 }
15 } 11
12 if strings.Contains(err.Error(), "connection reset") ||
13 strings.Contains(err.Error(), "broken pipe") {
14 return true
16 } 15 }
17 16
18 return false 17 return false
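The platform-specific build tags and syscall comparison give way to plain substring matching on the error text. A minimal, runnable sketch of the new classification; the helper mirrors the vendored function, and the sample errors are made up:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// isConnReset mirrors the vendored isErrConnectionReset: a reset observed on
// a read is not retried (response data may already have been consumed), while
// resets during other operations and broken pipes are retryable failures.
func isConnReset(err error) bool {
	if strings.Contains(err.Error(), "read: connection reset") {
		return false
	}
	if strings.Contains(err.Error(), "connection reset") ||
		strings.Contains(err.Error(), "broken pipe") {
		return true
	}
	return false
}

func main() {
	fmt.Println(isConnReset(errors.New("write tcp 10.0.0.1:443: broken pipe")))           // true
	fmt.Println(isConnReset(errors.New("read tcp 10.0.0.1:443: read: connection reset"))) // false
}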
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
deleted file mode 100644
index daf9eca..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
+++ /dev/null
@@ -1,11 +0,0 @@
1// +build appengine plan9
2
3package request
4
5import (
6 "strings"
7)
8
9func isErrConnectionReset(err error) bool {
10 return strings.Contains(err.Error(), "connection reset")
11}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
index 8ef8548..627ec72 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -59,6 +59,51 @@ func (h *Handlers) Clear() {
59 h.Complete.Clear() 59 h.Complete.Clear()
60} 60}
61 61
62// IsEmpty reports whether there are no handlers in any of the handler lists.
63func (h *Handlers) IsEmpty() bool {
64 if h.Validate.Len() != 0 {
65 return false
66 }
67 if h.Build.Len() != 0 {
68 return false
69 }
70 if h.Send.Len() != 0 {
71 return false
72 }
73 if h.Sign.Len() != 0 {
74 return false
75 }
76 if h.Unmarshal.Len() != 0 {
77 return false
78 }
79 if h.UnmarshalStream.Len() != 0 {
80 return false
81 }
82 if h.UnmarshalMeta.Len() != 0 {
83 return false
84 }
85 if h.UnmarshalError.Len() != 0 {
86 return false
87 }
88 if h.ValidateResponse.Len() != 0 {
89 return false
90 }
91 if h.Retry.Len() != 0 {
92 return false
93 }
94 if h.AfterRetry.Len() != 0 {
95 return false
96 }
97 if h.CompleteAttempt.Len() != 0 {
98 return false
99 }
100 if h.Complete.Len() != 0 {
101 return false
102 }
103
104 return true
105}
106
62// A HandlerListRunItem represents an entry in the HandlerList which 107// A HandlerListRunItem represents an entry in the HandlerList which
63// is being run. 108// is being run.
64type HandlerListRunItem struct { 109type HandlerListRunItem struct {
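IsEmpty exists so the session constructor later in this diff can detect whether a caller supplied a custom handler set through the new Options.Handlers field. A short sketch of that fallback pattern, using the real defaults.Handlers helper:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var h request.Handlers   // zero value: every handler list is empty
	fmt.Println(h.IsEmpty()) // true

	// Fall back to the SDK's complete default set when nothing was
	// supplied, mirroring how newSession uses the new check.
	if h.IsEmpty() {
		h = defaults.Handlers()
	}
	fmt.Println(h.IsEmpty()) // false
}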
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
index b0c2ef4..9370fa5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -15,12 +15,15 @@ type offsetReader struct {
15 closed bool 15 closed bool
16} 16}
17 17
18func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { 18func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
19 reader := &offsetReader{} 19 reader := &offsetReader{}
20 buf.Seek(offset, sdkio.SeekStart) 20 _, err := buf.Seek(offset, sdkio.SeekStart)
21 if err != nil {
22 return nil, err
23 }
21 24
22 reader.buf = buf 25 reader.buf = buf
23 return reader 26 return reader, nil
24} 27}
25 28
26// Close will close the instance of the offset reader's access to 29// Close will close the instance of the offset reader's access to
@@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
54 57
55// CloseAndCopy will return a new offsetReader with a copy of the old buffer 58// CloseAndCopy will return a new offsetReader with a copy of the old buffer
56// and close the old buffer. 59// and close the old buffer.
57func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { 60func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
58 o.Close() 61 if err := o.Close(); err != nil {
62 return nil, err
63 }
59 return newOffsetReader(o.buf, offset) 64 return newOffsetReader(o.buf, offset)
60} 65}
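newOffsetReader and CloseAndCopy now propagate Seek and Close failures instead of discarding them. An illustrative sketch of the same pattern against a generic io.ReadSeeker; seekTo is a made-up name, not SDK API:

package main

import (
	"fmt"
	"io"
	"strings"
)

// seekTo propagates the Seek error the way the hardened newOffsetReader
// does, instead of assuming the seek always succeeds.
func seekTo(rs io.ReadSeeker, offset int64) (io.ReadSeeker, error) {
	if _, err := rs.Seek(offset, io.SeekStart); err != nil {
		return nil, err
	}
	return rs, nil
}

func main() {
	r, err := seekTo(strings.NewReader("request body"), 8)
	if err != nil {
		fmt.Println("seek failed:", err)
		return
	}
	rest, _ := io.ReadAll(r)
	fmt.Printf("%s\n", rest) // body
}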
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index 8f2eb3e..e7c9b2b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -4,6 +4,7 @@ import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6 "io" 6 "io"
7 "net"
7 "net/http" 8 "net/http"
8 "net/url" 9 "net/url"
9 "reflect" 10 "reflect"
@@ -231,6 +232,10 @@ func (r *Request) WillRetry() bool {
231 return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() 232 return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
232} 233}
233 234
235func fmtAttemptCount(retryCount, maxRetries int) string {
236 return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
237}
238
234// ParamsFilled returns if the request's parameters have been populated 239// ParamsFilled returns if the request's parameters have been populated
235// and the parameters are valid. False is returned if no parameters are 240// and the parameters are valid. False is returned if no parameters are
236// provided or invalid. 241// provided or invalid.
@@ -259,7 +264,18 @@ func (r *Request) SetStringBody(s string) {
259// SetReaderBody will set the request's body reader. 264// SetReaderBody will set the request's body reader.
260func (r *Request) SetReaderBody(reader io.ReadSeeker) { 265func (r *Request) SetReaderBody(reader io.ReadSeeker) {
261 r.Body = reader 266 r.Body = reader
262 r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. 267
268 if aws.IsReaderSeekable(reader) {
269 var err error
270 // Get the Body's current offset so retries will start from the same
271 // initial position.
272 r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
273 if err != nil {
274 r.Error = awserr.New(ErrCodeSerialization,
275 "failed to determine start of request body", err)
276 return
277 }
278 }
263 r.ResetBody() 279 r.ResetBody()
264} 280}
265 281
@@ -330,16 +346,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err
330 return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil 346 return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
331} 347}
332 348
333func debugLogReqError(r *Request, stage string, retrying bool, err error) { 349const (
350 notRetrying = "not retrying"
351)
352
353func debugLogReqError(r *Request, stage, retryStr string, err error) {
334 if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { 354 if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
335 return 355 return
336 } 356 }
337 357
338 retryStr := "not retrying"
339 if retrying {
340 retryStr = "will retry"
341 }
342
343 r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", 358 r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
344 stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) 359 stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
345} 360}
@@ -358,12 +373,12 @@ func (r *Request) Build() error {
358 if !r.built { 373 if !r.built {
359 r.Handlers.Validate.Run(r) 374 r.Handlers.Validate.Run(r)
360 if r.Error != nil { 375 if r.Error != nil {
361 debugLogReqError(r, "Validate Request", false, r.Error) 376 debugLogReqError(r, "Validate Request", notRetrying, r.Error)
362 return r.Error 377 return r.Error
363 } 378 }
364 r.Handlers.Build.Run(r) 379 r.Handlers.Build.Run(r)
365 if r.Error != nil { 380 if r.Error != nil {
366 debugLogReqError(r, "Build Request", false, r.Error) 381 debugLogReqError(r, "Build Request", notRetrying, r.Error)
367 return r.Error 382 return r.Error
368 } 383 }
369 r.built = true 384 r.built = true
@@ -379,7 +394,7 @@ func (r *Request) Build() error {
379func (r *Request) Sign() error { 394func (r *Request) Sign() error {
380 r.Build() 395 r.Build()
381 if r.Error != nil { 396 if r.Error != nil {
382 debugLogReqError(r, "Build Request", false, r.Error) 397 debugLogReqError(r, "Build Request", notRetrying, r.Error)
383 return r.Error 398 return r.Error
384 } 399 }
385 400
@@ -387,12 +402,16 @@ func (r *Request) Sign() error {
387 return r.Error 402 return r.Error
388} 403}
389 404
390func (r *Request) getNextRequestBody() (io.ReadCloser, error) { 405func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
391 if r.safeBody != nil { 406 if r.safeBody != nil {
392 r.safeBody.Close() 407 r.safeBody.Close()
393 } 408 }
394 409
395 r.safeBody = newOffsetReader(r.Body, r.BodyStart) 410 r.safeBody, err = newOffsetReader(r.Body, r.BodyStart)
411 if err != nil {
412 return nil, awserr.New(ErrCodeSerialization,
413 "failed to get next request body reader", err)
414 }
396 415
397 // Go 1.8 tightened and clarified the rules code needs to use when building 416 // Go 1.8 tightened and clarified the rules code needs to use when building
398 // requests with the http package. Go 1.8 removed the automatic detection 417 // requests with the http package. Go 1.8 removed the automatic detection
@@ -409,10 +428,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
409 // Related golang/go#18257 428 // Related golang/go#18257
410 l, err := aws.SeekerLen(r.Body) 429 l, err := aws.SeekerLen(r.Body)
411 if err != nil { 430 if err != nil {
412 return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) 431 return nil, awserr.New(ErrCodeSerialization,
432 "failed to compute request body size", err)
413 } 433 }
414 434
415 var body io.ReadCloser
416 if l == 0 { 435 if l == 0 {
417 body = NoBody 436 body = NoBody
418 } else if l > 0 { 437 } else if l > 0 {
@@ -473,13 +492,13 @@ func (r *Request) Send() error {
473 r.AttemptTime = time.Now() 492 r.AttemptTime = time.Now()
474 493
475 if err := r.Sign(); err != nil { 494 if err := r.Sign(); err != nil {
476 debugLogReqError(r, "Sign Request", false, err) 495 debugLogReqError(r, "Sign Request", notRetrying, err)
477 return err 496 return err
478 } 497 }
479 498
480 if err := r.sendRequest(); err == nil { 499 if err := r.sendRequest(); err == nil {
481 return nil 500 return nil
482 } else if !shouldRetryCancel(r.Error) { 501 } else if !shouldRetryError(r.Error) {
483 return err 502 return err
484 } else { 503 } else {
485 r.Handlers.Retry.Run(r) 504 r.Handlers.Retry.Run(r)
@@ -489,13 +508,16 @@ func (r *Request) Send() error {
489 return r.Error 508 return r.Error
490 } 509 }
491 510
492 r.prepareRetry() 511 if err := r.prepareRetry(); err != nil {
512 r.Error = err
513 return err
514 }
493 continue 515 continue
494 } 516 }
495 } 517 }
496} 518}
497 519
498func (r *Request) prepareRetry() { 520func (r *Request) prepareRetry() error {
499 if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { 521 if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
500 r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", 522 r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
501 r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) 523 r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
@@ -506,12 +528,19 @@ func (r *Request) prepareRetry() {
506 // the request's body even though the Client's Do returned. 528 // the request's body even though the Client's Do returned.
507 r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) 529 r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
508 r.ResetBody() 530 r.ResetBody()
531 if err := r.Error; err != nil {
532 return awserr.New(ErrCodeSerialization,
533 "failed to prepare body for retry", err)
534
535 }
509 536
510 // Closing response body to ensure that no response body is leaked 537 // Closing response body to ensure that no response body is leaked
511 // between retry attempts. 538 // between retry attempts.
512 if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { 539 if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
513 r.HTTPResponse.Body.Close() 540 r.HTTPResponse.Body.Close()
514 } 541 }
542
543 return nil
515} 544}
516 545
517func (r *Request) sendRequest() (sendErr error) { 546func (r *Request) sendRequest() (sendErr error) {
@@ -520,7 +549,9 @@ func (r *Request) sendRequest() (sendErr error) {
520 r.Retryable = nil 549 r.Retryable = nil
521 r.Handlers.Send.Run(r) 550 r.Handlers.Send.Run(r)
522 if r.Error != nil { 551 if r.Error != nil {
523 debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) 552 debugLogReqError(r, "Send Request",
553 fmtAttemptCount(r.RetryCount, r.MaxRetries()),
554 r.Error)
524 return r.Error 555 return r.Error
525 } 556 }
526 557
@@ -528,13 +559,17 @@ func (r *Request) sendRequest() (sendErr error) {
528 r.Handlers.ValidateResponse.Run(r) 559 r.Handlers.ValidateResponse.Run(r)
529 if r.Error != nil { 560 if r.Error != nil {
530 r.Handlers.UnmarshalError.Run(r) 561 r.Handlers.UnmarshalError.Run(r)
531 debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) 562 debugLogReqError(r, "Validate Response",
563 fmtAttemptCount(r.RetryCount, r.MaxRetries()),
564 r.Error)
532 return r.Error 565 return r.Error
533 } 566 }
534 567
535 r.Handlers.Unmarshal.Run(r) 568 r.Handlers.Unmarshal.Run(r)
536 if r.Error != nil { 569 if r.Error != nil {
537 debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) 570 debugLogReqError(r, "Unmarshal Response",
571 fmtAttemptCount(r.RetryCount, r.MaxRetries()),
572 r.Error)
538 return r.Error 573 return r.Error
539 } 574 }
540 575
@@ -565,13 +600,13 @@ type temporary interface {
565 Temporary() bool 600 Temporary() bool
566} 601}
567 602
568func shouldRetryCancel(err error) bool { 603func shouldRetryError(origErr error) bool {
569 switch err := err.(type) { 604 switch err := origErr.(type) {
570 case awserr.Error: 605 case awserr.Error:
571 if err.Code() == CanceledErrorCode { 606 if err.Code() == CanceledErrorCode {
572 return false 607 return false
573 } 608 }
574 return shouldRetryCancel(err.OrigErr()) 609 return shouldRetryError(err.OrigErr())
575 case *url.Error: 610 case *url.Error:
576 if strings.Contains(err.Error(), "connection refused") { 611 if strings.Contains(err.Error(), "connection refused") {
577 // Refused connections should be retried as the service may not yet 612 // Refused connections should be retried as the service may not yet
@@ -581,14 +616,17 @@ func shouldRetryCancel(err error) bool {
581 } 616 }
582 // *url.Error only implements Temporary after golang 1.6 but since 617 // *url.Error only implements Temporary after golang 1.6 but since
583 // url.Error only wraps the error: 618 // url.Error only wraps the error:
584 return shouldRetryCancel(err.Err) 619 return shouldRetryError(err.Err)
585 case temporary: 620 case temporary:
621 if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
622 return true
623 }
586 // If the error is temporary, we want to allow continuation of the 624 // If the error is temporary, we want to allow continuation of the
587 // retry process 625 // retry process
588 return err.Temporary() 626 return err.Temporary() || isErrConnectionReset(origErr)
589 case nil: 627 case nil:
590 // `awserr.Error.OrigErr()` can be nil, meaning there was an error but 628 // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
591 // because we don't know the cause, it is marked as retriable. See 629 // because we don't know the cause, it is marked as retryable. See
592 // TestRequest4xxUnretryable for an example. 630 // TestRequest4xxUnretryable for an example.
593 return true 631 return true
594 default: 632 default:
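The renamed shouldRetryError also treats errors from failed dials as retryable, since no request bytes were ever sent. A trimmed sketch of the classification; the awserr and canceled-request branches are simplified, and shouldRetry is an illustrative stand-in for the unexported SDK function:

package main

import (
	"errors"
	"fmt"
	"net"
	"net/url"
	"strings"
)

type temporary interface{ Temporary() bool }

func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *url.Error:
		// Refused connections may mean the service is not up yet; retry.
		if strings.Contains(e.Error(), "connection refused") {
			return true
		}
		return shouldRetry(e.Err)
	case temporary:
		// A failed dial never sent the request, so it is safe to resend.
		if op, ok := err.(*net.OpError); ok && op.Op == "dial" {
			return true
		}
		return e.Temporary()
	case nil:
		// Unknown cause with no error value recorded; assume retryable.
		return true
	default:
		// The vendored code also screens out canceled-request strings
		// here before defaulting to retryable.
		return true
	}
}

func main() {
	dial := &net.OpError{Op: "dial", Net: "tcp", Err: errors.New("connect: connection refused")}
	fmt.Println(shouldRetry(dial)) // true: the request was never sent

	wrapped := &url.Error{Op: "Get", URL: "https://example.com", Err: dial}
	fmt.Println(shouldRetry(wrapped)) // true: refused connections are retried
}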
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
index 7c6a800..de1292f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -4,6 +4,8 @@ package request
4 4
5import ( 5import (
6 "net/http" 6 "net/http"
7
8 "github.com/aws/aws-sdk-go/aws/awserr"
7) 9)
8 10
9// NoBody is a http.NoBody reader instructing Go HTTP client to not include 11// NoBody is a http.NoBody reader instructing Go HTTP client to not include
@@ -24,7 +26,8 @@ var NoBody = http.NoBody
24func (r *Request) ResetBody() { 26func (r *Request) ResetBody() {
25 body, err := r.getNextRequestBody() 27 body, err := r.getNextRequestBody()
26 if err != nil { 28 if err != nil {
27 r.Error = err 29 r.Error = awserr.New(ErrCodeSerialization,
30 "failed to reset request body", err)
28 return 31 return
29 } 32 }
30 33
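ResetBody now wraps failures in an awserr.Error carrying ErrCodeSerialization, letting callers branch on the code rather than matching error strings. A small sketch under the assumption that ErrCodeSerialization equals "SerializationError"; the underlying cause is a placeholder:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a low-level failure the way the revised ResetBody does; the
	// cause below is a made-up placeholder.
	var err error = awserr.New("SerializationError",
		"failed to reset request body", errors.New("seek failed"))

	// Callers can branch on the code instead of matching error strings.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "SerializationError" {
		fmt.Println(aerr.Message(), "->", aerr.OrigErr())
	}
}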
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
index a633ed5..f093fc5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -146,7 +146,7 @@ func (r *Request) nextPageTokens() []interface{} {
146 return nil 146 return nil
147 } 147 }
148 case bool: 148 case bool:
149 if v == false { 149 if !v {
150 return nil 150 return nil
151 } 151 }
152 } 152 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
new file mode 100644
index 0000000..ce41518
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -0,0 +1,258 @@
1package session
2
3import (
4 "fmt"
5 "os"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/credentials"
10 "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
11 "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
12 "github.com/aws/aws-sdk-go/aws/defaults"
13 "github.com/aws/aws-sdk-go/aws/request"
14 "github.com/aws/aws-sdk-go/internal/shareddefaults"
15)
16
17func resolveCredentials(cfg *aws.Config,
18 envCfg envConfig, sharedCfg sharedConfig,
19 handlers request.Handlers,
20 sessOpts Options,
21) (*credentials.Credentials, error) {
22
23 switch {
24 case len(envCfg.Profile) != 0:
25 // User explicitly provided a Profile, so load from shared config
26 // first.
27 return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
28
29 case envCfg.Creds.HasKeys():
30 // Environment credentials
31 return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
32
33 case len(envCfg.WebIdentityTokenFilePath) != 0:
34 // Web identity token from environment; RoleARN is also required to
35 // be set.
36 return assumeWebIdentity(cfg, handlers,
37 envCfg.WebIdentityTokenFilePath,
38 envCfg.RoleARN,
39 envCfg.RoleSessionName,
40 )
41
42 default:
43 // Fall back to the "default" credential resolution chain.
44 return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
45 }
46}
47
48// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
49// 'AWS_ROLE_ARN' was not set.
50var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
51
52// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
53// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
54var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
55
56func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
57 filepath string,
58 roleARN, sessionName string,
59) (*credentials.Credentials, error) {
60
61 if len(filepath) == 0 {
62 return nil, WebIdentityEmptyTokenFilePathErr
63 }
64
65 if len(roleARN) == 0 {
66 return nil, WebIdentityEmptyRoleARNErr
67 }
68
69 creds := stscreds.NewWebIdentityCredentials(
70 &Session{
71 Config: cfg,
72 Handlers: handlers.Copy(),
73 },
74 roleARN,
75 sessionName,
76 filepath,
77 )
78
79 return creds, nil
80}
81
82func resolveCredsFromProfile(cfg *aws.Config,
83 envCfg envConfig, sharedCfg sharedConfig,
84 handlers request.Handlers,
85 sessOpts Options,
86) (creds *credentials.Credentials, err error) {
87
88 switch {
89 case sharedCfg.SourceProfile != nil:
90 // Assume IAM role with credentials sourced from a different profile.
91 creds, err = resolveCredsFromProfile(cfg, envCfg,
92 *sharedCfg.SourceProfile, handlers, sessOpts,
93 )
94
95 case sharedCfg.Creds.HasKeys():
96 // Static Credentials from Shared Config/Credentials file.
97 creds = credentials.NewStaticCredentialsFromCreds(
98 sharedCfg.Creds,
99 )
100
101 case len(sharedCfg.CredentialProcess) != 0:
102 // Get credentials from CredentialProcess
103 creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
104
105 case len(sharedCfg.CredentialSource) != 0:
106 creds, err = resolveCredsFromSource(cfg, envCfg,
107 sharedCfg, handlers, sessOpts,
108 )
109
110 case len(sharedCfg.WebIdentityTokenFile) != 0:
111 // Credentials from Assume Web Identity token require an IAM Role, and
112 // that role will be assumed. May be wrapped with another assume role
113 // via SourceProfile.
114 return assumeWebIdentity(cfg, handlers,
115 sharedCfg.WebIdentityTokenFile,
116 sharedCfg.RoleARN,
117 sharedCfg.RoleSessionName,
118 )
119
120 default:
121 // Fall back to the default credentials provider, including mock errors
122 // for the credential chain so the user can identify why credentials failed to
123 // be retrieved.
124 creds = credentials.NewCredentials(&credentials.ChainProvider{
125 VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
126 Providers: []credentials.Provider{
127 &credProviderError{
128 Err: awserr.New("EnvAccessKeyNotFound",
129 "failed to find credentials in the environment.", nil),
130 },
131 &credProviderError{
132 Err: awserr.New("SharedCredsLoad",
133 fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
134 },
135 defaults.RemoteCredProvider(*cfg, handlers),
136 },
137 })
138 }
139 if err != nil {
140 return nil, err
141 }
142
143 if len(sharedCfg.RoleARN) > 0 {
144 cfgCp := *cfg
145 cfgCp.Credentials = creds
146 return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
147 }
148
149 return creds, nil
150}
151
152// valid credential source values
153const (
154 credSourceEc2Metadata = "Ec2InstanceMetadata"
155 credSourceEnvironment = "Environment"
156 credSourceECSContainer = "EcsContainer"
157)
158
159func resolveCredsFromSource(cfg *aws.Config,
160 envCfg envConfig, sharedCfg sharedConfig,
161 handlers request.Handlers,
162 sessOpts Options,
163) (creds *credentials.Credentials, err error) {
164
165 switch sharedCfg.CredentialSource {
166 case credSourceEc2Metadata:
167 p := defaults.RemoteCredProvider(*cfg, handlers)
168 creds = credentials.NewCredentials(p)
169
170 case credSourceEnvironment:
171 creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
172
173 case credSourceECSContainer:
174 if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
175 return nil, ErrSharedConfigECSContainerEnvVarEmpty
176 }
177
178 p := defaults.RemoteCredProvider(*cfg, handlers)
179 creds = credentials.NewCredentials(p)
180
181 default:
182 return nil, ErrSharedConfigInvalidCredSource
183 }
184
185 return creds, nil
186}
187
188func credsFromAssumeRole(cfg aws.Config,
189 handlers request.Handlers,
190 sharedCfg sharedConfig,
191 sessOpts Options,
192) (*credentials.Credentials, error) {
193
194 if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
195 // AssumeRole Token provider is required if doing Assume Role
196 // with MFA.
197 return nil, AssumeRoleTokenProviderNotSetError{}
198 }
199
200 return stscreds.NewCredentials(
201 &Session{
202 Config: &cfg,
203 Handlers: handlers.Copy(),
204 },
205 sharedCfg.RoleARN,
206 func(opt *stscreds.AssumeRoleProvider) {
207 opt.RoleSessionName = sharedCfg.RoleSessionName
208 opt.Duration = sessOpts.AssumeRoleDuration
209
210 // Assume role with external ID
211 if len(sharedCfg.ExternalID) > 0 {
212 opt.ExternalID = aws.String(sharedCfg.ExternalID)
213 }
214
215 // Assume role with MFA
216 if len(sharedCfg.MFASerial) > 0 {
217 opt.SerialNumber = aws.String(sharedCfg.MFASerial)
218 opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
219 }
220 },
221 ), nil
222}
223
224// AssumeRoleTokenProviderNotSetError is an error returned when creating a
225// session when the MFAToken option is not set and the shared config is
226// configured to assume a role with an MFA token.
227type AssumeRoleTokenProviderNotSetError struct{}
228
229// Code is the short id of the error.
230func (e AssumeRoleTokenProviderNotSetError) Code() string {
231 return "AssumeRoleTokenProviderNotSetError"
232}
233
234// Message is the description of the error
235func (e AssumeRoleTokenProviderNotSetError) Message() string {
236 return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
237}
238
239// OrigErr is the underlying error that caused the failure.
240func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
241 return nil
242}
243
244// Error satisfies the error interface.
245func (e AssumeRoleTokenProviderNotSetError) Error() string {
246 return awserr.SprintError(e.Code(), e.Message(), "", nil)
247}
248
249type credProviderError struct {
250 Err error
251}
252
253func (c credProviderError) Retrieve() (credentials.Value, error) {
254 return credentials.Value{}, c.Err
255}
256func (c credProviderError) IsExpired() bool {
257 return true
258}
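The new credentials.go centralizes resolution order: explicit profile, static environment credentials, web identity token, then the shared-config chain. A hedged usage sketch; Profile and SharedConfigState are real session options, while the profile name and role ARN below are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
)

// Assumed ~/.aws/config contents exercising resolveCredsFromProfile and
// credsFromAssumeRole (shown as a comment only):
//
//   [profile app]
//   role_arn = arn:aws:iam::123456789012:role/app
//   source_profile = default
//   role_session_name = app-session

func main() {
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "app",
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		fmt.Println("session:", err)
		return
	}
	// Credentials are resolved lazily; force resolution to see the chain run.
	if _, err := sess.Config.Credentials.Get(); err != nil {
		fmt.Println("credentials:", err)
	}
}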
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index e3959b9..3a998d5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -102,18 +102,38 @@ type envConfig struct {
102 CSMEnabled bool 102 CSMEnabled bool
103 CSMPort string 103 CSMPort string
104 CSMClientID string 104 CSMClientID string
105 CSMHost string
105 106
106 enableEndpointDiscovery string
107 // Enables endpoint discovery via environment variables. 107 // Enables endpoint discovery via environment variables.
108 // 108 //
109 // AWS_ENABLE_ENDPOINT_DISCOVERY=true 109 // AWS_ENABLE_ENDPOINT_DISCOVERY=true
110 EnableEndpointDiscovery *bool 110 EnableEndpointDiscovery *bool
111 enableEndpointDiscovery string
112
113 // Specifies the WebIdentity token the SDK should use to assume a role
114 // with.
115 //
116 // AWS_WEB_IDENTITY_TOKEN_FILE=file_path
117 WebIdentityTokenFilePath string
118
119 // Specifies the IAM role ARN to use when assuming a role.
120 //
121 // AWS_ROLE_ARN=role_arn
122 RoleARN string
123
124 // Specifies the IAM role session name to use when assuming a role.
125 //
126 // AWS_ROLE_SESSION_NAME=session_name
127 RoleSessionName string
111} 128}
112 129
113var ( 130var (
114 csmEnabledEnvKey = []string{ 131 csmEnabledEnvKey = []string{
115 "AWS_CSM_ENABLED", 132 "AWS_CSM_ENABLED",
116 } 133 }
134 csmHostEnvKey = []string{
135 "AWS_CSM_HOST",
136 }
117 csmPortEnvKey = []string{ 137 csmPortEnvKey = []string{
118 "AWS_CSM_PORT", 138 "AWS_CSM_PORT",
119 } 139 }
@@ -150,6 +170,15 @@ var (
150 sharedConfigFileEnvKey = []string{ 170 sharedConfigFileEnvKey = []string{
151 "AWS_CONFIG_FILE", 171 "AWS_CONFIG_FILE",
152 } 172 }
173 webIdentityTokenFilePathEnvKey = []string{
174 "AWS_WEB_IDENTITY_TOKEN_FILE",
175 }
176 roleARNEnvKey = []string{
177 "AWS_ROLE_ARN",
178 }
179 roleSessionNameEnvKey = []string{
180 "AWS_ROLE_SESSION_NAME",
181 }
153) 182)
154 183
155// loadEnvConfig retrieves the SDK's environment configuration. 184// loadEnvConfig retrieves the SDK's environment configuration.
@@ -178,23 +207,31 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
178 207
179 cfg.EnableSharedConfig = enableSharedConfig 208 cfg.EnableSharedConfig = enableSharedConfig
180 209
181 setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) 210 // Static environment credentials
182 setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) 211 var creds credentials.Value
183 setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) 212 setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
213 setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
214 setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
215 if creds.HasKeys() {
216 // Require logical grouping of credentials
217 creds.ProviderName = EnvProviderName
218 cfg.Creds = creds
219 }
220
221 // Role Metadata
222 setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
223 setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
224
225 // Web identity environment variables
226 setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
184 227
185 // CSM environment variables 228 // CSM environment variables
186 setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) 229 setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
230 setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
187 setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) 231 setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
188 setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) 232 setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
189 cfg.CSMEnabled = len(cfg.csmEnabled) > 0 233 cfg.CSMEnabled = len(cfg.csmEnabled) > 0
190 234
191 // Require logical grouping of credentials
192 if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
193 cfg.Creds = credentials.Value{}
194 } else {
195 cfg.Creds.ProviderName = EnvProviderName
196 }
197
198 regionKeys := regionEnvKeys 235 regionKeys := regionEnvKeys
199 profileKeys := profileEnvKeys 236 profileKeys := profileEnvKeys
200 if !cfg.EnableSharedConfig { 237 if !cfg.EnableSharedConfig {
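The added environment keys drive the web identity path and the CSM host override. A minimal sketch of setting them before building a session; every value below is a placeholder:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Web identity: the token file and role ARN must both be present, or
	// credential resolution fails with the WebIdentityEmpty* errors above.
	os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/token")
	os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/pod-role")
	os.Setenv("AWS_ROLE_SESSION_NAME", "pod-session")

	// New CSM host override, paired with AWS_CSM_ENABLED and AWS_CSM_PORT.
	os.Setenv("AWS_CSM_HOST", "127.0.0.1")

	if _, err := session.NewSession(); err != nil {
		fmt.Println("session:", err)
	}
}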
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index be4b5f0..3a28da5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -8,19 +8,17 @@ import (
8 "io/ioutil" 8 "io/ioutil"
9 "net/http" 9 "net/http"
10 "os" 10 "os"
11 "time"
11 12
12 "github.com/aws/aws-sdk-go/aws" 13 "github.com/aws/aws-sdk-go/aws"
13 "github.com/aws/aws-sdk-go/aws/awserr" 14 "github.com/aws/aws-sdk-go/aws/awserr"
14 "github.com/aws/aws-sdk-go/aws/client" 15 "github.com/aws/aws-sdk-go/aws/client"
15 "github.com/aws/aws-sdk-go/aws/corehandlers" 16 "github.com/aws/aws-sdk-go/aws/corehandlers"
16 "github.com/aws/aws-sdk-go/aws/credentials" 17 "github.com/aws/aws-sdk-go/aws/credentials"
17 "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
18 "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
19 "github.com/aws/aws-sdk-go/aws/csm" 18 "github.com/aws/aws-sdk-go/aws/csm"
20 "github.com/aws/aws-sdk-go/aws/defaults" 19 "github.com/aws/aws-sdk-go/aws/defaults"
21 "github.com/aws/aws-sdk-go/aws/endpoints" 20 "github.com/aws/aws-sdk-go/aws/endpoints"
22 "github.com/aws/aws-sdk-go/aws/request" 21 "github.com/aws/aws-sdk-go/aws/request"
23 "github.com/aws/aws-sdk-go/internal/shareddefaults"
24) 22)
25 23
26const ( 24const (
@@ -107,7 +105,15 @@ func New(cfgs ...*aws.Config) *Session {
107 105
108 s := deprecatedNewSession(cfgs...) 106 s := deprecatedNewSession(cfgs...)
109 if envCfg.CSMEnabled { 107 if envCfg.CSMEnabled {
110 enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) 108 err := enableCSM(&s.Handlers, envCfg.CSMClientID,
109 envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
110 if err != nil {
111 err = fmt.Errorf("failed to enable CSM, %v", err)
112 s.Config.Logger.Log("ERROR:", err.Error())
113 s.Handlers.Validate.PushBack(func(r *request.Request) {
114 r.Error = err
115 })
116 }
111 } 117 }
112 118
113 return s 119 return s
@@ -210,6 +216,12 @@ type Options struct {
210 // the config enables assume role with MFA via the mfa_serial field. 216
211 AssumeRoleTokenProvider func() (string, error) 217 AssumeRoleTokenProvider func() (string, error)
212 218
219 // When the SDK's shared config is configured to assume a role, this option
220 // may be provided to set the expiry duration of the STS credentials.
221 // Defaults to 15 minutes if not set as documented in the
222 // stscreds.AssumeRoleProvider.
223 AssumeRoleDuration time.Duration
224
213 // Reader for a custom Certificate Authority (CA) bundle in PEM format that 225
214 // the SDK will use instead of the default system's root CA bundle. Use this 226 // the SDK will use instead of the default system's root CA bundle. Use this
215 // only if you want to replace the CA bundle the SDK uses for TLS requests. 227 // only if you want to replace the CA bundle the SDK uses for TLS requests.
@@ -224,6 +236,12 @@ type Options struct {
224 // to also enable this feature. CustomCABundle session option field has priority 236 // to also enable this feature. CustomCABundle session option field has priority
225 // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. 237 // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
226 CustomCABundle io.Reader 238 CustomCABundle io.Reader
239
240 // The handlers that the session and all API clients will be created with.
241 // This must be a complete set of handlers. Use the defaults.Handlers()
242 // function to initialize this value before changing the handlers to be
243 // used by the SDK.
244 Handlers request.Handlers
227} 245}
228 246
229// NewSessionWithOptions returns a new Session created from SDK defaults, config files, 247// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -329,27 +347,36 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
329 return s 347 return s
330} 348}
331 349
332func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { 350func enableCSM(handlers *request.Handlers,
333 logger.Log("Enabling CSM") 351 clientID, host, port string,
334 if len(port) == 0 { 352 logger aws.Logger,
335 port = csm.DefaultPort 353) error {
354 if logger != nil {
355 logger.Log("Enabling CSM")
336 } 356 }
337 357
338 r, err := csm.Start(clientID, "127.0.0.1:"+port) 358 r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port))
339 if err != nil { 359 if err != nil {
340 return 360 return err
341 } 361 }
342 r.InjectHandlers(handlers) 362 r.InjectHandlers(handlers)
363
364 return nil
343} 365}
344 366
345func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { 367func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
346 cfg := defaults.Config() 368 cfg := defaults.Config()
347 handlers := defaults.Handlers() 369
370 handlers := opts.Handlers
371 if handlers.IsEmpty() {
372 handlers = defaults.Handlers()
373 }
348 374
349 // Get a merged version of the user provided config to determine if 375 // Get a merged version of the user provided config to determine if
350 // credentials were set. 376
351 userCfg := &aws.Config{} 377 userCfg := &aws.Config{}
352 userCfg.MergeIn(cfgs...) 378 userCfg.MergeIn(cfgs...)
379 cfg.MergeIn(userCfg)
353 380
354 // Ordered config files will be loaded in with later files overwriting 381 // Ordered config files will be loaded in with later files overwriting
355 // previous config file values. 382 // previous config file values.
@@ -366,9 +393,11 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
366 } 393 }
367 394
368 // Load additional config from file(s) 395 // Load additional config from file(s)
369 sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) 396 sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
370 if err != nil { 397 if err != nil {
371 return nil, err 398 if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
399 return nil, err
400 }
372 } 401 }
373 402
374 if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { 403 if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
@@ -382,7 +411,11 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
382 411
383 initHandlers(s) 412 initHandlers(s)
384 if envCfg.CSMEnabled { 413 if envCfg.CSMEnabled {
385 enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) 414 err := enableCSM(&s.Handlers, envCfg.CSMClientID,
415 envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
416 if err != nil {
417 return nil, err
418 }
386 } 419 }
387 420
388 // Setup HTTP client with custom cert bundle if enabled 421 // Setup HTTP client with custom cert bundle if enabled
@@ -443,9 +476,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) {
443 return p, nil 476 return p, nil
444} 477}
445 478
446func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { 479func mergeConfigSrcs(cfg, userCfg *aws.Config,
447 // Merge in user provided configuration 480 envCfg envConfig, sharedCfg sharedConfig,
448 cfg.MergeIn(userCfg) 481 handlers request.Handlers,
482 sessOpts Options,
483) error {
449 484
450 // Region if not already set by user 485 // Region if not already set by user
451 if len(aws.StringValue(cfg.Region)) == 0 { 486 if len(aws.StringValue(cfg.Region)) == 0 {
@@ -464,164 +499,19 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
464 } 499 }
465 } 500 }
466 501
467 // Configure credentials if not already set 502 // Configure credentials if not already set by the user when creating the
503 // Session.
468 if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { 504 if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
469 505 creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
470 // inspect the profile to see if a credential source has been specified. 506 if err != nil {
471 if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { 507 return err
472
473 // if both credential_source and source_profile have been set, return an error
474 // as this is undefined behavior.
475 if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
476 return ErrSharedConfigSourceCollision
477 }
478
479 // valid credential source values
480 const (
481 credSourceEc2Metadata = "Ec2InstanceMetadata"
482 credSourceEnvironment = "Environment"
483 credSourceECSContainer = "EcsContainer"
484 )
485
486 switch sharedCfg.AssumeRole.CredentialSource {
487 case credSourceEc2Metadata:
488 cfgCp := *cfg
489 p := defaults.RemoteCredProvider(cfgCp, handlers)
490 cfgCp.Credentials = credentials.NewCredentials(p)
491
492 if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
493 // AssumeRole Token provider is required if doing Assume Role
494 // with MFA.
495 return AssumeRoleTokenProviderNotSetError{}
496 }
497
498 cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
499 case credSourceEnvironment:
500 cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
501 envCfg.Creds,
502 )
503 case credSourceECSContainer:
504 if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
505 return ErrSharedConfigECSContainerEnvVarEmpty
506 }
507
508 cfgCp := *cfg
509 p := defaults.RemoteCredProvider(cfgCp, handlers)
510 creds := credentials.NewCredentials(p)
511
512 cfg.Credentials = creds
513 default:
514 return ErrSharedConfigInvalidCredSource
515 }
516
517 return nil
518 }
519
520 if len(envCfg.Creds.AccessKeyID) > 0 {
521 cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
522 envCfg.Creds,
523 )
524 } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
525 cfgCp := *cfg
526 cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
527 sharedCfg.AssumeRoleSource.Creds,
528 )
529
530 if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
531 // AssumeRole Token provider is required if doing Assume Role
532 // with MFA.
533 return AssumeRoleTokenProviderNotSetError{}
534 }
535
536 cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
537 } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
538 cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
539 sharedCfg.Creds,
540 )
541 } else if len(sharedCfg.CredentialProcess) > 0 {
542 cfg.Credentials = processcreds.NewCredentials(
543 sharedCfg.CredentialProcess,
544 )
545 } else {
546 // Fallback to default credentials provider, include mock errors
547 // for the credential chain so user can identify why credentials
548 // failed to be retrieved.
549 cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
550 VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
551 Providers: []credentials.Provider{
552 &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
553 &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
554 defaults.RemoteCredProvider(*cfg, handlers),
555 },
556 })
557 } 508 }
509 cfg.Credentials = creds
558 } 510 }
559 511
560 return nil 512 return nil
561} 513}
562 514
563func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
564 return stscreds.NewCredentials(
565 &Session{
566 Config: &cfg,
567 Handlers: handlers.Copy(),
568 },
569 sharedCfg.AssumeRole.RoleARN,
570 func(opt *stscreds.AssumeRoleProvider) {
571 opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
572
573 // Assume role with external ID
574 if len(sharedCfg.AssumeRole.ExternalID) > 0 {
575 opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
576 }
577
578 // Assume role with MFA
579 if len(sharedCfg.AssumeRole.MFASerial) > 0 {
580 opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
581 opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
582 }
583 },
584 )
585}
586
587// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
588// MFAToken option is not set when shared config is configured load assume a
589// role with an MFA token.
590type AssumeRoleTokenProviderNotSetError struct{}
591
592// Code is the short id of the error.
593func (e AssumeRoleTokenProviderNotSetError) Code() string {
594 return "AssumeRoleTokenProviderNotSetError"
595}
596
597// Message is the description of the error
598func (e AssumeRoleTokenProviderNotSetError) Message() string {
599 return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
600}
601
602// OrigErr is the underlying error that caused the failure.
603func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
604 return nil
605}
606
607// Error satisfies the error interface.
608func (e AssumeRoleTokenProviderNotSetError) Error() string {
609 return awserr.SprintError(e.Code(), e.Message(), "", nil)
610}
611
612type credProviderError struct {
613 Err error
614}
615
616var emptyCreds = credentials.Value{}
617
618func (c credProviderError) Retrieve() (credentials.Value, error) {
619 return credentials.Value{}, c.Err
620}
621func (c credProviderError) IsExpired() bool {
622 return true
623}
624
625func initHandlers(s *Session) { 515func initHandlers(s *Session) {
626 // Add the Validate parameter handler if it is not disabled. 516 // Add the Validate parameter handler if it is not disabled.
627 s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) 517 s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
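Two new Options fields round out the session changes: AssumeRoleDuration overrides the 15-minute STS default, and Handlers lets a caller supply a complete handler set, seeded from defaults.Handlers as the field documentation requires. A brief sketch:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Begin with the complete default handler set, then customize as needed.
	handlers := defaults.Handlers()

	sess := session.Must(session.NewSessionWithOptions(session.Options{
		AssumeRoleDuration: 30 * time.Minute, // only used when a role is assumed
		Handlers:           handlers,
	}))
	_ = sess
}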
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index 7cb4402..5170b49 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -5,7 +5,6 @@ import (
5 5
6 "github.com/aws/aws-sdk-go/aws/awserr" 6 "github.com/aws/aws-sdk-go/aws/awserr"
7 "github.com/aws/aws-sdk-go/aws/credentials" 7 "github.com/aws/aws-sdk-go/aws/credentials"
8
9 "github.com/aws/aws-sdk-go/internal/ini" 8 "github.com/aws/aws-sdk-go/internal/ini"
10) 9)
11 10
@@ -28,8 +27,12 @@ const (
28 27
29 // endpoint discovery group 28 // endpoint discovery group
30 enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional 29 enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
30
31 // External Credential Process 31 // External Credential Process
32 credentialProcessKey = `credential_process` 32 credentialProcessKey = `credential_process` // optional
33
34 // Web Identity Token File
35 webIdentityTokenFileKey = `web_identity_token_file` // optional
33 36
34 // DefaultSharedConfigProfile is the default profile to be used when 37 // DefaultSharedConfigProfile is the default profile to be used when
35 // loading configuration from the config files if another profile name 38 // loading configuration from the config files if another profile name
@@ -37,36 +40,33 @@ const (
37 DefaultSharedConfigProfile = `default` 40 DefaultSharedConfigProfile = `default`
38) 41)
39 42
40type assumeRoleConfig struct {
41 RoleARN string
42 SourceProfile string
43 CredentialSource string
44 ExternalID string
45 MFASerial string
46 RoleSessionName string
47}
48
49// sharedConfig represents the configuration fields of the SDK config files. 43// sharedConfig represents the configuration fields of the SDK config files.
50type sharedConfig struct { 44type sharedConfig struct {
51 // Credentials values from the config file. Both aws_access_key_id 45 // Credentials values from the config file. Both aws_access_key_id and
52 // and aws_secret_access_key must be provided together in the same file 46 // aws_secret_access_key must be provided together in the same file to be
53 // to be considered valid. The values will be ignored if not a complete group. 47 // considered valid. The values will be ignored if not a complete group.
54 // aws_session_token is an optional field that can be provided if both of the 48 // aws_session_token is an optional field that can be provided if both of
55 // other two fields are also provided. 49 // the other two fields are also provided.
56 // 50 //
57 // aws_access_key_id 51 // aws_access_key_id
58 // aws_secret_access_key 52 // aws_secret_access_key
59 // aws_session_token 53 // aws_session_token
60 Creds credentials.Value 54 Creds credentials.Value
61 55
62 AssumeRole assumeRoleConfig 56 CredentialSource string
63 AssumeRoleSource *sharedConfig 57 CredentialProcess string
58 WebIdentityTokenFile string
59
60 RoleARN string
61 RoleSessionName string
62 ExternalID string
63 MFASerial string
64 64
65 // An external process to request credentials 65 SourceProfileName string
66 CredentialProcess string 66 SourceProfile *sharedConfig
67 67
68 // Region is the region the SDK should use for looking up AWS service endpoints 68 // Region is the region the SDK should use for looking up AWS service
69 // and signing requests. 69 // endpoints and signing requests.
70 // 70 //
71 // region 71 // region
72 Region string 72 Region string
@@ -83,17 +83,18 @@ type sharedConfigFile struct {
83 IniData ini.Sections 83 IniData ini.Sections
84} 84}
85 85
86// loadSharedConfig retrieves the configuration from the list of files 86// loadSharedConfig retrieves the configuration from the list of files using
87// using the profile provided. The order the files are listed will determine 87// the profile provided. The order the files are listed will determine
88// precedence. Values in subsequent files will overwrite values defined in 88// precedence. Values in subsequent files will overwrite values defined in
89// earlier files. 89// earlier files.
90// 90//
91// For example, given two files A and B. Both define credentials. If the order 91// For example, given two files A and B. Both define credentials. If the order
92// of the files are A then B, B's credential values will be used instead of A's. 92// of the files are A then B, B's credential values will be used instead of
93// A's.
93// 94//
94// See sharedConfig.setFromFile for information how the config files 95// See sharedConfig.setFromFile for information how the config files
95// will be loaded. 96// will be loaded.
96func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { 97func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
97 if len(profile) == 0 { 98 if len(profile) == 0 {
98 profile = DefaultSharedConfigProfile 99 profile = DefaultSharedConfigProfile
99 } 100 }
@@ -104,16 +105,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error)
104 } 105 }
105 106
106 cfg := sharedConfig{} 107 cfg := sharedConfig{}
107 if err = cfg.setFromIniFiles(profile, files); err != nil { 108 profiles := map[string]struct{}{}
109 if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
108 return sharedConfig{}, err 110 return sharedConfig{}, err
109 } 111 }
110 112
111 if len(cfg.AssumeRole.SourceProfile) > 0 {
112 if err := cfg.setAssumeRoleSource(profile, files); err != nil {
113 return sharedConfig{}, err
114 }
115 }
116
117 return cfg, nil 113 return cfg, nil
118} 114}
119 115
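
loadSharedConfig is unexported, but its file-precedence rule is observable through session.Options.SharedConfigFiles; a sketch under the assumption of two hypothetical files that both define a region:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// override.ini is listed last, so its values replace any that
	// base.ini also defines for the selected profile.
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		SharedConfigFiles: []string{"base.ini", "override.ini"}, // hypothetical paths
	})
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("effective region:", aws.StringValue(sess.Config.Region))
}
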
@@ -137,60 +133,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
137 return files, nil 133 return files, nil
138} 134}
139 135
140func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { 136func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
141 var assumeRoleSrc sharedConfig 137 // Load the profile from each file, skipping files that don't define it.
142 138 var skippedFiles int
143 if len(cfg.AssumeRole.CredentialSource) > 0 { 139 var profileNotFoundErr error
144 // setAssumeRoleSource is only called when source_profile is found. 140 for _, f := range files {
145 // If both source_profile and credential_source are set, then 141 if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
146 // ErrSharedConfigSourceCollision will be returned 142 if _, ok := err.(SharedConfigProfileNotExistsError); ok {
147 return ErrSharedConfigSourceCollision 143 // Ignore profiles not defined in individual files.
144 profileNotFoundErr = err
145 skippedFiles++
146 continue
147 }
148 return err
149 }
150 }
151 if skippedFiles == len(files) {
152 // If all files were skipped because the profile is not found, return
153 // the original profile not found error.
154 return profileNotFoundErr
148 } 155 }
149 156
150 // Multiple level assume role chains are not support 157 if _, ok := profiles[profile]; ok {
151 if cfg.AssumeRole.SourceProfile == origProfile { 158 // if this is the second instance of the profile the Assume Role
152 assumeRoleSrc = *cfg 159 // options must be cleared because they are only valid for the
153 assumeRoleSrc.AssumeRole = assumeRoleConfig{} 160 // first reference of a profile. The self-linked instance of the
161 // profile only has credential provider options.
162 cfg.clearAssumeRoleOptions()
154 } else { 163 } else {
155 err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) 164 // First time a profile has been seen, it must either be an assume role
156 if err != nil { 165 // or credentials. Assert that if the credential type requires a role ARN,
166 // the ARN is also set.
167 if err := cfg.validateCredentialsRequireARN(profile); err != nil {
157 return err 168 return err
158 } 169 }
159 } 170 }
171 profiles[profile] = struct{}{}
160 172
161 if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { 173 if err := cfg.validateCredentialType(); err != nil {
162 return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} 174 return err
163 } 175 }
164 176
165 cfg.AssumeRoleSource = &assumeRoleSrc 177 // Link source profiles for assume roles
166 178 if len(cfg.SourceProfileName) != 0 {
167 return nil 179 // A profile linked via source_profile ignores credential provider
168} 180 // options; the source profile must provide the credentials.
181 cfg.clearCredentialOptions()
169 182
170func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { 183 srcCfg := &sharedConfig{}
171 // Trim files from the list that don't exist. 184 err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
172 for _, f := range files { 185 if err != nil {
173 if err := cfg.setFromIniFile(profile, f); err != nil { 186 // SourceProfile that doesn't exist is an error in configuration.
174 if _, ok := err.(SharedConfigProfileNotExistsError); ok { 187 if _, ok := err.(SharedConfigProfileNotExistsError); ok {
175 // Ignore proviles missings 188 err = SharedConfigAssumeRoleError{
176 continue 189 RoleARN: cfg.RoleARN,
190 SourceProfile: cfg.SourceProfileName,
191 }
177 } 192 }
178 return err 193 return err
179 } 194 }
195
196 if !srcCfg.hasCredentials() {
197 return SharedConfigAssumeRoleError{
198 RoleARN: cfg.RoleARN,
199 SourceProfile: cfg.SourceProfileName,
200 }
201 }
202
203 cfg.SourceProfile = srcCfg
180 } 204 }
181 205
182 return nil 206 return nil
183} 207}
184 208
185// setFromFile loads the configuration from the file using 209// setFromIniFile loads the configuration from the file using the profile
186// the profile provided. A sharedConfig pointer type value is used so that 210// provided. A sharedConfig pointer type value is used so that multiple config
187// multiple config file loadings can be chained. 211// file loadings can be chained.
188// 212//
189// Only loads complete logically grouped values, and will not set fields in cfg 213// Only loads complete logically grouped values, and will not set fields in cfg
190// for incomplete grouped values in the config. Such as credentials. For example 214// for incomplete grouped values in the config. Such as credentials. For
191// if a config file only includes aws_access_key_id but no aws_secret_access_key 215// example if a config file only includes aws_access_key_id but no
192// the aws_access_key_id will be ignored. 216// aws_secret_access_key the aws_access_key_id will be ignored.
193func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { 217func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
194 section, ok := file.IniData.GetSection(profile) 218 section, ok := file.IniData.GetSection(profile)
195 if !ok { 219 if !ok {
196 // Fallback to the alternate profile name: profile <name> 220 // Fallback to the alternate profile name: profile <name>
@@ -200,42 +224,30 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
200 } 224 }
201 } 225 }
202 226
203 // Shared Credentials 227 if exOpts {
204 akid := section.String(accessKeyIDKey) 228 // Assume Role Parameters
205 secret := section.String(secretAccessKey) 229 updateString(&cfg.RoleARN, section, roleArnKey)
206 if len(akid) > 0 && len(secret) > 0 { 230 updateString(&cfg.ExternalID, section, externalIDKey)
207 cfg.Creds = credentials.Value{ 231 updateString(&cfg.MFASerial, section, mfaSerialKey)
208 AccessKeyID: akid, 232 updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
209 SecretAccessKey: secret, 233 updateString(&cfg.SourceProfileName, section, sourceProfileKey)
210 SessionToken: section.String(sessionTokenKey), 234 updateString(&cfg.CredentialSource, section, credentialSourceKey)
211 ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
212 }
213 }
214 235
215 // Assume Role 236 updateString(&cfg.Region, section, regionKey)
216 roleArn := section.String(roleArnKey)
217 srcProfile := section.String(sourceProfileKey)
218 credentialSource := section.String(credentialSourceKey)
219 hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
220 if len(roleArn) > 0 && hasSource {
221 cfg.AssumeRole = assumeRoleConfig{
222 RoleARN: roleArn,
223 SourceProfile: srcProfile,
224 CredentialSource: credentialSource,
225 ExternalID: section.String(externalIDKey),
226 MFASerial: section.String(mfaSerialKey),
227 RoleSessionName: section.String(roleSessionNameKey),
228 }
229 } 237 }
230 238
231 // `credential_process` 239 updateString(&cfg.CredentialProcess, section, credentialProcessKey)
232 if credProc := section.String(credentialProcessKey); len(credProc) > 0 { 240 updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey)
233 cfg.CredentialProcess = credProc
234 }
235 241
236 // Region 242 // Shared Credentials
237 if v := section.String(regionKey); len(v) > 0 { 243 creds := credentials.Value{
238 cfg.Region = v 244 AccessKeyID: section.String(accessKeyIDKey),
245 SecretAccessKey: section.String(secretAccessKey),
246 SessionToken: section.String(sessionTokenKey),
247 ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
248 }
249 if creds.HasKeys() {
250 cfg.Creds = creds
239 } 251 }
240 252
241 // Endpoint discovery 253 // Endpoint discovery
@@ -247,6 +259,95 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
247 return nil 259 return nil
248} 260}
249 261
262func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
263 var credSource string
264
265 switch {
266 case len(cfg.SourceProfileName) != 0:
267 credSource = sourceProfileKey
268 case len(cfg.CredentialSource) != 0:
269 credSource = credentialSourceKey
270 case len(cfg.WebIdentityTokenFile) != 0:
271 credSource = webIdentityTokenFileKey
272 }
273
274 if len(credSource) != 0 && len(cfg.RoleARN) == 0 {
275 return CredentialRequiresARNError{
276 Type: credSource,
277 Profile: profile,
278 }
279 }
280
281 return nil
282}
283
284func (cfg *sharedConfig) validateCredentialType() error {
285 // Only one or no credential type can be defined.
286 if !oneOrNone(
287 len(cfg.SourceProfileName) != 0,
288 len(cfg.CredentialSource) != 0,
289 len(cfg.CredentialProcess) != 0,
290 len(cfg.WebIdentityTokenFile) != 0,
291 ) {
292 return ErrSharedConfigSourceCollision
293 }
294
295 return nil
296}
297
298func (cfg *sharedConfig) hasCredentials() bool {
299 switch {
300 case len(cfg.SourceProfileName) != 0:
301 case len(cfg.CredentialSource) != 0:
302 case len(cfg.CredentialProcess) != 0:
303 case len(cfg.WebIdentityTokenFile) != 0:
304 case cfg.Creds.HasKeys():
305 default:
306 return false
307 }
308
309 return true
310}
311
312func (cfg *sharedConfig) clearCredentialOptions() {
313 cfg.CredentialSource = ""
314 cfg.CredentialProcess = ""
315 cfg.WebIdentityTokenFile = ""
316 cfg.Creds = credentials.Value{}
317}
318
319func (cfg *sharedConfig) clearAssumeRoleOptions() {
320 cfg.RoleARN = ""
321 cfg.ExternalID = ""
322 cfg.MFASerial = ""
323 cfg.RoleSessionName = ""
324 cfg.SourceProfileName = ""
325}
326
327func oneOrNone(bs ...bool) bool {
328 var count int
329
330 for _, b := range bs {
331 if b {
332 count++
333 if count > 1 {
334 return false
335 }
336 }
337 }
338
339 return true
340}
341
342// updateString will only update dst with the section's value for key, if the key
343// is present in the section.
344func updateString(dst *string, section ini.Section, key string) {
345 if !section.Has(key) {
346 return
347 }
348 *dst = section.String(key)
349}
350
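
The two helpers above are easy to pin down with a standalone illustration; this sketch substitutes a plain map for ini.Section, so the types here are not the SDK's:

package main

import "fmt"

// oneOrNone reports whether at most one of the flags is set,
// mirroring the credential-type collision check above.
func oneOrNone(bs ...bool) bool {
	var count int
	for _, b := range bs {
		if b {
			count++
			if count > 1 {
				return false
			}
		}
	}
	return true
}

// updateString leaves dst untouched when the key is absent, which is
// what lets later config files override only the keys they define.
func updateString(dst *string, section map[string]string, key string) {
	if v, ok := section[key]; ok {
		*dst = v
	}
}

func main() {
	fmt.Println(oneOrNone())           // true: none set
	fmt.Println(oneOrNone(true))       // true: exactly one set
	fmt.Println(oneOrNone(true, true)) // false: collision

	region := "us-east-1"
	updateString(&region, map[string]string{}, "region")
	fmt.Println(region) // still "us-east-1": key absent, value preserved
}
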
250// SharedConfigLoadError is an error for the shared config file failing to load. 351// SharedConfigLoadError is an error for the shared config file failing to load.
251type SharedConfigLoadError struct { 352type SharedConfigLoadError struct {
252 Filename string 353 Filename string
@@ -304,7 +405,8 @@ func (e SharedConfigProfileNotExistsError) Error() string {
304// profile contains assume role information, but that information is invalid 405// profile contains assume role information, but that information is invalid
305// or not complete. 406// or not complete.
306type SharedConfigAssumeRoleError struct { 407type SharedConfigAssumeRoleError struct {
307 RoleARN string 408 RoleARN string
409 SourceProfile string
308} 410}
309 411
310// Code is the short id of the error. 412// Code is the short id of the error.
@@ -314,8 +416,10 @@ func (e SharedConfigAssumeRoleError) Code() string {
314 416
315// Message is the description of the error 417// Message is the description of the error
316func (e SharedConfigAssumeRoleError) Message() string { 418func (e SharedConfigAssumeRoleError) Message() string {
317 return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", 419 return fmt.Sprintf(
318 e.RoleARN) 420 "failed to load assume role for %s, source profile %s has no shared credentials",
421 e.RoleARN, e.SourceProfile,
422 )
319} 423}
320 424
321// OrigErr is the underlying error that caused the failure. 425// OrigErr is the underlying error that caused the failure.
@@ -327,3 +431,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error {
327func (e SharedConfigAssumeRoleError) Error() string { 431func (e SharedConfigAssumeRoleError) Error() string {
328 return awserr.SprintError(e.Code(), e.Message(), "", nil) 432 return awserr.SprintError(e.Code(), e.Message(), "", nil)
329} 433}
434
435// CredentialRequiresARNError provides the error for shared config credentials
436// that are incorrectly configured in the shared config or credentials file.
437type CredentialRequiresARNError struct {
438 // type of credentials that were configured.
439 Type string
440
441 // Profile name the credentials were in.
442 Profile string
443}
444
445// Code is the short id of the error.
446func (e CredentialRequiresARNError) Code() string {
447 return "CredentialRequiresARNError"
448}
449
450// Message is the description of the error
451func (e CredentialRequiresARNError) Message() string {
452 return fmt.Sprintf(
453 "credential type %s requires role_arn, profile %s",
454 e.Type, e.Profile,
455 )
456}
457
458// OrigErr is the underlying error that caused the failure.
459func (e CredentialRequiresARNError) OrigErr() error {
460 return nil
461}
462
463// Error satisfies the error interface.
464func (e CredentialRequiresARNError) Error() string {
465 return awserr.SprintError(e.Code(), e.Message(), "", nil)
466}
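
CredentialRequiresARNError (like SharedConfigAssumeRoleError) satisfies the awserr.Error interface, so callers can branch on the code. A sketch of surfacing the new validation failure; the profile name is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// A profile that sets source_profile, credential_source, or
	// web_identity_token_file without role_arn now fails to load.
	_, err := session.NewSessionWithOptions(session.Options{
		Profile:           "missing-role-arn", // hypothetical misconfigured profile
		SharedConfigState: session.SharedConfigEnable,
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "CredentialRequiresARNError":
			fmt.Println("add role_arn to the profile:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}
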
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 523db79..8104793 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error {
687 if !aws.IsReaderSeekable(ctx.Body) { 687 if !aws.IsReaderSeekable(ctx.Body) {
688 return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) 688 return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
689 } 689 }
690 hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) 690 hashBytes, err := makeSha256Reader(ctx.Body)
691 if err != nil {
692 return err
693 }
694 hash = hex.EncodeToString(hashBytes)
691 } 695 }
692 696
693 if includeSHA256Header { 697 if includeSHA256Header {
@@ -734,10 +738,16 @@ func makeSha256(data []byte) []byte {
734 return hash.Sum(nil) 738 return hash.Sum(nil)
735} 739}
736 740
737func makeSha256Reader(reader io.ReadSeeker) []byte { 741func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
738 hash := sha256.New() 742 hash := sha256.New()
739 start, _ := reader.Seek(0, sdkio.SeekCurrent) 743 start, err := reader.Seek(0, sdkio.SeekCurrent)
740 defer reader.Seek(start, sdkio.SeekStart) 744 if err != nil {
745 return nil, err
746 }
747 defer func() {
748 // ensure error is returned if unable to seek back to start of payload.
749 _, err = reader.Seek(start, sdkio.SeekStart)
750 }()
741 751
742 // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies 752 // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
743 // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. 753 // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
@@ -748,7 +758,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
748 io.CopyN(hash, reader, size) 758 io.CopyN(hash, reader, size)
749 } 759 }
750 760
751 return hash.Sum(nil) 761 return hash.Sum(nil), nil
752} 762}
753 763
754const doubleSpace = " " 764const doubleSpace = " "
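
The reworked makeSha256Reader now surfaces seek failures instead of silently producing a bad digest. The hash-then-rewind pattern generalizes; a standalone sketch, not the SDK's code:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// hashAndRewind hashes everything after the reader's current offset,
// then seeks back, reporting an error if either seek fails.
func hashAndRewind(r io.ReadSeeker) (sum string, err error) {
	start, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		return "", err
	}
	defer func() {
		// Surface a failure to restore the offset, as the diff above does.
		if _, serr := r.Seek(start, io.SeekStart); serr != nil && err == nil {
			err = serr
		}
	}()

	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := hashAndRewind(strings.NewReader("example payload"))
	fmt.Println(sum, err)
}
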
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
index 8b6f234..4550915 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -7,13 +7,18 @@ import (
7 "github.com/aws/aws-sdk-go/internal/sdkio" 7 "github.com/aws/aws-sdk-go/internal/sdkio"
8) 8)
9 9
10// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should 10// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the
11// only be used with an io.Reader that is also an io.Seeker. Doing so may 11// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
12// cause request signature errors, or request body's not sent for GET, HEAD 12// streaming payload API operations.
13// and DELETE HTTP methods.
14// 13//
15// Deprecated: Should only be used with io.ReadSeeker. If using for 14// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
16// S3 PutObject to stream content use s3manager.Uploader instead. 15// operation's input will prevent that operation from being retried in the case of
16// network errors, and cause operation requests to fail if the operation
17// requires payload signing.
18//
19// Note: If using with S3 PutObject to stream an object upload, the SDK's S3
20// Upload manager (s3manager.Uploader) provides support for streaming with the
21// ability to retry network errors.
17func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { 22func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
18 return ReaderSeekerCloser{r} 23 return ReaderSeekerCloser{r}
19} 24}
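
The rewritten comment narrows ReadSeekCloser to unsigned streaming payloads. A sketch of the trade-off it describes; the bucket and key are placeholders:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Wrapping a typically nonseekable stream disables retries on network
	// errors; prefer s3manager.Uploader when uploads must be retryable.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Body:   aws.ReadSeekCloser(os.Stdin),
	})
	fmt.Println(err)
}
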
@@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool {
43// Read reads from the reader up to size of p. The number of bytes read, and 48// Read reads from the reader up to size of p. The number of bytes read, and
44// error if it occurred will be returned. 49// error if it occurred will be returned.
45// 50//
46// If the reader is not an io.Reader zero bytes read, and nil error will be returned. 51// If the reader is not an io.Reader zero bytes read, and nil error will be
52// returned.
47// 53//
48// Performs the same functionality as io.Reader Read 54// Performs the same functionality as io.Reader Read
49func (r ReaderSeekerCloser) Read(p []byte) (int, error) { 55func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 15ad9cf..23aae7d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
5const SDKName = "aws-sdk-go" 5const SDKName = "aws-sdk-go"
6 6
7// SDKVersion is the version of this SDK 7// SDKVersion is the version of this SDK
8const SDKVersion = "1.19.18" 8const SDKVersion = "1.21.7"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
index f997033..e56dcee 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -304,7 +304,9 @@ loop:
304 stmt := newCommentStatement(tok) 304 stmt := newCommentStatement(tok)
305 stack.Push(stmt) 305 stack.Push(stmt)
306 default: 306 default:
307 return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) 307 return nil, NewParseError(
308 fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
309 k, tok.Type()))
308 } 310 }
309 311
310 if len(tokens) > 0 { 312 if len(tokens) > 0 {
@@ -314,7 +316,7 @@ loop:
314 316
315 // this occurs when a statement has not been completed 317 // this occurs when a statement has not been completed
316 if stack.top > 1 { 318 if stack.top > 1 {
317 return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) 319 return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
318 } 320 }
319 321
320 // returns a sublist which excludes the start symbol 322 // returns a sublist which excludes the start symbol
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 0000000..864fb67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,296 @@
1// Package jsonutil provides JSON serialization of AWS requests and responses.
2package jsonutil
3
4import (
5 "bytes"
6 "encoding/base64"
7 "encoding/json"
8 "fmt"
9 "math"
10 "reflect"
11 "sort"
12 "strconv"
13 "time"
14
15 "github.com/aws/aws-sdk-go/aws"
16 "github.com/aws/aws-sdk-go/private/protocol"
17)
18
19var timeType = reflect.ValueOf(time.Time{}).Type()
20var byteSliceType = reflect.ValueOf([]byte{}).Type()
21
22// BuildJSON builds a JSON string for a given object v.
23func BuildJSON(v interface{}) ([]byte, error) {
24 var buf bytes.Buffer
25
26 err := buildAny(reflect.ValueOf(v), &buf, "")
27 return buf.Bytes(), err
28}
29
30func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
31 origVal := value
32 value = reflect.Indirect(value)
33 if !value.IsValid() {
34 return nil
35 }
36
37 vtype := value.Type()
38
39 t := tag.Get("type")
40 if t == "" {
41 switch vtype.Kind() {
42 case reflect.Struct:
43 // also it can't be a time object
44 if value.Type() != timeType {
45 t = "structure"
46 }
47 case reflect.Slice:
48 // also it can't be a byte slice
49 if _, ok := value.Interface().([]byte); !ok {
50 t = "list"
51 }
52 case reflect.Map:
53 // cannot be a JSONValue map
54 if _, ok := value.Interface().(aws.JSONValue); !ok {
55 t = "map"
56 }
57 }
58 }
59
60 switch t {
61 case "structure":
62 if field, ok := vtype.FieldByName("_"); ok {
63 tag = field.Tag
64 }
65 return buildStruct(value, buf, tag)
66 case "list":
67 return buildList(value, buf, tag)
68 case "map":
69 return buildMap(value, buf, tag)
70 default:
71 return buildScalar(origVal, buf, tag)
72 }
73}
74
75func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
76 if !value.IsValid() {
77 return nil
78 }
79
80 // unwrap payloads
81 if payload := tag.Get("payload"); payload != "" {
82 field, _ := value.Type().FieldByName(payload)
83 tag = field.Tag
84 value = elemOf(value.FieldByName(payload))
85
86 if !value.IsValid() {
87 return nil
88 }
89 }
90
91 buf.WriteByte('{')
92
93 t := value.Type()
94 first := true
95 for i := 0; i < t.NumField(); i++ {
96 member := value.Field(i)
97
98 // This allocates the most memory.
99 // Additionally, we cannot skip nil fields due to
100 // idempotency auto filling.
101 field := t.Field(i)
102
103 if field.PkgPath != "" {
104 continue // ignore unexported fields
105 }
106 if field.Tag.Get("json") == "-" {
107 continue
108 }
109 if field.Tag.Get("location") != "" {
110 continue // ignore non-body elements
111 }
112 if field.Tag.Get("ignore") != "" {
113 continue
114 }
115
116 if protocol.CanSetIdempotencyToken(member, field) {
117 token := protocol.GetIdempotencyToken()
118 member = reflect.ValueOf(&token)
119 }
120
121 if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
122 continue // ignore unset fields
123 }
124
125 if first {
126 first = false
127 } else {
128 buf.WriteByte(',')
129 }
130
131 // figure out what this field is called
132 name := field.Name
133 if locName := field.Tag.Get("locationName"); locName != "" {
134 name = locName
135 }
136
137 writeString(name, buf)
138 buf.WriteString(`:`)
139
140 err := buildAny(member, buf, field.Tag)
141 if err != nil {
142 return err
143 }
144
145 }
146
147 buf.WriteString("}")
148
149 return nil
150}
151
152func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
153 buf.WriteString("[")
154
155 for i := 0; i < value.Len(); i++ {
156 buildAny(value.Index(i), buf, "")
157
158 if i < value.Len()-1 {
159 buf.WriteString(",")
160 }
161 }
162
163 buf.WriteString("]")
164
165 return nil
166}
167
168type sortedValues []reflect.Value
169
170func (sv sortedValues) Len() int { return len(sv) }
171func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
172func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
173
174func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
175 buf.WriteString("{")
176
177 sv := sortedValues(value.MapKeys())
178 sort.Sort(sv)
179
180 for i, k := range sv {
181 if i > 0 {
182 buf.WriteByte(',')
183 }
184
185 writeString(k.String(), buf)
186 buf.WriteString(`:`)
187
188 buildAny(value.MapIndex(k), buf, "")
189 }
190
191 buf.WriteString("}")
192
193 return nil
194}
195
196func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
197 // prevents allocation on the heap.
198 scratch := [64]byte{}
199 switch value := reflect.Indirect(v); value.Kind() {
200 case reflect.String:
201 writeString(value.String(), buf)
202 case reflect.Bool:
203 if value.Bool() {
204 buf.WriteString("true")
205 } else {
206 buf.WriteString("false")
207 }
208 case reflect.Int64:
209 buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
210 case reflect.Float64:
211 f := value.Float()
212 if math.IsInf(f, 0) || math.IsNaN(f) {
213 return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
214 }
215 buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
216 default:
217 switch converted := value.Interface().(type) {
218 case time.Time:
219 format := tag.Get("timestampFormat")
220 if len(format) == 0 {
221 format = protocol.UnixTimeFormatName
222 }
223
224 ts := protocol.FormatTime(format, converted)
225 if format != protocol.UnixTimeFormatName {
226 ts = `"` + ts + `"`
227 }
228
229 buf.WriteString(ts)
230 case []byte:
231 if !value.IsNil() {
232 buf.WriteByte('"')
233 if len(converted) < 1024 {
234 // for small buffers, using Encode directly is much faster.
235 dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
236 base64.StdEncoding.Encode(dst, converted)
237 buf.Write(dst)
238 } else {
239 // for large buffers, avoid unnecessary extra temporary
240 // buffer space.
241 enc := base64.NewEncoder(base64.StdEncoding, buf)
242 enc.Write(converted)
243 enc.Close()
244 }
245 buf.WriteByte('"')
246 }
247 case aws.JSONValue:
248 str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
249 if err != nil {
250 return fmt.Errorf("unable to encode JSONValue, %v", err)
251 }
252 buf.WriteString(str)
253 default:
254 return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
255 }
256 }
257 return nil
258}
259
260var hex = "0123456789abcdef"
261
262func writeString(s string, buf *bytes.Buffer) {
263 buf.WriteByte('"')
264 for i := 0; i < len(s); i++ {
265 if s[i] == '"' {
266 buf.WriteString(`\"`)
267 } else if s[i] == '\\' {
268 buf.WriteString(`\\`)
269 } else if s[i] == '\b' {
270 buf.WriteString(`\b`)
271 } else if s[i] == '\f' {
272 buf.WriteString(`\f`)
273 } else if s[i] == '\r' {
274 buf.WriteString(`\r`)
275 } else if s[i] == '\t' {
276 buf.WriteString(`\t`)
277 } else if s[i] == '\n' {
278 buf.WriteString(`\n`)
279 } else if s[i] < 32 {
280 buf.WriteString("\\u00")
281 buf.WriteByte(hex[s[i]>>4])
282 buf.WriteByte(hex[s[i]&0xF])
283 } else {
284 buf.WriteByte(s[i])
285 }
286 }
287 buf.WriteByte('"')
288}
289
290// Returns the reflection element of a value, if it is a pointer.
291func elemOf(value reflect.Value) reflect.Value {
292 for value.Kind() == reflect.Ptr {
293 value = value.Elem()
294 }
295 return value
296}
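
Since build.go is new in this vendor drop, a short illustration of BuildJSON's tag handling may help; the Input type here is hypothetical, not an SDK shape:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

type Input struct {
	_    struct{}  `type:"structure"`
	Name *string   `locationName:"name" type:"string"`
	Tags []*string `locationName:"tags" type:"list"`
	Skip *string   `json:"-"` // excluded by buildStruct
}

func main() {
	in := &Input{
		Name: aws.String("example"),
		Tags: []*string{aws.String("a"), aws.String("b")},
	}
	b, err := jsonutil.BuildJSON(in)
	fmt.Println(string(b), err) // {"name":"example","tags":["a","b"]} <nil>
}
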
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 0000000..ea0da79
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,250 @@
1package jsonutil
2
3import (
4 "bytes"
5 "encoding/base64"
6 "encoding/json"
7 "fmt"
8 "io"
9 "reflect"
10 "time"
11
12 "github.com/aws/aws-sdk-go/aws"
13 "github.com/aws/aws-sdk-go/aws/awserr"
14 "github.com/aws/aws-sdk-go/private/protocol"
15)
16
17// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
18// type. The value to unmarshal the json document into must be a pointer to the
19// type.
20func UnmarshalJSONError(v interface{}, stream io.Reader) error {
21 var errBuf bytes.Buffer
22 body := io.TeeReader(stream, &errBuf)
23
24 err := json.NewDecoder(body).Decode(v)
25 if err != nil {
26 msg := "failed decoding error message"
27 if err == io.EOF {
28 msg = "error message missing"
29 err = nil
30 }
31 return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
32 }
33
34 return nil
35}
36
37// UnmarshalJSON reads a stream and unmarshals the results into object v.
38func UnmarshalJSON(v interface{}, stream io.Reader) error {
39 var out interface{}
40
41 err := json.NewDecoder(stream).Decode(&out)
42 if err == io.EOF {
43 return nil
44 } else if err != nil {
45 return err
46 }
47
48 return unmarshalAny(reflect.ValueOf(v), out, "")
49}
50
51func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
52 vtype := value.Type()
53 if vtype.Kind() == reflect.Ptr {
54 vtype = vtype.Elem() // check kind of actual element type
55 }
56
57 t := tag.Get("type")
58 if t == "" {
59 switch vtype.Kind() {
60 case reflect.Struct:
61 // also it can't be a time object
62 if _, ok := value.Interface().(*time.Time); !ok {
63 t = "structure"
64 }
65 case reflect.Slice:
66 // also it can't be a byte slice
67 if _, ok := value.Interface().([]byte); !ok {
68 t = "list"
69 }
70 case reflect.Map:
71 // cannot be a JSONValue map
72 if _, ok := value.Interface().(aws.JSONValue); !ok {
73 t = "map"
74 }
75 }
76 }
77
78 switch t {
79 case "structure":
80 if field, ok := vtype.FieldByName("_"); ok {
81 tag = field.Tag
82 }
83 return unmarshalStruct(value, data, tag)
84 case "list":
85 return unmarshalList(value, data, tag)
86 case "map":
87 return unmarshalMap(value, data, tag)
88 default:
89 return unmarshalScalar(value, data, tag)
90 }
91}
92
93func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
94 if data == nil {
95 return nil
96 }
97 mapData, ok := data.(map[string]interface{})
98 if !ok {
99 return fmt.Errorf("JSON value is not a structure (%#v)", data)
100 }
101
102 t := value.Type()
103 if value.Kind() == reflect.Ptr {
104 if value.IsNil() { // create the structure if it's nil
105 s := reflect.New(value.Type().Elem())
106 value.Set(s)
107 value = s
108 }
109
110 value = value.Elem()
111 t = t.Elem()
112 }
113
114 // unwrap any payloads
115 if payload := tag.Get("payload"); payload != "" {
116 field, _ := t.FieldByName(payload)
117 return unmarshalAny(value.FieldByName(payload), data, field.Tag)
118 }
119
120 for i := 0; i < t.NumField(); i++ {
121 field := t.Field(i)
122 if field.PkgPath != "" {
123 continue // ignore unexported fields
124 }
125
126 // figure out what this field is called
127 name := field.Name
128 if locName := field.Tag.Get("locationName"); locName != "" {
129 name = locName
130 }
131
132 member := value.FieldByIndex(field.Index)
133 err := unmarshalAny(member, mapData[name], field.Tag)
134 if err != nil {
135 return err
136 }
137 }
138 return nil
139}
140
141func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
142 if data == nil {
143 return nil
144 }
145 listData, ok := data.([]interface{})
146 if !ok {
147 return fmt.Errorf("JSON value is not a list (%#v)", data)
148 }
149
150 if value.IsNil() {
151 l := len(listData)
152 value.Set(reflect.MakeSlice(value.Type(), l, l))
153 }
154
155 for i, c := range listData {
156 err := unmarshalAny(value.Index(i), c, "")
157 if err != nil {
158 return err
159 }
160 }
161
162 return nil
163}
164
165func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
166 if data == nil {
167 return nil
168 }
169 mapData, ok := data.(map[string]interface{})
170 if !ok {
171 return fmt.Errorf("JSON value is not a map (%#v)", data)
172 }
173
174 if value.IsNil() {
175 value.Set(reflect.MakeMap(value.Type()))
176 }
177
178 for k, v := range mapData {
179 kvalue := reflect.ValueOf(k)
180 vvalue := reflect.New(value.Type().Elem()).Elem()
181
182 unmarshalAny(vvalue, v, "")
183 value.SetMapIndex(kvalue, vvalue)
184 }
185
186 return nil
187}
188
189func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
190
191 switch d := data.(type) {
192 case nil:
193 return nil // nothing to do here
194 case string:
195 switch value.Interface().(type) {
196 case *string:
197 value.Set(reflect.ValueOf(&d))
198 case []byte:
199 b, err := base64.StdEncoding.DecodeString(d)
200 if err != nil {
201 return err
202 }
203 value.Set(reflect.ValueOf(b))
204 case *time.Time:
205 format := tag.Get("timestampFormat")
206 if len(format) == 0 {
207 format = protocol.ISO8601TimeFormatName
208 }
209
210 t, err := protocol.ParseTime(format, d)
211 if err != nil {
212 return err
213 }
214 value.Set(reflect.ValueOf(&t))
215 case aws.JSONValue:
216 // No need to use escaping as the value is a non-quoted string.
217 v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
218 if err != nil {
219 return err
220 }
221 value.Set(reflect.ValueOf(v))
222 default:
223 return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
224 }
225 case float64:
226 switch value.Interface().(type) {
227 case *int64:
228 di := int64(d)
229 value.Set(reflect.ValueOf(&di))
230 case *float64:
231 value.Set(reflect.ValueOf(&d))
232 case *time.Time:
233 // Time unmarshaled from a float64 can only be epoch seconds
234 t := time.Unix(int64(d), 0).UTC()
235 value.Set(reflect.ValueOf(&t))
236 default:
237 return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
238 }
239 case bool:
240 switch value.Interface().(type) {
241 case *bool:
242 value.Set(reflect.ValueOf(&d))
243 default:
244 return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
245 }
246 default:
247 return fmt.Errorf("unsupported JSON value (%v)", data)
248 }
249 return nil
250}
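
And the reverse direction with UnmarshalJSON; again the Output shape is hypothetical:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

type Output struct {
	_    struct{} `type:"structure"`
	Name *string  `locationName:"name" type:"string"`
	Size *int64   `locationName:"size" type:"long"`
}

func main() {
	var out Output
	body := strings.NewReader(`{"name":"example","size":42}`)
	if err := jsonutil.UnmarshalJSON(&out, body); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(*out.Name, *out.Size) // example 42
}
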
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 60e5b09..0cb99eb 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -21,7 +21,7 @@ func Build(r *request.Request) {
21 "Version": {r.ClientInfo.APIVersion}, 21 "Version": {r.ClientInfo.APIVersion},
22 } 22 }
23 if err := queryutil.Parse(body, r.Params, false); err != nil { 23 if err := queryutil.Parse(body, r.Params, false); err != nil {
24 r.Error = awserr.New("SerializationError", "failed encoding Query request", err) 24 r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err)
25 return 25 return
26 } 26 }
27 27
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
index 3495c73..f69c1ef 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) {
24 err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") 24 err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
25 if err != nil { 25 if err != nil {
26 r.Error = awserr.NewRequestFailure( 26 r.Error = awserr.NewRequestFailure(
27 awserr.New("SerializationError", "failed decoding Query response", err), 27 awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
28 r.HTTPResponse.StatusCode, 28 r.HTTPResponse.StatusCode,
29 r.RequestID, 29 r.RequestID,
30 ) 30 )
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
index 46d354e..831b011 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -2,73 +2,68 @@ package query
2 2
3import ( 3import (
4 "encoding/xml" 4 "encoding/xml"
5 "io/ioutil" 5 "fmt"
6 6
7 "github.com/aws/aws-sdk-go/aws/awserr" 7 "github.com/aws/aws-sdk-go/aws/awserr"
8 "github.com/aws/aws-sdk-go/aws/request" 8 "github.com/aws/aws-sdk-go/aws/request"
9 "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
9) 10)
10 11
12// UnmarshalErrorHandler is a named request handler to unmarshal request errors
13var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
14
11type xmlErrorResponse struct { 15type xmlErrorResponse struct {
12 XMLName xml.Name `xml:"ErrorResponse"` 16 Code string `xml:"Error>Code"`
13 Code string `xml:"Error>Code"` 17 Message string `xml:"Error>Message"`
14 Message string `xml:"Error>Message"` 18 RequestID string `xml:"RequestId"`
15 RequestID string `xml:"RequestId"`
16} 19}
17 20
18type xmlServiceUnavailableResponse struct { 21type xmlResponseError struct {
19 XMLName xml.Name `xml:"ServiceUnavailableException"` 22 xmlErrorResponse
20} 23}
21 24
22// UnmarshalErrorHandler is a name request handler to unmarshal request errors 25func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
23var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} 26 const svcUnavailableTagName = "ServiceUnavailableException"
27 const errorResponseTagName = "ErrorResponse"
28
29 switch start.Name.Local {
30 case svcUnavailableTagName:
31 e.Code = svcUnavailableTagName
32 e.Message = "service is unavailable"
33 return d.Skip()
34
35 case errorResponseTagName:
36 return d.DecodeElement(&e.xmlErrorResponse, &start)
37
38 default:
39 return fmt.Errorf("unknown error response tag, %v", start)
40 }
41}
24 42
25// UnmarshalError unmarshals an error response for an AWS Query service. 43// UnmarshalError unmarshals an error response for an AWS Query service.
26func UnmarshalError(r *request.Request) { 44func UnmarshalError(r *request.Request) {
27 defer r.HTTPResponse.Body.Close() 45 defer r.HTTPResponse.Body.Close()
28 46
29 bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) 47 var respErr xmlResponseError
48 err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
30 if err != nil { 49 if err != nil {
31 r.Error = awserr.NewRequestFailure( 50 r.Error = awserr.NewRequestFailure(
32 awserr.New("SerializationError", "failed to read from query HTTP response body", err), 51 awserr.New(request.ErrCodeSerialization,
52 "failed to unmarshal error message", err),
33 r.HTTPResponse.StatusCode, 53 r.HTTPResponse.StatusCode,
34 r.RequestID, 54 r.RequestID,
35 ) 55 )
36 return 56 return
37 } 57 }
38 58
39 // First check for specific error 59 reqID := respErr.RequestID
40 resp := xmlErrorResponse{} 60 if len(reqID) == 0 {
41 decodeErr := xml.Unmarshal(bodyBytes, &resp) 61 reqID = r.RequestID
42 if decodeErr == nil {
43 reqID := resp.RequestID
44 if reqID == "" {
45 reqID = r.RequestID
46 }
47 r.Error = awserr.NewRequestFailure(
48 awserr.New(resp.Code, resp.Message, nil),
49 r.HTTPResponse.StatusCode,
50 reqID,
51 )
52 return
53 }
54
55 // Check for unhandled error
56 servUnavailResp := xmlServiceUnavailableResponse{}
57 unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
58 if unavailErr == nil {
59 r.Error = awserr.NewRequestFailure(
60 awserr.New("ServiceUnavailableException", "service is unavailable", nil),
61 r.HTTPResponse.StatusCode,
62 r.RequestID,
63 )
64 return
65 } 62 }
66 63
67 // Failed to retrieve any error message from the response body
68 r.Error = awserr.NewRequestFailure( 64 r.Error = awserr.NewRequestFailure(
69 awserr.New("SerializationError", 65 awserr.New(respErr.Code, respErr.Message, nil),
70 "failed to decode query XML error response", decodeErr),
71 r.HTTPResponse.StatusCode, 66 r.HTTPResponse.StatusCode,
72 r.RequestID, 67 reqID,
73 ) 68 )
74} 69}
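
The custom UnmarshalXML above dispatches on the document's root tag so one decode pass handles both error shapes. A self-contained sketch of the same dispatch, with types local to the example:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type queryError struct {
	Code    string `xml:"Error>Code"`
	Message string `xml:"Error>Message"`
}

// decode maps a bare ServiceUnavailableException root to a synthetic
// code and decodes an ErrorResponse root normally.
func decode(doc string) (queryError, error) {
	var e queryError
	d := xml.NewDecoder(strings.NewReader(doc))
	for {
		tok, err := d.Token()
		if err != nil {
			return e, err
		}
		start, ok := tok.(xml.StartElement)
		if !ok {
			continue // skip whitespace and prolog tokens
		}
		switch start.Name.Local {
		case "ServiceUnavailableException":
			e.Code = "ServiceUnavailableException"
			e.Message = "service is unavailable"
			return e, d.Skip()
		case "ErrorResponse":
			return e, d.DecodeElement(&e, &start)
		default:
			return e, fmt.Errorf("unknown error response tag, %v", start.Name.Local)
		}
	}
}

func main() {
	e, _ := decode(`<ErrorResponse><Error><Code>Throttling</Code><Message>slow down</Message></Error></ErrorResponse>`)
	fmt.Println(e.Code, e.Message) // Throttling slow down

	e, _ = decode(`<ServiceUnavailableException/>`)
	fmt.Println(e.Code) // ServiceUnavailableException
}
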
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index b80f84f..1301b14 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -25,6 +25,8 @@ var noEscape [256]bool
25 25
26var errValueNotSet = fmt.Errorf("value not set") 26var errValueNotSet = fmt.Errorf("value not set")
27 27
28var byteSliceType = reflect.TypeOf([]byte{})
29
28func init() { 30func init() {
29 for i := 0; i < len(noEscape); i++ { 31 for i := 0; i < len(noEscape); i++ {
30 // AWS expects every character except these to be escaped 32 // AWS expects every character except these to be escaped
@@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo
94 continue 96 continue
95 } 97 }
96 98
99 // Support the ability to customize values to be marshaled as a
100 // blob even though they were modeled as a string. Required for S3
101 // API operations like SSECustomerKey, which is modeled as string but
102 // required to be base64 encoded in the request.
103 if field.Tag.Get("marshal-as") == "blob" {
104 m = m.Convert(byteSliceType)
105 }
106
97 var err error 107 var err error
98 switch field.Tag.Get("location") { 108 switch field.Tag.Get("location") {
99 case "headers": // header maps 109 case "headers": // header maps
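
The marshal-as:"blob" hook relies on reflect being able to convert a string-kinded value to []byte; a standalone illustration of that conversion:

package main

import (
	"fmt"
	"reflect"
)

var byteSliceType = reflect.TypeOf([]byte{})

func main() {
	// Mirrors m = m.Convert(byteSliceType) in the diff above: the string
	// value is reinterpreted as a byte slice so it can be base64 encoded.
	key := reflect.ValueOf("secret-key-material")
	blob := key.Convert(byteSliceType).Interface().([]byte)
	fmt.Printf("%T %q\n", blob, blob) // []uint8 "secret-key-material"
}
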
@@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) {
137 case string: 147 case string:
138 r.SetStringBody(reader) 148 r.SetStringBody(reader)
139 default: 149 default:
140 r.Error = awserr.New("SerializationError", 150 r.Error = awserr.New(request.ErrCodeSerialization,
141 "failed to encode REST request", 151 "failed to encode REST request",
142 fmt.Errorf("unknown payload type %s", payload.Type())) 152 fmt.Errorf("unknown payload type %s", payload.Type()))
143 } 153 }
@@ -152,7 +162,7 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.
152 if err == errValueNotSet { 162 if err == errValueNotSet {
153 return nil 163 return nil
154 } else if err != nil { 164 } else if err != nil {
155 return awserr.New("SerializationError", "failed to encode REST request", err) 165 return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
156 } 166 }
157 167
158 name = strings.TrimSpace(name) 168 name = strings.TrimSpace(name)
@@ -170,7 +180,7 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag)
170 if err == errValueNotSet { 180 if err == errValueNotSet {
171 continue 181 continue
172 } else if err != nil { 182 } else if err != nil {
173 return awserr.New("SerializationError", "failed to encode REST request", err) 183 return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
174 184
175 } 185 }
176 keyStr := strings.TrimSpace(key.String()) 186 keyStr := strings.TrimSpace(key.String())
@@ -186,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e
186 if err == errValueNotSet { 196 if err == errValueNotSet {
187 return nil 197 return nil
188 } else if err != nil { 198 } else if err != nil {
189 return awserr.New("SerializationError", "failed to encode REST request", err) 199 return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
190 } 200 }
191 201
192 u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) 202 u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
@@ -219,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec
219 if err == errValueNotSet { 229 if err == errValueNotSet {
220 return nil 230 return nil
221 } else if err != nil { 231 } else if err != nil {
222 return awserr.New("SerializationError", "failed to encode REST request", err) 232 return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
223 } 233 }
224 query.Set(name, str) 234 query.Set(name, str)
225 } 235 }
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 33fd53b..de02136 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -57,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
57 defer r.HTTPResponse.Body.Close() 57 defer r.HTTPResponse.Body.Close()
58 b, err := ioutil.ReadAll(r.HTTPResponse.Body) 58 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
59 if err != nil { 59 if err != nil {
60 r.Error = awserr.New("SerializationError", "failed to decode REST response", err) 60 r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
61 } else { 61 } else {
62 payload.Set(reflect.ValueOf(b)) 62 payload.Set(reflect.ValueOf(b))
63 } 63 }
@@ -65,7 +65,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
65 defer r.HTTPResponse.Body.Close() 65 defer r.HTTPResponse.Body.Close()
66 b, err := ioutil.ReadAll(r.HTTPResponse.Body) 66 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
67 if err != nil { 67 if err != nil {
68 r.Error = awserr.New("SerializationError", "failed to decode REST response", err) 68 r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
69 } else { 69 } else {
70 str := string(b) 70 str := string(b)
71 payload.Set(reflect.ValueOf(&str)) 71 payload.Set(reflect.ValueOf(&str))
@@ -77,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
77 case "io.ReadSeeker": 77 case "io.ReadSeeker":
78 b, err := ioutil.ReadAll(r.HTTPResponse.Body) 78 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
79 if err != nil { 79 if err != nil {
80 r.Error = awserr.New("SerializationError", 80 r.Error = awserr.New(request.ErrCodeSerialization,
81 "failed to read response body", err) 81 "failed to read response body", err)
82 return 82 return
83 } 83 }
@@ -85,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
85 default: 85 default:
86 io.Copy(ioutil.Discard, r.HTTPResponse.Body) 86 io.Copy(ioutil.Discard, r.HTTPResponse.Body)
87 defer r.HTTPResponse.Body.Close() 87 defer r.HTTPResponse.Body.Close()
88 r.Error = awserr.New("SerializationError", 88 r.Error = awserr.New(request.ErrCodeSerialization,
89 "failed to decode REST response", 89 "failed to decode REST response",
90 fmt.Errorf("unknown payload type %s", payload.Type())) 90 fmt.Errorf("unknown payload type %s", payload.Type()))
91 } 91 }
@@ -115,14 +115,14 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) {
115 case "header": 115 case "header":
116 err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) 116 err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
117 if err != nil { 117 if err != nil {
118 r.Error = awserr.New("SerializationError", "failed to decode REST response", err) 118 r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
119 break 119 break
120 } 120 }
121 case "headers": 121 case "headers":
122 prefix := field.Tag.Get("locationName") 122 prefix := field.Tag.Get("locationName")
123 err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) 123 err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
124 if err != nil { 124 if err != nil {
125 r.Error = awserr.New("SerializationError", "failed to decode REST response", err) 125 r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
126 break 126 break
127 } 127 }
128 } 128 }
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
index b0f4e24..cf56964 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -37,7 +37,8 @@ func Build(r *request.Request) {
37 err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) 37 err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
38 if err != nil { 38 if err != nil {
39 r.Error = awserr.NewRequestFailure( 39 r.Error = awserr.NewRequestFailure(
40 awserr.New("SerializationError", "failed to encode rest XML request", err), 40 awserr.New(request.ErrCodeSerialization,
41 "failed to encode rest XML request", err),
41 r.HTTPResponse.StatusCode, 42 r.HTTPResponse.StatusCode,
42 r.RequestID, 43 r.RequestID,
43 ) 44 )
@@ -55,7 +56,8 @@ func Unmarshal(r *request.Request) {
55 err := xmlutil.UnmarshalXML(r.Data, decoder, "") 56 err := xmlutil.UnmarshalXML(r.Data, decoder, "")
56 if err != nil { 57 if err != nil {
57 r.Error = awserr.NewRequestFailure( 58 r.Error = awserr.NewRequestFailure(
58 awserr.New("SerializationError", "failed to decode REST XML response", err), 59 awserr.New(request.ErrCodeSerialization,
60 "failed to decode REST XML response", err),
59 r.HTTPResponse.StatusCode, 61 r.HTTPResponse.StatusCode,
60 r.RequestID, 62 r.RequestID,
61 ) 63 )
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
index ff1ef68..7108d38 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -1,6 +1,7 @@
1package xmlutil 1package xmlutil
2 2
3import ( 3import (
4 "bytes"
4 "encoding/base64" 5 "encoding/base64"
5 "encoding/xml" 6 "encoding/xml"
6 "fmt" 7 "fmt"
@@ -10,9 +11,27 @@ import (
10 "strings" 11 "strings"
11 "time" 12 "time"
12 13
14 "github.com/aws/aws-sdk-go/aws/awserr"
13 "github.com/aws/aws-sdk-go/private/protocol" 15 "github.com/aws/aws-sdk-go/private/protocol"
14) 16)
15 17
18// UnmarshalXMLError unmarshals the XML error from the stream into the value
19// type specified. The value must be a pointer. If the message fails to
20// unmarshal, the message content will be included in the returned error as an
21// awserr.UnmarshalError.
22func UnmarshalXMLError(v interface{}, stream io.Reader) error {
23 var errBuf bytes.Buffer
24 body := io.TeeReader(stream, &errBuf)
25
26 err := xml.NewDecoder(body).Decode(v)
27 if err != nil && err != io.EOF {
28 return awserr.NewUnmarshalError(err,
29 "failed to unmarshal error message", errBuf.Bytes())
30 }
31
32 return nil
33}
34
16// UnmarshalXML deserializes an xml.Decoder into the container v. V 35// UnmarshalXML deserializes an xml.Decoder into the container v. V
17// needs to match the shape of the XML expected to be decoded. 36// needs to match the shape of the XML expected to be decoded.
18// If the shape doesn't match unmarshaling will fail. 37// If the shape doesn't match unmarshaling will fail.
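
UnmarshalXMLError tees what it reads into a buffer so a failed decode can report the raw payload. A usage sketch with a hypothetical target type:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type errDoc struct {
	Code    string `xml:"Error>Code"`
	Message string `xml:"Error>Message"`
}

func main() {
	body := strings.NewReader(
		`<ErrorResponse><Error><Code>AccessDenied</Code><Message>nope</Message></Error></ErrorResponse>`)
	var e errDoc
	if err := xmlutil.UnmarshalXMLError(&e, body); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(e.Code, e.Message) // AccessDenied nope
}
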
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 83a42d2..139c27d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -545,6 +545,10 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
545// Deletes an analytics configuration for the bucket (specified by the analytics 545// Deletes an analytics configuration for the bucket (specified by the analytics
546// configuration ID). 546// configuration ID).
547// 547//
548// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
549// action. The bucket owner has this permission by default. The bucket owner
550// can grant this permission to others.
551//
548// Returns awserr.Error for service API and SDK errors. Use runtime type assertions 552// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
549// with awserr.Error's Code and Message methods to get detailed information about 553// with awserr.Error's Code and Message methods to get detailed information about
550// the error. 554// the error.
@@ -1071,7 +1075,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
1071// DeleteBucketReplication API operation for Amazon Simple Storage Service. 1075// DeleteBucketReplication API operation for Amazon Simple Storage Service.
1072// 1076//
 // Deletes the replication configuration from the bucket. For information about
-// replication configuration, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
 // in the Amazon S3 Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -3335,8 +3339,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration
 
 // GetObjectLockConfiguration API operation for Amazon Simple Storage Service.
 //
-// Gets the Object Lock configuration for a bucket. The rule specified in the
-// Object Lock configuration will be applied by default to every new object
+// Gets the object lock configuration for a bucket. The rule specified in the
+// object lock configuration will be applied by default to every new object
 // placed in the specified bucket.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -4210,7 +4214,7 @@ func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipa
 //    // Example iterating over at most 3 pages of a ListMultipartUploads operation.
 //    pageNum := 0
 //    err := client.ListMultipartUploadsPages(params,
-//        func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+//        func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4340,7 +4344,7 @@ func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVer
 //    // Example iterating over at most 3 pages of a ListObjectVersions operation.
 //    pageNum := 0
 //    err := client.ListObjectVersionsPages(params,
-//        func(page *ListObjectVersionsOutput, lastPage bool) bool {
+//        func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4477,7 +4481,7 @@ func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, op
 //    // Example iterating over at most 3 pages of a ListObjects operation.
 //    pageNum := 0
 //    err := client.ListObjectsPages(params,
-//        func(page *ListObjectsOutput, lastPage bool) bool {
+//        func(page *s3.ListObjectsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4615,7 +4619,7 @@ func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input
 //    // Example iterating over at most 3 pages of a ListObjectsV2 operation.
 //    pageNum := 0
 //    err := client.ListObjectsV2Pages(params,
-//        func(page *ListObjectsV2Output, lastPage bool) bool {
+//        func(page *s3.ListObjectsV2Output, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
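The pagination fixes above only qualify the callback's page type with the s3 package, which is how callers outside the package must write it. For reference, a minimal runnable sketch of the corrected pattern, assuming default credentials and a hypothetical bucket named my-bucket:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	client := s3.New(session.Must(session.NewSession()))

	// Walk the bucket one page at a time; returning false stops pagination.
	pageNum := 0
	err := client.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("my-bucket"), // hypothetical bucket name
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		pageNum++
		fmt.Println("page", pageNum, "holds", len(page.Contents), "objects")
		return pageNum <= 3
	})
	if err != nil {
		fmt.Println("list failed:", err)
	}
}
```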
@@ -4745,7 +4749,7 @@ func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts .
 //    // Example iterating over at most 3 pages of a ListParts operation.
 //    pageNum := 0
 //    err := client.ListPartsPages(params,
-//        func(page *ListPartsOutput, lastPage bool) bool {
+//        func(page *s3.ListPartsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -5754,8 +5758,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
 
 // PutBucketPolicy API operation for Amazon Simple Storage Service.
 //
-// Replaces a policy on a bucket. If the bucket already has a policy, the one
-// in this request completely replaces it.
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
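Although the summary was shortened, the behavior is unchanged: the supplied policy document replaces any existing bucket policy wholesale. A minimal sketch, assuming a hypothetical bucket and a hypothetical public-read statement:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The policy travels as a raw JSON string and overwrites any existing policy.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowPublicRead",
	    "Effect": "Allow",
	    "Principal": "*",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::my-bucket/*"
	  }]
	}`

	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket name
		Policy: aws.String(policy),
	})
	if err != nil {
		fmt.Println("put policy failed:", err)
	}
}
```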
@@ -5831,7 +5834,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
 // PutBucketReplication API operation for Amazon Simple Storage Service.
 //
 // Creates a replication configuration or replaces an existing one. For more
-// information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
 // in the Amazon S3 Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6439,8 +6442,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration
 
 // PutObjectLockConfiguration API operation for Amazon Simple Storage Service.
 //
-// Places an Object Lock configuration on the specified bucket. The rule specified
-// in the Object Lock configuration will be applied by default to every new
+// Places an object lock configuration on the specified bucket. The rule specified
+// in the object lock configuration will be applied by default to every new
 // object placed in the specified bucket.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -7010,13 +7013,16 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp
 	return out, req.Send()
 }
 
-// Specifies the days since the initiation of an Incomplete Multipart Upload
-// that Lifecycle will wait before permanently removing all parts of the upload.
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon Simple Storage Service Developer Guide.
 type AbortIncompleteMultipartUpload struct {
 	_ struct{} `type:"structure"`
 
-	// Indicates the number of days that must pass since initiation for Lifecycle
-	// to abort an Incomplete Multipart Upload.
+	// Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+	// upload.
 	DaysAfterInitiation *int64 `type:"integer"`
 }
 
@@ -7039,9 +7045,13 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
 type AbortMultipartUploadInput struct {
 	_ struct{} `type:"structure"`
 
+	// Name of the bucket to which the multipart upload was initiated.
+	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
+	// Key of the object for which the multipart upload was initiated.
+	//
 	// Key is a required field
 	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
 
@@ -7051,6 +7061,8 @@ type AbortMultipartUploadInput struct {
 	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
 	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
 
+	// Upload ID that identifies the multipart upload.
+	//
 	// UploadId is a required field
 	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
 }
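With the newly documented fields, an abort call needs exactly the bucket, key, and upload ID. A minimal sketch, assuming hypothetical names and an upload ID previously returned by CreateMultipartUpload:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	uploadID := "example-upload-id" // placeholder; use the ID from CreateMultipartUpload

	// Aborting frees the storage consumed by parts that were already uploaded.
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),         // hypothetical
		Key:      aws.String("backups/large.bin"), // hypothetical
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		fmt.Println("abort failed:", err)
	}
}
```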
@@ -7145,10 +7157,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart
 	return s
 }
 
+// Configures the transfer acceleration state for an Amazon S3 bucket. For more
+// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
+// in the Amazon Simple Storage Service Developer Guide.
 type AccelerateConfiguration struct {
 	_ struct{} `type:"structure"`
 
-	// The accelerate configuration of the bucket.
+	// Specifies the transfer acceleration status of the bucket.
	Status *string `type:"string" enum:"BucketAccelerateStatus"`
 }
 
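To exercise the documented Status enum, a minimal sketch that enables transfer acceleration on a hypothetical bucket (the SDK exposes the valid values as BucketAccelerateStatus* constants):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Status accepts the BucketAccelerateStatus enum: "Enabled" or "Suspended".
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	if err != nil {
		fmt.Println("enable acceleration failed:", err)
	}
}
```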
@@ -7168,12 +7183,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
 	return s
 }
 
+// Contains the elements that set the ACL permissions for an object per grantee.
 type AccessControlPolicy struct {
 	_ struct{} `type:"structure"`
 
 	// A list of grants.
 	Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
 
+	// Container for the bucket owner's display name and ID.
 	Owner *Owner `type:"structure"`
 }
 
@@ -7223,7 +7240,9 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
 type AccessControlTranslation struct {
 	_ struct{} `type:"structure"`
 
-	// The override value for the owner of the replica object.
+	// Specifies the replica ownership. For default and valid values, see PUT bucket
+	// replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+	// in the Amazon Simple Storage Service API Reference.
 	//
 	// Owner is a required field
 	Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
@@ -7258,10 +7277,14 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation
 	return s
 }
 
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates in any combination,
+// and an object must match all of the predicates for the filter to apply.
 type AnalyticsAndOperator struct {
 	_ struct{} `type:"structure"`
 
-	// The prefix to use when evaluating an AND predicate.
+	// The prefix to use when evaluating an AND predicate: The prefix that an object
+	// must have to be included in the metrics results.
 	Prefix *string `type:"string"`
 
 	// The list of tags to use when evaluating an AND predicate.
@@ -7310,6 +7333,11 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
 	return s
 }
 
+// Specifies the configuration and any analyses for the analytics filter of
+// an Amazon S3 bucket.
+//
+// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html)
+// in the Amazon Simple Storage Service API Reference.
 type AnalyticsConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -7318,13 +7346,13 @@ type AnalyticsConfiguration struct {
 	// If no filter is provided, all objects will be considered in any analysis.
 	Filter *AnalyticsFilter `type:"structure"`
 
-	// The identifier used to represent an analytics configuration.
+	// The ID that identifies the analytics configuration.
 	//
 	// Id is a required field
 	Id *string `type:"string" required:"true"`
 
-	// If present, it indicates that data related to access patterns will be collected
-	// and made available to analyze the tradeoffs between different storage classes.
+	// Contains data related to access patterns to be collected and made available
+	// to analyze the tradeoffs between different storage classes.
 	//
 	// StorageClassAnalysis is a required field
 	StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
@@ -7384,6 +7412,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis
 	return s
 }
 
+// Where to publish the analytics results.
 type AnalyticsExportDestination struct {
 	_ struct{} `type:"structure"`
 
@@ -7492,7 +7521,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
 type AnalyticsS3BucketDestination struct {
 	_ struct{} `type:"structure"`
 
-	// The Amazon resource name (ARN) of the bucket to which data is exported.
+	// The Amazon Resource Name (ARN) of the bucket to which data is exported.
 	//
 	// Bucket is a required field
 	Bucket *string `type:"string" required:"true"`
@@ -7501,13 +7530,12 @@ type AnalyticsS3BucketDestination struct {
 	// the owner will not be validated prior to exporting data.
 	BucketAccountId *string `type:"string"`
 
-	// The file format used when exporting data to Amazon S3.
+	// Specifies the file format used when exporting data to Amazon S3.
 	//
 	// Format is a required field
 	Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
 
-	// The prefix to use when exporting data. The exported data begins with this
-	// prefix.
+	// The prefix to use when exporting data. The prefix is prepended to all results.
 	Prefix *string `type:"string"`
 }
 
@@ -7600,9 +7628,14 @@ func (s *Bucket) SetName(v string) *Bucket {
 	return s
 }
 
+// Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
 type BucketLifecycleConfiguration struct {
 	_ struct{} `type:"structure"`
 
+	// A lifecycle rule for individual objects in an Amazon S3 bucket.
+	//
 	// Rules is a required field
 	Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
 }
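BucketLifecycleConfiguration ties the lifecycle pieces together, including the AbortIncompleteMultipartUpload element documented earlier in this diff. A minimal sketch, assuming a hypothetical bucket and a seven-day abort window:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-uploads"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // whole bucket
				// Remove parts of multipart uploads still unfinished after 7 days.
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put lifecycle failed:", err)
	}
}
```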
@@ -7649,9 +7682,10 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec
 type BucketLoggingStatus struct {
 	_ struct{} `type:"structure"`
 
-	// Container for logging information. Presence of this element indicates that
-	// logging is enabled. Parameters TargetBucket and TargetPrefix are required
-	// in this case.
+	// Describes where logs are stored and the prefix that Amazon S3 assigns to
+	// all log object keys for a bucket. For more information, see PUT Bucket logging
+	// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+	// in the Amazon Simple Storage Service API Reference.
 	LoggingEnabled *LoggingEnabled `type:"structure"`
 }
 
@@ -7686,9 +7720,15 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin
 	return s
 }
 
+// Describes the cross-origin access configuration for objects in an Amazon
+// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// Simple Storage Service Developer Guide.
 type CORSConfiguration struct {
 	_ struct{} `type:"structure"`
 
+	// A set of allowed origins and methods.
+	//
 	// CORSRules is a required field
 	CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
 }
@@ -7732,14 +7772,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
 	return s
 }
 
+// Specifies a cross-origin access rule for an Amazon S3 bucket.
 type CORSRule struct {
 	_ struct{} `type:"structure"`
 
-	// Specifies which headers are allowed in a pre-flight OPTIONS request.
+	// Headers that are specified in the Access-Control-Request-Headers header.
+	// These headers are allowed in a preflight OPTIONS request. In response to
+	// any preflight OPTIONS request, Amazon S3 returns any requested headers that
+	// are allowed.
 	AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
 
-	// Identifies HTTP methods that the domain/origin specified in the rule is allowed
-	// to execute.
+	// An HTTP method that you allow the origin to execute. Valid values are GET,
+	// PUT, HEAD, POST, and DELETE.
 	//
 	// AllowedMethods is a required field
 	AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
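A CORSRule is only meaningful inside a CORSConfiguration sent via PutBucketCors. A minimal sketch, assuming a hypothetical bucket and origin:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("https://example.com")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000), // let browsers cache the preflight
			}},
		},
	})
	if err != nil {
		fmt.Println("put cors failed:", err)
	}
}
```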
@@ -8290,6 +8334,7 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
 	return s
 }
 
+// Specifies a condition that must be met for a redirect to apply.
 type Condition struct {
 	_ struct{} `type:"structure"`
 
@@ -8409,7 +8454,7 @@ type CopyObjectInput struct {
 	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
 	// the source object. The encryption key provided in this header must be one
 	// that was used when the source object was created.
-	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+	CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
 	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
 	// Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -8444,10 +8489,10 @@ type CopyObjectInput struct {
 	// Specifies whether you want to apply a Legal Hold to the copied object.
 	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-	// The Object Lock mode that you want to apply to the copied object.
+	// The object lock mode that you want to apply to the copied object.
 	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-	// The date and time when you want the copied object's Object Lock to expire.
+	// The date and time when you want the copied object's object lock to expire.
 	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
 	// Confirms that the requester knows that she or he will be charged for the
@@ -8464,13 +8509,18 @@ type CopyObjectInput struct {
 	// does not store the encryption key. The key must be appropriate for use with
 	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
 	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
 	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
 	// Amazon S3 uses this header for a message integrity check to ensure the encryption
 	// key was transmitted without error.
 	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+	// Specifies the AWS KMS Encryption Context to use for object encryption. The
+	// value of this header is a base64-encoded UTF-8 string holding JSON with the
+	// encryption context key-value pairs.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
 	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
 	// requests for an object protected by AWS KMS will fail if not made via SSL
 	// or using SigV4. Documentation on configuring any of the officially supported
@@ -8735,6 +8785,12 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
 	return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput {
+	s.SSEKMSEncryptionContext = &v
+	return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
 	s.SSEKMSKeyId = &v
@@ -8795,6 +8851,11 @@ type CopyObjectOutput struct {
 	// verification of the customer-provided encryption key.
 	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+	// If present, specifies the AWS KMS Encryption Context to use for object encryption.
+	// The value of this header is a base64-encoded UTF-8 string holding JSON with
+	// the encryption context key-value pairs.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
 	// If present, specifies the ID of the AWS Key Management Service (KMS) master
 	// encryption key that was used for the object.
 	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
@@ -8853,6 +8914,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
 	return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput {
+	s.SSEKMSEncryptionContext = &v
+	return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
 	s.SSEKMSKeyId = &v
@@ -8984,7 +9051,8 @@ type CreateBucketInput struct {
 	// Allows grantee to write the ACL for the applicable bucket.
 	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
 
-	// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
+	// Specifies whether you want Amazon S3 object lock to be enabled for the new
+	// bucket.
 	ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"`
 }
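ObjectLockEnabledForBucket can only be set when the bucket is created; it cannot be turned on afterwards. A minimal sketch, assuming a hypothetical bucket name:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Object lock must be requested at creation time.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket:                     aws.String("my-locked-bucket"), // hypothetical
		ObjectLockEnabledForBucket: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("create bucket failed:", err)
	}
}
```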
8990 9058
@@ -9147,10 +9215,10 @@ type CreateMultipartUploadInput struct {
9147 // Specifies whether you want to apply a Legal Hold to the uploaded object. 9215 // Specifies whether you want to apply a Legal Hold to the uploaded object.
9148 ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` 9216 ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
9149 9217
9150 // Specifies the Object Lock mode that you want to apply to the uploaded object. 9218 // Specifies the object lock mode that you want to apply to the uploaded object.
9151 ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` 9219 ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
9152 9220
9153 // Specifies the date and time when you want the Object Lock to expire. 9221 // Specifies the date and time when you want the object lock to expire.
9154 ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` 9222 ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
9155 9223
9156 // Confirms that the requester knows that she or he will be charged for the 9224 // Confirms that the requester knows that she or he will be charged for the
@@ -9167,13 +9235,18 @@ type CreateMultipartUploadInput struct {
9167 // does not store the encryption key. The key must be appropriate for use with 9235 // does not store the encryption key. The key must be appropriate for use with
9168 // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm 9236 // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
9169 // header. 9237 // header.
9170 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` 9238 SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
9171 9239
9172 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 9240 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
9173 // Amazon S3 uses this header for a message integrity check to ensure the encryption 9241 // Amazon S3 uses this header for a message integrity check to ensure the encryption
9174 // key was transmitted without error. 9242 // key was transmitted without error.
9175 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` 9243 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
9176 9244
9245 // Specifies the AWS KMS Encryption Context to use for object encryption. The
9246 // value of this header is a base64-encoded UTF-8 string holding JSON with the
9247 // encryption context key-value pairs.
9248 SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
9249
9177 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT 9250 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
9178 // requests for an object protected by AWS KMS will fail if not made via SSL 9251 // requests for an object protected by AWS KMS will fail if not made via SSL
9179 // or using SigV4. Documentation on configuring any of the officially supported 9252 // or using SigV4. Documentation on configuring any of the officially supported
@@ -9368,6 +9441,12 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti
9368 return s 9441 return s
9369} 9442}
9370 9443
9444// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
9445func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput {
9446 s.SSEKMSEncryptionContext = &v
9447 return s
9448}
9449
9371// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 9450// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
9372func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { 9451func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
9373 s.SSEKMSKeyId = &v 9452 s.SSEKMSKeyId = &v
@@ -9428,6 +9507,11 @@ type CreateMultipartUploadOutput struct {
9428 // verification of the customer-provided encryption key. 9507 // verification of the customer-provided encryption key.
9429 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` 9508 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
9430 9509
9510 // If present, specifies the AWS KMS Encryption Context to use for object encryption.
9511 // The value of this header is a base64-encoded UTF-8 string holding JSON with
9512 // the encryption context key-value pairs.
9513 SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
9514
9431 // If present, specifies the ID of the AWS Key Management Service (KMS) master 9515 // If present, specifies the ID of the AWS Key Management Service (KMS) master
9432 // encryption key that was used for the object. 9516 // encryption key that was used for the object.
9433 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` 9517 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
@@ -9499,6 +9583,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult
9499 return s 9583 return s
9500} 9584}
9501 9585
9586// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
9587func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput {
9588 s.SSEKMSEncryptionContext = &v
9589 return s
9590}
9591
9502// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 9592// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
9503func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { 9593func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
9504 s.SSEKMSKeyId = &v 9594 s.SSEKMSKeyId = &v
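The SSEKMSEncryptionContext fields and setters added above thread the x-amz-server-side-encryption-context header through CopyObject and CreateMultipartUpload. A minimal sketch, assuming a hypothetical bucket, key, and KMS key alias; the header value is base64-encoded JSON of context key-value pairs:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// KMS binds these pairs to the data key; the same context is needed to decrypt.
	ctx := base64.StdEncoding.EncodeToString([]byte(`{"department":"finance"}`))

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:                  aws.String("my-bucket"),          // hypothetical
		Key:                     aws.String("reports/q1.parquet"), // hypothetical
		ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:             aws.String("alias/my-key"), // hypothetical CMK alias
		SSEKMSEncryptionContext: aws.String(ctx),
	})
	if err != nil {
		fmt.Println("create upload failed:", err)
		return
	}
	fmt.Println("upload ID:", aws.StringValue(out.UploadId))
}
```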
@@ -9517,7 +9607,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo
 	return s
 }
 
-// The container element for specifying the default Object Lock retention settings
+// The container element for specifying the default object lock retention settings
 // for new objects placed in the specified bucket.
 type DefaultRetention struct {
 	_ struct{} `type:"structure"`
@@ -9525,7 +9615,7 @@ type DefaultRetention struct {
 	// The number of days that you want to specify for the default retention period.
 	Days *int64 `type:"integer"`
 
-	// The default Object Lock retention mode you want to apply to new objects placed
+	// The default object lock retention mode you want to apply to new objects placed
 	// in the specified bucket.
 	Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
 
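DefaultRetention only takes effect inside an ObjectLockConfiguration applied with PutObjectLockConfiguration. A minimal sketch, assuming a hypothetical bucket that was created with object lock enabled:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Every new object gets a 30-day GOVERNANCE retention period by default.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-locked-bucket"), // hypothetical; object lock enabled at creation
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("put object lock config failed:", err)
	}
}
```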
@@ -9625,7 +9715,7 @@ type DeleteBucketAnalyticsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The identifier used to represent an analytics configuration.
+	// The ID that identifies the analytics configuration.
 	//
 	// Id is a required field
 	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
@@ -10425,7 +10515,7 @@ type DeleteObjectInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// Indicates whether S3 Object Lock should bypass Governance-mode restrictions
+	// Indicates whether Amazon S3 object lock should bypass governance-mode restrictions
 	// to process this operation.
 	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
 
@@ -10665,7 +10755,7 @@ type DeleteObjectsInput struct {
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
 	// Specifies whether you want to delete this object even if it has a Governance-type
-	// Object Lock in place. You must have sufficient permissions to perform this
+	// object lock in place. You must have sufficient permissions to perform this
 	// operation.
 	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
 
@@ -10902,33 +10992,33 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
 	return s
 }
 
-// A container for information about the replication destination.
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket.
 type Destination struct {
 	_ struct{} `type:"structure"`
 
-	// A container for information about access control for replicas.
-	//
-	// Use this element only in a cross-account scenario where source and destination
-	// bucket owners are not the same to change replica ownership to the AWS account
-	// that owns the destination bucket. If you don't add this element to the replication
-	// configuration, the replicas are owned by same AWS account that owns the source
-	// object.
+	// Specify this only in a cross-account scenario (where source and destination
+	// bucket owners are not the same), and you want to change replica ownership
+	// to the AWS account that owns the destination bucket. If this is not specified
+	// in the replication configuration, the replicas are owned by same AWS account
+	// that owns the source object.
 	AccessControlTranslation *AccessControlTranslation `type:"structure"`
 
-	// The account ID of the destination bucket. Currently, Amazon S3 verifies this
-	// value only if Access Control Translation is enabled.
-	//
-	// In a cross-account scenario, if you change replica ownership to the AWS account
-	// that owns the destination bucket by adding the AccessControlTranslation element,
-	// this is the account ID of the owner of the destination bucket.
+	// Destination bucket owner account ID. In a cross-account scenario, if you
+	// direct Amazon S3 to change replica ownership to the AWS account that owns
+	// the destination bucket by specifying the AccessControlTranslation property,
+	// this is the account ID of the destination bucket owner. For more information,
+	// see Cross-Region Replication Additional Configuration: Change Replica Owner
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in
+	// the Amazon Simple Storage Service Developer Guide.
 	Account *string `type:"string"`
 
 	// The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
 	// store replicas of the object identified by the rule.
 	//
-	// If there are multiple rules in your replication configuration, all rules
-	// must specify the same bucket as the destination. A replication configuration
-	// can replicate objects to only one destination bucket.
+	// A replication configuration can replicate objects to only one destination
+	// bucket. If there are multiple rules in your replication configuration, all
+	// rules must specify the same destination bucket.
 	//
 	// Bucket is a required field
 	Bucket *string `type:"string" required:"true"`
@@ -10937,8 +11027,13 @@ type Destination struct {
 	// is specified, you must specify this element.
 	EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
 
-	// The class of storage used to store the object. By default Amazon S3 uses
-	// storage class of the source object when creating a replica.
+	// The storage class to use when replicating objects, such as standard or reduced
+	// redundancy. By default, Amazon S3 uses the storage class of the source object
+	// to create the object replica.
+	//
+	// For valid values, see the StorageClass element of the PUT Bucket replication
+	// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+	// action in the Amazon Simple Storage Service API Reference.
 	StorageClass *string `type:"string" enum:"StorageClass"`
 }
 
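The Destination element documented above sits inside a replication rule. A minimal sketch, assuming hypothetical bucket names and IAM role ARN, and using the older Prefix-style rule to keep it short (the source bucket must have versioning enabled):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"), // hypothetical
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			// IAM role S3 assumes to replicate on your behalf (hypothetical ARN).
			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
			Rules: []*s3.ReplicationRule{{
				Status: aws.String("Enabled"),
				Prefix: aws.String(""), // replicate the whole bucket
				Destination: &s3.Destination{
					Bucket:       aws.String("arn:aws:s3:::destination-bucket"), // hypothetical
					StorageClass: aws.String(s3.StorageClassStandardIa),
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put replication failed:", err)
	}
}
```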
@@ -11068,13 +11163,13 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption {
 	return s
 }
 
-// A container for information about the encryption-based configuration for
-// replicas.
+// Specifies encryption-related information for an Amazon S3 bucket that is
+// a destination for replicated objects.
 type EncryptionConfiguration struct {
 	_ struct{} `type:"structure"`
 
-	// The ID of the AWS KMS key for the AWS Region where the destination bucket
-	// resides. Amazon S3 uses this key to encrypt the replica object.
+	// Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket.
+	// Amazon S3 uses this key to encrypt replica objects.
 	ReplicaKmsKeyID *string `type:"string"`
 }
 
@@ -11207,18 +11302,19 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
 	return s
 }
 
-// A container for a key value pair that defines the criteria for the filter
-// rule.
+// Specifies the Amazon S3 object key name to filter on and whether to filter
+// on the suffix or prefix of the key name.
 type FilterRule struct {
 	_ struct{} `type:"structure"`
 
 	// The object key name prefix or suffix identifying one or more objects to which
-	// the filtering rule applies. The maximum prefix length is 1,024 characters.
-	// Overlapping prefixes and suffixes are not supported. For more information,
-	// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+	// prefixes and suffixes are not supported. For more information, see Configuring
+	// Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
 	Name *string `type:"string" enum:"FilterRuleName"`
 
+	// The value that the filter searches for in object key names.
 	Value *string `type:"string"`
 }
 
@@ -11400,7 +11496,7 @@ type GetBucketAnalyticsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The identifier used to represent an analytics configuration.
+	// The ID that identifies the analytics configuration.
 	//
 	// Id is a required field
 	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
@@ -11597,8 +11693,7 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) {
 type GetBucketEncryptionOutput struct {
 	_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
 
-	// Container for server-side encryption configuration rules. Currently S3 supports
-	// one rule only.
+	// Specifies the default server-side-encryption configuration.
 	ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"`
 }
 
@@ -11956,9 +12051,10 @@ func (s *GetBucketLoggingInput) getBucket() (v string) {
 type GetBucketLoggingOutput struct {
 	_ struct{} `type:"structure"`
 
-	// Container for logging information. Presence of this element indicates that
-	// logging is enabled. Parameters TargetBucket and TargetPrefix are required
-	// in this case.
+	// Describes where logs are stored and the prefix that Amazon S3 assigns to
+	// all log object keys for a bucket. For more information, see PUT Bucket logging
+	// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+	// in the Amazon Simple Storage Service API Reference.
 	LoggingEnabled *LoggingEnabled `type:"structure"`
 }
 
@@ -12592,6 +12688,8 @@ type GetBucketWebsiteOutput struct {
 
 	IndexDocument *IndexDocument `type:"structure"`
 
+	// Specifies the redirect behavior of all requests to a website endpoint of
+	// an Amazon S3 bucket.
 	RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
 
 	RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
@@ -12820,7 +12918,7 @@ type GetObjectInput struct {
 	// does not store the encryption key. The key must be appropriate for use with
 	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
 	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
 	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
 	// Amazon S3 uses this header for a message integrity check to ensure the encryption
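The marshal-as:"blob" tag added to the SSECustomerKey fields lets callers pass the raw key bytes; the SDK base64-encodes them on the wire. A minimal sketch of an SSE-C read, assuming a hypothetical bucket and key and assuming the SDK fills in the key-MD5 header when it is left unset:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Raw 256-bit customer key; with marshal-as:"blob" it is base64-encoded for you.
	key := make([]byte, 32) // use a real random key in practice

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:               aws.String("my-bucket"),       // hypothetical
		Key:                  aws.String("private/doc.txt"), // hypothetical
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(string(key)),
	})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	defer out.Body.Close()
	body, _ := ioutil.ReadAll(out.Body)
	fmt.Println(len(body), "bytes read")
}
```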
@@ -13103,7 +13201,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje
 type GetObjectLockConfigurationInput struct {
 	_ struct{} `type:"structure"`
 
-	// The bucket whose Object Lock configuration you want to retrieve.
+	// The bucket whose object lock configuration you want to retrieve.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13151,7 +13249,7 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) {
 type GetObjectLockConfigurationOutput struct {
 	_ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
 
-	// The specified bucket's Object Lock configuration.
+	// The specified bucket's object lock configuration.
 	ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"`
 }
 
@@ -13235,10 +13333,10 @@ type GetObjectOutput struct {
 	// returned if you have permission to view an object's legal hold status.
 	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-	// The Object Lock mode currently in place for this object.
+	// The object lock mode currently in place for this object.
 	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-	// The date and time when this object's Object Lock will expire.
+	// The date and time when this object's object lock will expire.
 	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
 	// The count of parts this object has.
@@ -14136,7 +14234,7 @@ type HeadObjectInput struct {
 	// does not store the encryption key. The key must be appropriate for use with
 	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
 	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
 	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
 	// Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -14328,10 +14426,10 @@ type HeadObjectOutput struct {
 	// The Legal Hold status for the specified object.
 	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-	// The Object Lock mode currently in place for this object.
+	// The object lock mode currently in place for this object.
 	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-	// The date and time when this object's Object Lock will expire.
+	// The date and time when this object's object lock expires.
 	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
 	// The count of parts this object has.
@@ -14680,6 +14778,9 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
 	return s
 }
 
+// Specifies the inventory configuration for an Amazon S3 bucket. For more information,
+// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
+// in the Amazon Simple Storage Service API Reference.
 type InventoryConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -14697,12 +14798,16 @@ type InventoryConfiguration struct {
 	// Id is a required field
 	Id *string `type:"string" required:"true"`
 
-	// Specifies which object version(s) to included in the inventory results.
+	// Object versions to include in the inventory list. If set to All, the list
+	// includes all the object versions, which adds the version-related fields VersionId,
+	// IsLatest, and DeleteMarker to the list. If set to Current, the list does
+	// not contain these version-related fields.
 	//
 	// IncludedObjectVersions is a required field
 	IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
 
-	// Specifies whether the inventory is enabled or disabled.
+	// Specifies whether the inventory is enabled or disabled. If set to True, an
+	// inventory list is generated. If set to False, no inventory list is generated.
 	//
 	// IsEnabled is a required field
 	IsEnabled *bool `type:"boolean" required:"true"`
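The expanded InventoryConfiguration docs cover its two required switches, IncludedObjectVersions and IsEnabled. A minimal sketch that schedules a daily CSV inventory, assuming hypothetical bucket names:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("my-bucket"),       // hypothetical
		Id:     aws.String("daily-inventory"), // must match the configuration's Id
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id:                     aws.String("daily-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
			Schedule: &s3.InventorySchedule{
				Frequency: aws.String(s3.InventoryFrequencyDaily),
			},
			Destination: &s3.InventoryDestination{
				S3BucketDestination: &s3.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::inventory-reports"), // hypothetical
					Format: aws.String(s3.InventoryFormatCsv),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("put inventory failed:", err)
	}
}
```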
@@ -15145,11 +15250,15 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
15145type LambdaFunctionConfiguration struct { 15250type LambdaFunctionConfiguration struct {
15146 _ struct{} `type:"structure"` 15251 _ struct{} `type:"structure"`
15147 15252
15253 // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
15254 // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
15255 // in the Amazon Simple Storage Service Developer Guide.
15256 //
15148 // Events is a required field 15257 // Events is a required field
15149 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` 15258 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
15150 15259
15151 // A container for object key name filtering rules. For information about key 15260 // Specifies object key name filtering rules. For information about key name
15152 // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 15261 // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
15153 // in the Amazon Simple Storage Service Developer Guide. 15262 // in the Amazon Simple Storage Service Developer Guide.
15154 Filter *NotificationConfigurationFilter `type:"structure"` 15263 Filter *NotificationConfigurationFilter `type:"structure"`
15155 15264
@@ -15157,8 +15266,8 @@ type LambdaFunctionConfiguration struct {
15157 // If you don't provide one, Amazon S3 will assign an ID. 15266 // If you don't provide one, Amazon S3 will assign an ID.
15158 Id *string `type:"string"` 15267 Id *string `type:"string"`
15159 15268
15160 // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3 15269 // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
15161 // can invoke when it detects events of the specified type. 15270 // invokes when the specified event type occurs.
15162 // 15271 //
15163 // LambdaFunctionArn is a required field 15272 // LambdaFunctionArn is a required field
15164 LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` 15273 LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
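
A minimal sketch of how the LambdaFunctionConfiguration above is used in practice (not taken from this change): subscribe a function to ObjectCreated events with a key-prefix filter. The bucket name and function ARN are placeholders; note that PutBucketNotificationConfiguration replaces the bucket's whole notification configuration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Invoke a Lambda function for every object created under images/.
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("source-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
				Events:            []*string{aws.String(s3.EventS3ObjectCreated)},
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:on-upload"), // placeholder
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{FilterRules: []*s3.FilterRule{{
						Name:  aws.String(s3.FilterRuleNamePrefix),
						Value: aws.String("images/"),
					}}},
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put notification configuration:", err)
	}
}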
@@ -15309,8 +15418,11 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp
15309type LifecycleRule struct { 15418type LifecycleRule struct {
15310 _ struct{} `type:"structure"` 15419 _ struct{} `type:"structure"`
15311 15420
15312 // Specifies the days since the initiation of an Incomplete Multipart Upload 15421 // Specifies the days since the initiation of an incomplete multipart upload
15313 // that Lifecycle will wait before permanently removing all parts of the upload. 15422 // that Amazon S3 will wait before permanently removing all parts of the upload.
15423 // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
15424 // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
15425 // in the Amazon Simple Storage Service Developer Guide.
15314 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` 15426 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
15315 15427
15316 Expiration *LifecycleExpiration `type:"structure"` 15428 Expiration *LifecycleExpiration `type:"structure"`
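
For the AbortIncompleteMultipartUpload behavior documented above, a sketch (illustrative only; bucket name is a placeholder) of a lifecycle rule that cleans up stale uploads:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Abort any multipart upload still incomplete seven days after initiation.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("source-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-mpu"),
				Status: aws.String(s3.ExpirationStatusEnabled),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // empty prefix: whole bucket
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put lifecycle configuration:", err)
	}
}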
@@ -17267,9 +17379,10 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location {
17267 return s 17379 return s
17268} 17380}
17269 17381
17270// Container for logging information. Presence of this element indicates that 17382// Describes where logs are stored and the prefix that Amazon S3 assigns to
17271// logging is enabled. Parameters TargetBucket and TargetPrefix are required 17383// all log object keys for a bucket. For more information, see PUT Bucket logging
17272// in this case. 17384// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
17385// in the Amazon Simple Storage Service API Reference.
17273type LoggingEnabled struct { 17386type LoggingEnabled struct {
17274 _ struct{} `type:"structure"` 17387 _ struct{} `type:"structure"`
17275 17388
@@ -17285,8 +17398,9 @@ type LoggingEnabled struct {
17285 17398
17286 TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` 17399 TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
17287 17400
17288 // This element lets you specify a prefix for the keys that the log files will 17401 // A prefix for all log object keys. If you store log files from multiple Amazon
17289 // be stored under. 17402 // S3 buckets in a single bucket, you can use a prefix to distinguish which
17403 // log files came from which bucket.
17290 // 17404 //
17291 // TargetPrefix is a required field 17405 // TargetPrefix is a required field
17292 TargetPrefix *string `type:"string" required:"true"` 17406 TargetPrefix *string `type:"string" required:"true"`
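
The reworded TargetPrefix doc above suggests the common pattern of one shared log bucket; a rough sketch (placeholders throughout, not from this change):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Deliver access logs to log-bucket, keyed by the source bucket's name
	// so several buckets can share one log bucket.
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("source-bucket"), // placeholder
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("log-bucket"),          // placeholder
				TargetPrefix: aws.String("source-bucket-logs/"), // distinguishes this bucket's logs
			},
		},
	})
	if err != nil {
		fmt.Println("put bucket logging:", err)
	}
}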
@@ -17429,6 +17543,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
17429 return s 17543 return s
17430} 17544}
17431 17545
17546// Specifies a metrics configuration for the CloudWatch request metrics (specified
17547// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating
17548// an existing metrics configuration, note that this is a full replacement of
17549// the existing metrics configuration. If you don't include the elements you
17550// want to keep, they are erased. For more information, see PUT Bucket metrics
17551// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
17552// in the Amazon Simple Storage Service API Reference.
17432type MetricsConfiguration struct { 17553type MetricsConfiguration struct {
17433 _ struct{} `type:"structure"` 17554 _ struct{} `type:"structure"`
17434 17555
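
The full-replacement semantics described in the new comment above matter in practice; a minimal sketch (bucket and ID are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// A full replacement: omitted elements of any existing configuration
	// with this ID are erased, so always send the complete desired state.
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("source-bucket"), // placeholder
		Id:     aws.String("EntireBucket"),
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("EntireBucket"), // no Filter: metrics cover the whole bucket
		},
	})
	if err != nil {
		fmt.Println("put metrics configuration:", err)
	}
}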
@@ -17624,7 +17745,7 @@ type NoncurrentVersionExpiration struct {
17624 // Specifies the number of days an object is noncurrent before Amazon S3 can 17745 // Specifies the number of days an object is noncurrent before Amazon S3 can
17625 // perform the associated action. For information about the noncurrent days 17746 // perform the associated action. For information about the noncurrent days
17626 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent 17747 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
17627 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) 17748 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
17628 // in the Amazon Simple Storage Service Developer Guide. 17749 // in the Amazon Simple Storage Service Developer Guide.
17629 NoncurrentDays *int64 `type:"integer"` 17750 NoncurrentDays *int64 `type:"integer"`
17630} 17751}
@@ -17646,11 +17767,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers
17646} 17767}
17647 17768
17648// Container for the transition rule that describes when noncurrent objects 17769// Container for the transition rule that describes when noncurrent objects
17649// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or 17770// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER,
17650// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning 17771// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
17651// is suspended), you can set this action to request that Amazon S3 transition 17772// is suspended), you can set this action to request that Amazon S3 transition
17652// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, 17773// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
17653// GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's 17774// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
17654// lifetime. 17775// lifetime.
17655type NoncurrentVersionTransition struct { 17776type NoncurrentVersionTransition struct {
17656 _ struct{} `type:"structure"` 17777 _ struct{} `type:"structure"`
@@ -17693,10 +17814,16 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi
17693type NotificationConfiguration struct { 17814type NotificationConfiguration struct {
17694 _ struct{} `type:"structure"` 17815 _ struct{} `type:"structure"`
17695 17816
17817 // Describes the AWS Lambda functions to invoke and the events for which to
17818 // invoke them.
17696 LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` 17819 LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
17697 17820
17821 // The Amazon Simple Queue Service queues to publish messages to and the events
17822 // for which to publish messages.
17698 QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` 17823 QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
17699 17824
17825 // The topic to which notifications are sent and the events for which notifications
17826 // are generated.
17700 TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` 17827 TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
17701} 17828}
17702 17829
@@ -17806,8 +17933,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
17806 return s 17933 return s
17807} 17934}
17808 17935
17809// A container for object key name filtering rules. For information about key 17936// Specifies object key name filtering rules. For information about key name
17810// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 17937// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
17811// in the Amazon Simple Storage Service Developer Guide. 17938// in the Amazon Simple Storage Service Developer Guide.
17812type NotificationConfigurationFilter struct { 17939type NotificationConfigurationFilter struct {
17813 _ struct{} `type:"structure"` 17940 _ struct{} `type:"structure"`
@@ -17945,14 +18072,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
17945 return s 18072 return s
17946} 18073}
17947 18074
17948// The container element for Object Lock configuration parameters. 18075// The container element for object lock configuration parameters.
17949type ObjectLockConfiguration struct { 18076type ObjectLockConfiguration struct {
17950 _ struct{} `type:"structure"` 18077 _ struct{} `type:"structure"`
17951 18078
17952 // Indicates whether this bucket has an Object Lock configuration enabled. 18079 // Indicates whether this bucket has an object lock configuration enabled.
17953 ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` 18080 ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
17954 18081
17955 // The Object Lock rule in place for the specified object. 18082 // The object lock rule in place for the specified object.
17956 Rule *ObjectLockRule `type:"structure"` 18083 Rule *ObjectLockRule `type:"structure"`
17957} 18084}
17958 18085
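
A sketch of applying the ObjectLockConfiguration documented above (illustrative; the bucket name is a placeholder and must have been created with object lock enabled):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Default retention: every new object version is locked in GOVERNANCE
	// mode for 30 days unless a request overrides it.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("locked-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("put object lock configuration:", err)
	}
}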
@@ -18009,7 +18136,7 @@ type ObjectLockRetention struct {
18009 // Indicates the Retention mode for the specified object. 18136 // Indicates the Retention mode for the specified object.
18010 Mode *string `type:"string" enum:"ObjectLockRetentionMode"` 18137 Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
18011 18138
18012 // The date on which this Object Lock Retention will expire. 18139 // The date on which this object lock retention expires.
18013 RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` 18140 RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
18014} 18141}
18015 18142
@@ -18035,7 +18162,7 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti
18035 return s 18162 return s
18036} 18163}
18037 18164
18038// The container element for an Object Lock rule. 18165// The container element for an object lock rule.
18039type ObjectLockRule struct { 18166type ObjectLockRule struct {
18040 _ struct{} `type:"structure"` 18167 _ struct{} `type:"structure"`
18041 18168
@@ -18418,6 +18545,7 @@ func (s *ProgressEvent) UnmarshalEvent(
18418 return nil 18545 return nil
18419} 18546}
18420 18547
18548// Specifies the Block Public Access configuration for an Amazon S3 bucket.
18421type PublicAccessBlockConfiguration struct { 18549type PublicAccessBlockConfiguration struct {
18422 _ struct{} `type:"structure"` 18550 _ struct{} `type:"structure"`
18423 18551
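
For the Block Public Access configuration introduced above, a minimal sketch (bucket name is a placeholder) that turns on all four protections:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Block every form of public access for the bucket.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("source-bucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		fmt.Println("put public access block:", err)
	}
}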
@@ -18575,6 +18703,7 @@ type PutBucketAclInput struct {
18575 // The canned ACL to apply to the bucket. 18703 // The canned ACL to apply to the bucket.
18576 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` 18704 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
18577 18705
18706 // Contains the elements that set the ACL permissions for an object per grantee.
18578 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 18707 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
18579 18708
18580 // Bucket is a required field 18709 // Bucket is a required field
@@ -18710,7 +18839,7 @@ type PutBucketAnalyticsConfigurationInput struct {
18710 // Bucket is a required field 18839 // Bucket is a required field
18711 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 18840 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
18712 18841
18713 // The identifier used to represent an analytics configuration. 18842 // The ID that identifies the analytics configuration.
18714 // 18843 //
18715 // Id is a required field 18844 // Id is a required field
18716 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` 18845 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
@@ -18798,6 +18927,11 @@ type PutBucketCorsInput struct {
18798 // Bucket is a required field 18927 // Bucket is a required field
18799 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 18928 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
18800 18929
18930 // Describes the cross-origin access configuration for objects in an Amazon
18931 // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
18932 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
18933 // Simple Storage Service Developer Guide.
18934 //
18801 // CORSConfiguration is a required field 18935 // CORSConfiguration is a required field
18802 CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 18936 CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
18803} 18937}
@@ -18872,14 +19006,16 @@ func (s PutBucketCorsOutput) GoString() string {
18872type PutBucketEncryptionInput struct { 19006type PutBucketEncryptionInput struct {
18873 _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` 19007 _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
18874 19008
18875 // The name of the bucket for which the server-side encryption configuration 19009 // Specifies default encryption for a bucket using server-side encryption with
18876 // is set. 19010 // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
19011 // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket
19012 // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
19013 // in the Amazon Simple Storage Service Developer Guide.
18877 // 19014 //
18878 // Bucket is a required field 19015 // Bucket is a required field
18879 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 19016 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
18880 19017
18881 // Container for server-side encryption configuration rules. Currently S3 supports 19018 // Specifies the default server-side-encryption configuration.
18882 // one rule only.
18883 // 19019 //
18884 // ServerSideEncryptionConfiguration is a required field 19020 // ServerSideEncryptionConfiguration is a required field
18885 ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 19021 ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
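
A sketch of the default-encryption setup the new comments above describe (bucket name and KMS key ARN are placeholders, not from this change):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Default every unencrypted PUT to SSE-KMS under the given key.
	// KMSMasterKeyID is only valid when SSEAlgorithm is aws:kms; for
	// SSE-S3, use s3.ServerSideEncryptionAes256 and omit the key.
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("source-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:123456789012:key/EXAMPLE"), // placeholder
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put bucket encryption:", err)
	}
}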
@@ -19053,6 +19189,9 @@ type PutBucketLifecycleConfigurationInput struct {
19053 // Bucket is a required field 19189 // Bucket is a required field
19054 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 19190 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
19055 19191
19192 // Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
19193 // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
19194 // in the Amazon Simple Storage Service Developer Guide.
19056 LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 19195 LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
19057} 19196}
19058 19197
@@ -19612,6 +19751,9 @@ type PutBucketReplicationInput struct {
19612 // 19751 //
19613 // ReplicationConfiguration is a required field 19752 // ReplicationConfiguration is a required field
19614 ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 19753 ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
19754
19755 // A token that allows Amazon S3 object lock to be enabled for an existing bucket.
19756 Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
19615} 19757}
19616 19758
19617// String returns the string representation 19759// String returns the string representation
@@ -19667,6 +19809,12 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo
19667 return s 19809 return s
19668} 19810}
19669 19811
19812// SetToken sets the Token field's value.
19813func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput {
19814 s.Token = &v
19815 return s
19816}
19817
19670type PutBucketReplicationOutput struct { 19818type PutBucketReplicationOutput struct {
19671 _ struct{} `type:"structure"` 19819 _ struct{} `type:"structure"`
19672} 19820}
@@ -19845,6 +19993,10 @@ type PutBucketVersioningInput struct {
19845 // and the value that is displayed on your authentication device. 19993 // and the value that is displayed on your authentication device.
19846 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` 19994 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
19847 19995
19996 // Describes the versioning state of an Amazon S3 bucket. For more information,
19997 // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
19998 // in the Amazon Simple Storage Service API Reference.
19999 //
19848 // VersioningConfiguration is a required field 20000 // VersioningConfiguration is a required field
19849 VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 20001 VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
19850} 20002}
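
A minimal sketch of the VersioningConfiguration documented above (bucket name is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Enable versioning; s3.BucketVersioningStatusSuspended suspends it.
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("source-bucket"), // placeholder
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		fmt.Println("put bucket versioning:", err)
	}
}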
@@ -19923,6 +20075,8 @@ type PutBucketWebsiteInput struct {
19923 // Bucket is a required field 20075 // Bucket is a required field
19924 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 20076 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
19925 20077
20078 // Specifies website configuration parameters for an Amazon S3 bucket.
20079 //
19926 // WebsiteConfiguration is a required field 20080 // WebsiteConfiguration is a required field
19927 WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 20081 WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
19928} 20082}
@@ -20000,6 +20154,7 @@ type PutObjectAclInput struct {
20000 // The canned ACL to apply to the object. 20154 // The canned ACL to apply to the object.
20001 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` 20155 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
20002 20156
20157 // Contains the elements that set the ACL permissions for an object per grantee.
20003 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 20158 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
20004 20159
20005 // Bucket is a required field 20160 // Bucket is a required field
@@ -20201,7 +20356,8 @@ type PutObjectInput struct {
20201 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` 20356 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
20202 20357
20203 // The base64-encoded 128-bit MD5 digest of the part data. This parameter is 20358 // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
20204 // auto-populated when using the command from the CLI 20359 // auto-populated when using the command from the CLI. This parameted is required
20360 // if object lock parameters are specified.
20205 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` 20361 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
20206 20362
20207 // A standard MIME type describing the format of the object data. 20363 // A standard MIME type describing the format of the object data.
@@ -20233,10 +20389,10 @@ type PutObjectInput struct {
20233 // The Legal Hold status that you want to apply to the specified object. 20389 // The Legal Hold status that you want to apply to the specified object.
20234 ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` 20390 ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
20235 20391
20236 // The Object Lock mode that you want to apply to this object. 20392 // The object lock mode that you want to apply to this object.
20237 ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` 20393 ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
20238 20394
20239 // The date and time when you want this object's Object Lock to expire. 20395 // The date and time when you want this object's object lock to expire.
20240 ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` 20396 ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
20241 20397
20242 // Confirms that the requester knows that she or he will be charged for the 20398 // Confirms that the requester knows that she or he will be charged for the
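
Since the amended ContentMD5 note above says the digest is required when object lock parameters are present, here is a sketch of computing it by hand (bucket, key, payload, and retention window are all placeholders):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	body := []byte("audit record") // placeholder payload
	sum := md5.Sum(body)           // object lock requests must carry Content-MD5

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:                    aws.String("locked-bucket"), // placeholder
		Key:                       aws.String("records/2019-06-01.log"),
		Body:                      bytes.NewReader(body),
		ContentMD5:                aws.String(base64.StdEncoding.EncodeToString(sum[:])),
		ObjectLockMode:            aws.String(s3.ObjectLockModeGovernance),
		ObjectLockRetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
	})
	if err != nil {
		fmt.Println("put object:", err)
	}
}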
@@ -20253,13 +20409,18 @@ type PutObjectInput struct {
20253 // does not store the encryption key. The key must be appropriate for use with 20409 // does not store the encryption key. The key must be appropriate for use with
20254 // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm 20410 // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
20255 // header. 20411 // header.
20256 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` 20412 SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
20257 20413
20258 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 20414 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
20259 // Amazon S3 uses this header for a message integrity check to ensure the encryption 20415 // Amazon S3 uses this header for a message integrity check to ensure the encryption
20260 // key was transmitted without error. 20416 // key was transmitted without error.
20261 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` 20417 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
20262 20418
20419 // Specifies the AWS KMS Encryption Context to use for object encryption. The
20420 // value of this header is a base64-encoded UTF-8 string holding JSON with the
20421 // encryption context key-value pairs.
20422 SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
20423
20263 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT 20424 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
20264 // requests for an object protected by AWS KMS will fail if not made via SSL 20425 // requests for an object protected by AWS KMS will fail if not made via SSL
20265 // or using SigV4. Documentation on configuring any of the officially supported 20426 // or using SigV4. Documentation on configuring any of the officially supported
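
To illustrate the SSEKMSEncryptionContext field added above: per its doc comment, the header value is base64-encoded JSON of the context key-value pairs. A rough sketch (the "department" pair, bucket, and key are made-up examples):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Encryption context pairs, serialized as JSON and then base64-encoded.
	ctx, _ := json.Marshal(map[string]string{"department": "finance"}) // illustrative pair

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:                  aws.String("source-bucket"), // placeholder
		Key:                     aws.String("report.csv"),
		Body:                    strings.NewReader("a,b,c\n"),
		ServerSideEncryption:    aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSEncryptionContext: aws.String(base64.StdEncoding.EncodeToString(ctx)),
	})
	if err != nil {
		fmt.Println("put object:", err)
	}
}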
@@ -20473,6 +20634,12 @@ func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
20473 return s 20634 return s
20474} 20635}
20475 20636
20637// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
20638func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput {
20639 s.SSEKMSEncryptionContext = &v
20640 return s
20641}
20642
20476// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 20643// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
20477func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { 20644func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
20478 s.SSEKMSKeyId = &v 20645 s.SSEKMSKeyId = &v
@@ -20626,12 +20793,12 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo
20626type PutObjectLockConfigurationInput struct { 20793type PutObjectLockConfigurationInput struct {
20627 _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` 20794 _ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
20628 20795
20629 // The bucket whose Object Lock configuration you want to create or replace. 20796 // The bucket whose object lock configuration you want to create or replace.
20630 // 20797 //
20631 // Bucket is a required field 20798 // Bucket is a required field
20632 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 20799 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
20633 20800
20634 // The Object Lock configuration that you want to apply to the specified bucket. 20801 // The object lock configuration that you want to apply to the specified bucket.
20635 ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` 20802 ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
20636 20803
20637 // Confirms that the requester knows that she or he will be charged for the 20804 // Confirms that the requester knows that she or he will be charged for the
@@ -20640,7 +20807,7 @@ type PutObjectLockConfigurationInput struct {
20640 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html 20807 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
20641 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` 20808 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
20642 20809
20643 // A token to allow Object Lock to be enabled for an existing bucket. 20810 // A token to allow Amazon S3 object lock to be enabled for an existing bucket.
20644 Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` 20811 Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
20645} 20812}
20646 20813
@@ -20749,6 +20916,11 @@ type PutObjectOutput struct {
20749 // verification of the customer-provided encryption key. 20916 // verification of the customer-provided encryption key.
20750 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` 20917 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
20751 20918
20919 // If present, specifies the AWS KMS Encryption Context to use for object encryption.
20920 // The value of this header is a base64-encoded UTF-8 string holding JSON with
20921 // the encryption context key-value pairs.
20922 SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
20923
20752 // If present, specifies the ID of the AWS Key Management Service (KMS) master 20924 // If present, specifies the ID of the AWS Key Management Service (KMS) master
20753 // encryption key that was used for the object. 20925 // encryption key that was used for the object.
20754 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` 20926 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
@@ -20801,6 +20973,12 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
20801 return s 20973 return s
20802} 20974}
20803 20975
20976// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
20977func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput {
20978 s.SSEKMSEncryptionContext = &v
20979 return s
20980}
20981
20804// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 20982// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
20805func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { 20983func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
20806 s.SSEKMSKeyId = &v 20984 s.SSEKMSKeyId = &v
@@ -21139,17 +21317,16 @@ func (s PutPublicAccessBlockOutput) GoString() string {
21139 return s.String() 21317 return s.String()
21140} 21318}
21141 21319
21142// A container for specifying the configuration for publication of messages 21320// Specifies the configuration for publishing messages to an Amazon Simple Queue
21143// to an Amazon Simple Queue Service (Amazon SQS) queue.when Amazon S3 detects 21321// Service (Amazon SQS) queue when Amazon S3 detects specified events.
21144// specified events.
21145type QueueConfiguration struct { 21322type QueueConfiguration struct {
21146 _ struct{} `type:"structure"` 21323 _ struct{} `type:"structure"`
21147 21324
21148 // Events is a required field 21325 // Events is a required field
21149 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` 21326 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
21150 21327
21151 // A container for object key name filtering rules. For information about key 21328 // Specifies object key name filtering rules. For information about key name
21152 // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 21329 // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
21153 // in the Amazon Simple Storage Service Developer Guide. 21330 // in the Amazon Simple Storage Service Developer Guide.
21154 Filter *NotificationConfigurationFilter `type:"structure"` 21331 Filter *NotificationConfigurationFilter `type:"structure"`
21155 21332
@@ -21158,7 +21335,7 @@ type QueueConfiguration struct {
21158 Id *string `type:"string"` 21335 Id *string `type:"string"`
21159 21336
21160 // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 21337 // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
21161 // will publish a message when it detects events of the specified type. 21338 // publishes a message when it detects events of the specified type.
21162 // 21339 //
21163 // QueueArn is a required field 21340 // QueueArn is a required field
21164 QueueArn *string `locationName:"Queue" type:"string" required:"true"` 21341 QueueArn *string `locationName:"Queue" type:"string" required:"true"`
@@ -21304,6 +21481,8 @@ func (s *RecordsEvent) UnmarshalEvent(
21304 return nil 21481 return nil
21305} 21482}
21306 21483
21484// Specifies how requests are redirected. In the event of an error, you can
21485// specify a different error code to return.
21307type Redirect struct { 21486type Redirect struct {
21308 _ struct{} `type:"structure"` 21487 _ struct{} `type:"structure"`
21309 21488
@@ -21314,8 +21493,8 @@ type Redirect struct {
21314 // siblings is present. 21493 // siblings is present.
21315 HttpRedirectCode *string `type:"string"` 21494 HttpRedirectCode *string `type:"string"`
21316 21495
21317 // Protocol to use (http, https) when redirecting requests. The default is the 21496 // Protocol to use when redirecting requests. The default is the protocol that
21318 // protocol that is used in the original request. 21497 // is used in the original request.
21319 Protocol *string `type:"string" enum:"Protocol"` 21498 Protocol *string `type:"string" enum:"Protocol"`
21320 21499
21321 // The object key prefix to use in the redirect request. For example, to redirect 21500 // The object key prefix to use in the redirect request. For example, to redirect
@@ -21327,7 +21506,7 @@ type Redirect struct {
21327 ReplaceKeyPrefixWith *string `type:"string"` 21506 ReplaceKeyPrefixWith *string `type:"string"`
21328 21507
21329 // The specific object key to use in the redirect request. For example, redirect 21508 // The specific object key to use in the redirect request. For example, redirect
21330 // request to error.html. Not required if one of the sibling is present. Can 21509 // request to error.html. Not required if one of the siblings is present. Can
21331 // be present only if ReplaceKeyPrefixWith is not provided. 21510 // be present only if ReplaceKeyPrefixWith is not provided.
21332 ReplaceKeyWith *string `type:"string"` 21511 ReplaceKeyWith *string `type:"string"`
21333} 21512}
@@ -21372,16 +21551,18 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
21372 return s 21551 return s
21373} 21552}
21374 21553
21554// Specifies the redirect behavior of all requests to a website endpoint of
21555// an Amazon S3 bucket.
21375type RedirectAllRequestsTo struct { 21556type RedirectAllRequestsTo struct {
21376 _ struct{} `type:"structure"` 21557 _ struct{} `type:"structure"`
21377 21558
21378 // Name of the host where requests will be redirected. 21559 // Name of the host where requests are redirected.
21379 // 21560 //
21380 // HostName is a required field 21561 // HostName is a required field
21381 HostName *string `type:"string" required:"true"` 21562 HostName *string `type:"string" required:"true"`
21382 21563
21383 // Protocol to use (http, https) when redirecting requests. The default is the 21564 // Protocol to use when redirecting requests. The default is the protocol that
21384 // protocol that is used in the original request. 21565 // is used in the original request.
21385 Protocol *string `type:"string" enum:"Protocol"` 21566 Protocol *string `type:"string" enum:"Protocol"`
21386} 21567}
21387 21568
@@ -21426,7 +21607,9 @@ type ReplicationConfiguration struct {
21426 _ struct{} `type:"structure"` 21607 _ struct{} `type:"structure"`
21427 21608
21428 // The Amazon Resource Name (ARN) of the AWS Identity and Access Management 21609 // The Amazon Resource Name (ARN) of the AWS Identity and Access Management
21429 // (IAM) role that Amazon S3 can assume when replicating the objects. 21610 // (IAM) role that Amazon S3 assumes when replicating objects. For more information,
21611 // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html)
21612 // in the Amazon Simple Storage Service Developer Guide.
21430 // 21613 //
21431 // Role is a required field 21614 // Role is a required field
21432 Role *string `type:"string" required:"true"` 21615 Role *string `type:"string" required:"true"`
@@ -21486,7 +21669,7 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
21486 return s 21669 return s
21487} 21670}
21488 21671
21489// A container for information about a specific replication rule. 21672// Specifies which Amazon S3 objects to replicate and where to store the replicas.
21490type ReplicationRule struct { 21673type ReplicationRule struct {
21491 _ struct{} `type:"structure"` 21674 _ struct{} `type:"structure"`
21492 21675
@@ -21506,7 +21689,8 @@ type ReplicationRule struct {
21506 ID *string `type:"string"` 21689 ID *string `type:"string"`
21507 21690
21508 // An object keyname prefix that identifies the object or objects to which the 21691 // An object keyname prefix that identifies the object or objects to which the
21509 // rule applies. The maximum prefix length is 1,024 characters. 21692 // rule applies. The maximum prefix length is 1,024 characters. To include all
21693 // objects in a bucket, specify an empty string.
21510 // 21694 //
21511 // Deprecated: Prefix has been deprecated 21695 // Deprecated: Prefix has been deprecated
21512 Prefix *string `deprecated:"true" type:"string"` 21696 Prefix *string `deprecated:"true" type:"string"`
@@ -21522,7 +21706,7 @@ type ReplicationRule struct {
21522 // * Same object qualify tag based filter criteria specified in multiple 21706 // * Same object qualify tag based filter criteria specified in multiple
21523 // rules 21707 // rules
21524 // 21708 //
21525 // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) 21709 // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
21526 // in the Amazon S3 Developer Guide. 21710 // in the Amazon S3 Developer Guide.
21527 Priority *int64 `type:"integer"` 21711 Priority *int64 `type:"integer"`
21528 21712
@@ -21531,12 +21715,9 @@ type ReplicationRule struct {
21531 // replication of these objects. Currently, Amazon S3 supports only the filter 21715 // replication of these objects. Currently, Amazon S3 supports only the filter
21532 // that you can specify for objects created with server-side encryption using 21716 // that you can specify for objects created with server-side encryption using
21533 // an AWS KMS-Managed Key (SSE-KMS). 21717 // an AWS KMS-Managed Key (SSE-KMS).
21534 //
21535 // If you want Amazon S3 to replicate objects created with server-side encryption
21536 // using AWS KMS-Managed Keys.
21537 SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` 21718 SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
21538 21719
21539 // If status isn't enabled, the rule is ignored. 21720 // Specifies whether the rule is enabled.
21540 // 21721 //
21541 // Status is a required field 21722 // Status is a required field
21542 Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` 21723 Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
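
A sketch combining the ReplicationRule pieces documented above, including the SSE-KMS source selection filter (all names and ARNs are placeholders; the exact required-field combinations are enforced server-side, so treat this as illustrative). The Token field added in this change (x-amz-bucket-object-lock-token) can be set the same way via SetToken when enabling object lock on an existing bucket.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Replicate SSE-KMS objects to another bucket, re-encrypting replicas
	// with a key in the destination region.
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				ID:       aws.String("replicate-kms"),
				Status:   aws.String(s3.ReplicationRuleStatusEnabled),
				Priority: aws.Int64(1),
				Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("")}, // empty prefix: all objects
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
				},
				SourceSelectionCriteria: &s3.SourceSelectionCriteria{
					SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
						Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
					},
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::destination-bucket"), // placeholder
					EncryptionConfiguration: &s3.EncryptionConfiguration{
						ReplicaKmsKeyID: aws.String("arn:aws:kms:us-west-2:123456789012:key/EXAMPLE"), // placeholder
					},
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("put bucket replication:", err)
	}
}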
@@ -22051,6 +22232,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest {
22051 return s 22232 return s
22052} 22233}
22053 22234
22235// Specifies the redirect behavior and when a redirect is applied.
22054type RoutingRule struct { 22236type RoutingRule struct {
22055 _ struct{} `type:"structure"` 22237 _ struct{} `type:"structure"`
22056 22238
@@ -22103,16 +22285,22 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
22103 return s 22285 return s
22104} 22286}
22105 22287
22288// Specifies lifecycle rules for an Amazon S3 bucket. For more information,
22289// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html)
22290// in the Amazon Simple Storage Service API Reference.
22106type Rule struct { 22291type Rule struct {
22107 _ struct{} `type:"structure"` 22292 _ struct{} `type:"structure"`
22108 22293
22109 // Specifies the days since the initiation of an Incomplete Multipart Upload 22294 // Specifies the days since the initiation of an incomplete multipart upload
22110 // that Lifecycle will wait before permanently removing all parts of the upload. 22295 // that Amazon S3 will wait before permanently removing all parts of the upload.
22296 // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
22297 // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
22298 // in the Amazon Simple Storage Service Developer Guide.
22111 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` 22299 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
22112 22300
22113 Expiration *LifecycleExpiration `type:"structure"` 22301 Expiration *LifecycleExpiration `type:"structure"`
22114 22302
22115 // Unique identifier for the rule. The value cannot be longer than 255 characters. 22303 // Unique identifier for the rule. The value can't be longer than 255 characters.
22116 ID *string `type:"string"` 22304 ID *string `type:"string"`
22117 22305
22118 // Specifies when noncurrent object versions expire. Upon expiration, Amazon 22306 // Specifies when noncurrent object versions expire. Upon expiration, Amazon
@@ -22123,25 +22311,27 @@ type Rule struct {
22123 NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` 22311 NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
22124 22312
22125 // Container for the transition rule that describes when noncurrent objects 22313 // Container for the transition rule that describes when noncurrent objects
22126 // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or 22314 // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER,
22127 // DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning 22315 // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
22128 // is suspended), you can set this action to request that Amazon S3 transition 22316 // is suspended), you can set this action to request that Amazon S3 transition
22129 // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, 22317 // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
22130 // GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's 22318 // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
22131 // lifetime. 22319 // lifetime.
22132 NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` 22320 NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
22133 22321
22134 // Prefix identifying one or more objects to which the rule applies. 22322 // Object key prefix that identifies one or more objects to which this rule
22323 // applies.
22135 // 22324 //
22136 // Prefix is a required field 22325 // Prefix is a required field
22137 Prefix *string `type:"string" required:"true"` 22326 Prefix *string `type:"string" required:"true"`
22138 22327
22139 // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule 22328 // If Enabled, the rule is currently being applied. If Disabled, the rule is
22140 // is not currently being applied. 22329 // not currently being applied.
22141 // 22330 //
22142 // Status is a required field 22331 // Status is a required field
22143 Status *string `type:"string" required:"true" enum:"ExpirationStatus"` 22332 Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
22144 22333
22334 // Specifies when an object transitions to a specified storage class.
22145 Transition *Transition `type:"structure"` 22335 Transition *Transition `type:"structure"`
22146} 22336}
22147 22337
@@ -22537,15 +22727,15 @@ type SelectObjectContentInput struct {
22537 // Specifies if periodic request progress information should be enabled. 22727 // Specifies if periodic request progress information should be enabled.
22538 RequestProgress *RequestProgress `type:"structure"` 22728 RequestProgress *RequestProgress `type:"structure"`
22539 22729
22540 // The SSE Algorithm used to encrypt the object. For more information, see 22730 // The SSE Algorithm used to encrypt the object. For more information, see Server-Side
22541 // Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 22731 // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
22542 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` 22732 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
22543 22733
22544 // The SSE Customer Key. For more information, see Server-Side Encryption (Using 22734 // The SSE Customer Key. For more information, see Server-Side Encryption (Using
22545 // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 22735 // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
22546 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` 22736 SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
22547 22737
22548 // The SSE Customer Key MD5. For more information, see Server-Side Encryption 22738 // The SSE Customer Key MD5. For more information, see Server-Side Encryption
22549 // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 22739 // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
22550 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` 22740 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
22551} 22741}
@@ -22792,13 +22982,15 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec
22792} 22982}
22793 22983
22794// Describes the default server-side encryption to apply to new objects in the 22984// Describes the default server-side encryption to apply to new objects in the
22795// bucket. If Put Object request does not specify any server-side encryption, 22985// bucket. If a PUT Object request doesn't specify any server-side encryption,
22796// this default encryption will be applied. 22986// this default encryption will be applied. For more information, see PUT Bucket
22987// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html)
22988// in the Amazon Simple Storage Service API Reference.
22797type ServerSideEncryptionByDefault struct { 22989type ServerSideEncryptionByDefault struct {
22798 _ struct{} `type:"structure"` 22990 _ struct{} `type:"structure"`
22799 22991
22800 // KMS master key ID to use for the default encryption. This parameter is allowed 22992 // KMS master key ID to use for the default encryption. This parameter is allowed
22801 // if SSEAlgorithm is aws:kms. 22993 // if and only if SSEAlgorithm is set to aws:kms.
22802 KMSMasterKeyID *string `type:"string" sensitive:"true"` 22994 KMSMasterKeyID *string `type:"string" sensitive:"true"`
22803 22995
22804 // Server-side encryption algorithm to use for the default encryption. 22996 // Server-side encryption algorithm to use for the default encryption.
@@ -22842,8 +23034,7 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc
22842 return s 23034 return s
22843} 23035}
22844 23036
22845// Container for server-side encryption configuration rules. Currently S3 supports 23037// Specifies the default server-side-encryption configuration.
22846// one rule only.
22847type ServerSideEncryptionConfiguration struct { 23038type ServerSideEncryptionConfiguration struct {
22848 _ struct{} `type:"structure"` 23039 _ struct{} `type:"structure"`
22849 23040
@@ -22893,13 +23084,12 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu
22893 return s 23084 return s
22894} 23085}
22895 23086
22896// Container for information about a particular server-side encryption configuration 23087// Specifies the default server-side encryption configuration.
22897// rule.
22898type ServerSideEncryptionRule struct { 23088type ServerSideEncryptionRule struct {
22899 _ struct{} `type:"structure"` 23089 _ struct{} `type:"structure"`
22900 23090
22901 // Describes the default server-side encryption to apply to new objects in the 23091 // Specifies the default server-side encryption to apply to new objects in the
22902 // bucket. If Put Object request does not specify any server-side encryption, 23092 // bucket. If a PUT Object request doesn't specify any server-side encryption,
22903 // this default encryption will be applied. 23093 // this default encryption will be applied.
22904 ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` 23094 ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"`
22905} 23095}
@@ -22935,13 +23125,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv
22935 return s 23125 return s
22936} 23126}
22937 23127
22938// A container for filters that define which source objects should be replicated. 23128// A container that describes additional filters for identifying the source
23129// objects that you want to replicate. You can choose to enable or disable the
23130// replication of these objects. Currently, Amazon S3 supports only the filter
23131// that you can specify for objects created with server-side encryption using
23132// an AWS KMS-Managed Key (SSE-KMS).
22939type SourceSelectionCriteria struct { 23133type SourceSelectionCriteria struct {
22940 _ struct{} `type:"structure"` 23134 _ struct{} `type:"structure"`
22941 23135
22942 // A container for filter information for the selection of S3 objects encrypted 23136 // A container for filter information for the selection of Amazon S3 objects
22943 // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, 23137 // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication
22944 // this element is required. 23138 // configuration, this element is required.
22945 SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` 23139 SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
22946} 23140}
22947 23141
@@ -22981,8 +23175,8 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb
22981type SseKmsEncryptedObjects struct { 23175type SseKmsEncryptedObjects struct {
22982 _ struct{} `type:"structure"` 23176 _ struct{} `type:"structure"`
22983 23177
22984 // If the status is not Enabled, replication for S3 objects encrypted with AWS 23178 // Specifies whether Amazon S3 replicates objects created with server-side encryption
22985 // KMS is disabled. 23179 // using an AWS KMS-managed key.
22986 // 23180 //
22987 // Status is a required field 23181 // Status is a required field
22988 Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` 23182 Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"`
@@ -23098,11 +23292,14 @@ func (s *StatsEvent) UnmarshalEvent(
23098 return nil 23292 return nil
23099} 23293}
23100 23294
23295// Specifies data related to access patterns to be collected and made available
23296// to analyze the tradeoffs between different storage classes for an Amazon
23297// S3 bucket.
23101type StorageClassAnalysis struct { 23298type StorageClassAnalysis struct {
23102 _ struct{} `type:"structure"` 23299 _ struct{} `type:"structure"`
23103 23300
23104 // A container used to describe how data related to the storage class analysis 23301 // Specifies how data related to the storage class analysis for an Amazon S3
23105 // should be exported. 23302 // bucket should be exported.
23106 DataExport *StorageClassAnalysisDataExport `type:"structure"` 23303 DataExport *StorageClassAnalysisDataExport `type:"structure"`
23107} 23304}
23108 23305
@@ -23342,16 +23539,20 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant {
23342} 23539}
23343 23540
23344// A container for specifying the configuration for publication of messages 23541// A container for specifying the configuration for publication of messages
23345// to an Amazon Simple Notification Service (Amazon SNS) topic.when Amazon S3 23542// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3
23346// detects specified events. 23543// detects specified events.
23347type TopicConfiguration struct { 23544type TopicConfiguration struct {
23348 _ struct{} `type:"structure"` 23545 _ struct{} `type:"structure"`
23349 23546
23547 // The Amazon S3 bucket event about which to send notifications. For more information,
23548 // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
23549 // in the Amazon Simple Storage Service Developer Guide.
23550 //
23350 // Events is a required field 23551 // Events is a required field
23351 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` 23552 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
23352 23553
23353 // A container for object key name filtering rules. For information about key 23554 // Specifies object key name filtering rules. For information about key name
23354 // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 23555 // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
23355 // in the Amazon Simple Storage Service Developer Guide. 23556 // in the Amazon Simple Storage Service Developer Guide.
23356 Filter *NotificationConfigurationFilter `type:"structure"` 23557 Filter *NotificationConfigurationFilter `type:"structure"`
23357 23558
@@ -23360,7 +23561,7 @@ type TopicConfiguration struct {
23360 Id *string `type:"string"` 23561 Id *string `type:"string"`
23361 23562
23362 // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 23563 // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3
23363 // will publish a message when it detects events of the specified type. 23564 // publishes a message when it detects events of the specified type.
23364 // 23565 //
23365 // TopicArn is a required field 23566 // TopicArn is a required field
23366 TopicArn *string `locationName:"Topic" type:"string" required:"true"` 23567 TopicArn *string `locationName:"Topic" type:"string" required:"true"`
@@ -23469,18 +23670,19 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep
23469 return s 23670 return s
23470} 23671}
23471 23672
23673// Specifies when an object transitions to a specified storage class.
23472type Transition struct { 23674type Transition struct {
23473 _ struct{} `type:"structure"` 23675 _ struct{} `type:"structure"`
23474 23676
23475 // Indicates at what date the object is to be moved or deleted. Should be in 23677 // Indicates when objects are transitioned to the specified storage class. The
23476 // GMT ISO 8601 Format. 23678 // date value must be in ISO 8601 format. The time is always midnight UTC.
23477 Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` 23679 Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
23478 23680
23479 // Indicates the lifetime, in days, of the objects that are subject to the rule. 23681 // Indicates the number of days after creation when objects are transitioned
23480 // The value must be a non-zero positive integer. 23682 // to the specified storage class. The value must be a positive integer.
23481 Days *int64 `type:"integer"` 23683 Days *int64 `type:"integer"`
23482 23684
23483 // The class of storage used to store the object. 23685 // The storage class to which you want the object to transition.
23484 StorageClass *string `type:"string" enum:"TransitionStorageClass"` 23686 StorageClass *string `type:"string" enum:"TransitionStorageClass"`
23485} 23687}
23486 23688
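For reference, a minimal sketch of how the Transition type documented above is typically used: a lifecycle rule that moves objects to STANDARD_IA after 30 days. This is illustration only, not part of the diff; the bucket name, rule ID, and prefix are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// One rule: transition objects under logs/ to STANDARD_IA after 30 days.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("transition-after-30-days"), // placeholder
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30), // must be a positive integer
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}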
@@ -23550,7 +23752,7 @@ type UploadPartCopyInput struct {
23550 // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt 23752 // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
23551 // the source object. The encryption key provided in this header must be one 23753 // the source object. The encryption key provided in this header must be one
23552 // that was used when the source object was created. 23754 // that was used when the source object was created.
23553 CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` 23755 CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
23554 23756
23555 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 23757 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
23556 // Amazon S3 uses this header for a message integrity check to ensure the encryption 23758 // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -23581,7 +23783,7 @@ type UploadPartCopyInput struct {
23581 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm 23783 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
23582 // header. This must be the same encryption key specified in the initiate multipart 23784 // header. This must be the same encryption key specified in the initiate multipart
23583 // upload request. 23785 // upload request.
23584 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` 23786 SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
23585 23787
23586 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 23788 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
23587 // Amazon S3 uses this header for a message integrity check to ensure the encryption 23789 // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -23857,7 +24059,9 @@ type UploadPartInput struct {
23857 // body cannot be determined automatically. 24059 // body cannot be determined automatically.
23858 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` 24060 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
23859 24061
23860 // The base64-encoded 128-bit MD5 digest of the part data. 24062 // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
24063 // auto-populated when using the command from the CLI. This parameter is required
24064 // if object lock parameters are specified.
23861 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` 24065 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
23862 24066
23863 // Object key for which the multipart upload was initiated. 24067 // Object key for which the multipart upload was initiated.
@@ -23886,7 +24090,7 @@ type UploadPartInput struct {
23886 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm 24090 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
23887 // header. This must be the same encryption key specified in the initiate multipart 24091 // header. This must be the same encryption key specified in the initiate multipart
23888 // upload request. 24092 // upload request.
23889 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` 24093 SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
23890 24094
23891 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 24095 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
23892 // Amazon S3 uses this header for a message integrity check to ensure the encryption 24096 // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -24092,6 +24296,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
24092 return s 24296 return s
24093} 24297}
24094 24298
24299// Describes the versioning state of an Amazon S3 bucket. For more information,
24300// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
24301// in the Amazon Simple Storage Service API Reference.
24095type VersioningConfiguration struct { 24302type VersioningConfiguration struct {
24096 _ struct{} `type:"structure"` 24303 _ struct{} `type:"structure"`
24097 24304
@@ -24126,15 +24333,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
24126 return s 24333 return s
24127} 24334}
24128 24335
24336// Specifies website configuration parameters for an Amazon S3 bucket.
24129type WebsiteConfiguration struct { 24337type WebsiteConfiguration struct {
24130 _ struct{} `type:"structure"` 24338 _ struct{} `type:"structure"`
24131 24339
24340 // The name of the error document for the website.
24132 ErrorDocument *ErrorDocument `type:"structure"` 24341 ErrorDocument *ErrorDocument `type:"structure"`
24133 24342
24343 // The name of the index document for the website.
24134 IndexDocument *IndexDocument `type:"structure"` 24344 IndexDocument *IndexDocument `type:"structure"`
24135 24345
24346 // The redirect behavior for every request to this bucket's website endpoint.
24347 //
24348 // If you specify this property, you can't specify any other property.
24136 RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` 24349 RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
24137 24350
24351 // Rules that define when a redirect is applied and the redirect behavior.
24138 RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` 24352 RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
24139} 24353}
24140 24354
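A hedged sketch of applying the WebsiteConfiguration fields documented above via PutBucketWebsite (illustration only; the bucket and document names are placeholders, and RedirectAllRequestsTo is omitted because setting it excludes every other property):

// Assumes the aws and s3 packages are imported as in the earlier sketch.
func configureWebsite(svc *s3.S3) error {
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
			// RedirectAllRequestsTo is mutually exclusive with the fields above.
		},
	})
	return err
}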
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
index bc68a46..9ba8a78 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -80,7 +80,8 @@ func buildGetBucketLocation(r *request.Request) {
80 out := r.Data.(*GetBucketLocationOutput) 80 out := r.Data.(*GetBucketLocationOutput)
81 b, err := ioutil.ReadAll(r.HTTPResponse.Body) 81 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
82 if err != nil { 82 if err != nil {
83 r.Error = awserr.New("SerializationError", "failed reading response body", err) 83 r.Error = awserr.New(request.ErrCodeSerialization,
84 "failed reading response body", err)
84 return 85 return
85 } 86 }
86 87
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
index 95f2456..23d386b 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -17,7 +17,8 @@ func defaultInitClientFn(c *client.Client) {
17 17
18 // Require SSL when using SSE keys 18 // Require SSL when using SSE keys
19 c.Handlers.Validate.PushBack(validateSSERequiresSSL) 19 c.Handlers.Validate.PushBack(validateSSERequiresSSL)
20 c.Handlers.Build.PushBack(computeSSEKeys) 20 c.Handlers.Build.PushBack(computeSSEKeyMD5)
21 c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
21 22
22 // S3 uses custom error unmarshaling logic 23 // S3 uses custom error unmarshaling logic
23 c.Handlers.UnmarshalError.Clear() 24 c.Handlers.UnmarshalError.Clear()
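Build handlers run in registration order when a request is marshaled, so splitting computeSSEKeys into computeSSEKeyMD5 and computeCopySourceSSEKeyMD5 keeps each handler single-purpose without changing when they fire. A sketch of the registration pattern (the handler bodies are stand-ins, not the SDK's implementations):

// Assumes "github.com/aws/aws-sdk-go/aws/request" is imported.
func registerBuildHandlers(h *request.Handlers) {
	h.Build.PushBack(func(r *request.Request) {
		// stand-in for computeSSEKeyMD5
	})
	h.Build.PushBack(func(r *request.Request) {
		// stand-in for computeCopySourceSSEKeyMD5
	})
}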
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
index 8010c4f..b71c835 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -3,6 +3,7 @@ package s3
3import ( 3import (
4 "crypto/md5" 4 "crypto/md5"
5 "encoding/base64" 5 "encoding/base64"
6 "net/http"
6 7
7 "github.com/aws/aws-sdk-go/aws/awserr" 8 "github.com/aws/aws-sdk-go/aws/awserr"
8 "github.com/aws/aws-sdk-go/aws/request" 9 "github.com/aws/aws-sdk-go/aws/request"
@@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) {
30 } 31 }
31} 32}
32 33
33func computeSSEKeys(r *request.Request) { 34const (
34 headers := []string{ 35 sseKeyHeader = "x-amz-server-side-encryption-customer-key"
35 "x-amz-server-side-encryption-customer-key", 36 sseKeyMD5Header = sseKeyHeader + "-md5"
36 "x-amz-copy-source-server-side-encryption-customer-key", 37)
38
39func computeSSEKeyMD5(r *request.Request) {
40 var key string
41 if g, ok := r.Params.(sseCustomerKeyGetter); ok {
42 key = g.getSSECustomerKey()
43 }
44
45 computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
46}
47
48const (
49 copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key"
50 copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
51)
52
53func computeCopySourceSSEKeyMD5(r *request.Request) {
54 var key string
55 if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
56 key = g.getCopySourceSSECustomerKey()
37 } 57 }
38 58
39 for _, h := range headers { 59 computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
40 md5h := h + "-md5" 60}
41 if key := r.HTTPRequest.Header.Get(h); key != "" { 61
42 // Base64-encode the value 62func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
43 b64v := base64.StdEncoding.EncodeToString([]byte(key)) 63 if len(key) == 0 {
44 r.HTTPRequest.Header.Set(h, b64v) 64 // Backwards compatibility where the user just set the header value instead
45 65 // of using the API parameter, or setting the header value for an
46 // Add MD5 if it wasn't computed 66 // operation without the parameters modeled.
47 if r.HTTPRequest.Header.Get(md5h) == "" { 67 key = r.Header.Get(keyHeader)
48 sum := md5.Sum([]byte(key)) 68 if len(key) == 0 {
49 b64sum := base64.StdEncoding.EncodeToString(sum[:]) 69 return
50 r.HTTPRequest.Header.Set(md5h, b64sum)
51 }
52 } 70 }
71
72 // For backwards compatibility, the header's value is not base64 encoded,
73 // and needs to be encoded and updated by the SDK's customizations.
74 b64Key := base64.StdEncoding.EncodeToString([]byte(key))
75 r.Header.Set(keyHeader, b64Key)
76 }
77
78 // Only update Key's MD5 if not already set.
79 if len(r.Header.Get(keyMD5Header)) == 0 {
80 sum := md5.Sum([]byte(key))
81 keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
82 r.Header.Set(keyMD5Header, keyMD5)
53 } 83 }
54} 84}
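The header math implemented by computeKeyMD5 above, reduced to a standalone sketch: the key header carries the base64 of the raw key bytes, and the companion -md5 header carries the base64 of the MD5 digest of those same raw bytes. The key value here is a placeholder.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // placeholder 32-byte key

	b64Key := base64.StdEncoding.EncodeToString(key)    // key header value
	sum := md5.Sum(key)                                 // digest of the raw key bytes
	b64MD5 := base64.StdEncoding.EncodeToString(sum[:]) // -md5 header value

	fmt.Println(b64Key, b64MD5)
}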
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
index fde3050..f6a69ae 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -14,7 +14,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
14 b, err := ioutil.ReadAll(r.HTTPResponse.Body) 14 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
15 if err != nil { 15 if err != nil {
16 r.Error = awserr.NewRequestFailure( 16 r.Error = awserr.NewRequestFailure(
17 awserr.New("SerializationError", "unable to read response body", err), 17 awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
18 r.HTTPResponse.StatusCode, 18 r.HTTPResponse.StatusCode,
19 r.RequestID, 19 r.RequestID,
20 ) 20 )
@@ -31,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
31 31
32 unmarshalError(r) 32 unmarshalError(r)
33 if err, ok := r.Error.(awserr.Error); ok && err != nil { 33 if err, ok := r.Error.(awserr.Error); ok && err != nil {
34 if err.Code() == "SerializationError" { 34 if err.Code() == request.ErrCodeSerialization {
35 r.Error = nil 35 r.Error = nil
36 return 36 return
37 } 37 }
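With the bare "SerializationError" string replaced by the request.ErrCodeSerialization constant, callers can match on the same constant. A sketch of such a check (the helper name is invented for illustration):

// Assumes the awserr and request packages from aws-sdk-go are imported.
func isSerializationError(err error) bool {
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == request.ErrCodeSerialization
	}
	return false
}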
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
index 1db7e13..5b63fac 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -11,6 +11,7 @@ import (
11 "github.com/aws/aws-sdk-go/aws" 11 "github.com/aws/aws-sdk-go/aws"
12 "github.com/aws/aws-sdk-go/aws/awserr" 12 "github.com/aws/aws-sdk-go/aws/awserr"
13 "github.com/aws/aws-sdk-go/aws/request" 13 "github.com/aws/aws-sdk-go/aws/request"
14 "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
14) 15)
15 16
16type xmlErrorResponse struct { 17type xmlErrorResponse struct {
@@ -42,29 +43,34 @@ func unmarshalError(r *request.Request) {
42 return 43 return
43 } 44 }
44 45
45 var errCode, errMsg string
46
47 // Attempt to parse error from body if it is known 46 // Attempt to parse error from body if it is known
48 resp := &xmlErrorResponse{} 47 var errResp xmlErrorResponse
49 err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) 48 err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
50 if err != nil && err != io.EOF { 49 if err == io.EOF {
51 errCode = "SerializationError" 50 // Only capture the error if an unmarshal error occurs that is not EOF,
52 errMsg = "failed to decode S3 XML error response" 51 // because S3 might send an error without an error message, which causes
53 } else { 52 // the XML unmarshal to fail with EOF.
54 errCode = resp.Code
55 errMsg = resp.Message
56 err = nil 53 err = nil
57 } 54 }
55 if err != nil {
56 r.Error = awserr.NewRequestFailure(
57 awserr.New(request.ErrCodeSerialization,
58 "failed to unmarshal error message", err),
59 r.HTTPResponse.StatusCode,
60 r.RequestID,
61 )
62 return
63 }
58 64
59 // Fallback to status code converted to message if still no error code 65 // Fallback to status code converted to message if still no error code
60 if len(errCode) == 0 { 66 if len(errResp.Code) == 0 {
61 statusText := http.StatusText(r.HTTPResponse.StatusCode) 67 statusText := http.StatusText(r.HTTPResponse.StatusCode)
62 errCode = strings.Replace(statusText, " ", "", -1) 68 errResp.Code = strings.Replace(statusText, " ", "", -1)
63 errMsg = statusText 69 errResp.Message = statusText
64 } 70 }
65 71
66 r.Error = awserr.NewRequestFailure( 72 r.Error = awserr.NewRequestFailure(
67 awserr.New(errCode, errMsg, err), 73 awserr.New(errResp.Code, errResp.Message, err),
68 r.HTTPResponse.StatusCode, 74 r.HTTPResponse.StatusCode,
69 r.RequestID, 75 r.RequestID,
70 ) 76 )
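A sketch of consuming the awserr.RequestFailure built above: it carries the parsed code and message (or the HTTP status-text fallback) along with the status code and request ID. The logging format is illustrative only.

// Assumes "log" and the awserr package are imported; err came from an S3 call.
func logS3Failure(err error) {
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		log.Printf("s3 error %s (%s): status=%d request-id=%s",
			reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
	}
}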
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index 8113089..d22c38b 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -3,6 +3,7 @@
3package sts 3package sts
4 4
5import ( 5import (
6 "fmt"
6 "time" 7 "time"
7 8
8 "github.com/aws/aws-sdk-go/aws" 9 "github.com/aws/aws-sdk-go/aws"
@@ -55,38 +56,26 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
55 56
56// AssumeRole API operation for AWS Security Token Service. 57// AssumeRole API operation for AWS Security Token Service.
57// 58//
58// Returns a set of temporary security credentials (consisting of an access 59// Returns a set of temporary security credentials that you can use to access
59// key ID, a secret access key, and a security token) that you can use to access 60// AWS resources that you might not normally have access to. These temporary
60// AWS resources that you might not normally have access to. Typically, you 61// credentials consist of an access key ID, a secret access key, and a security
61// use AssumeRole for cross-account access or federation. For a comparison of 62// token. Typically, you use AssumeRole within your account or for cross-account
62// AssumeRole with the other APIs that produce temporary credentials, see Requesting 63// access. For a comparison of AssumeRole with other API operations that produce
63// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) 64// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
64// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) 65// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
65// in the IAM User Guide. 66// in the IAM User Guide.
66// 67//
67// Important: You cannot call AssumeRole by using AWS root account credentials; 68// You cannot use AWS account root user credentials to call AssumeRole. You
68// access is denied. You must use credentials for an IAM user or an IAM role 69// must use credentials for an IAM user or an IAM role to call AssumeRole.
69// to call AssumeRole.
70// 70//
71// For cross-account access, imagine that you own multiple accounts and need 71// For cross-account access, imagine that you own multiple accounts and need
72// to access resources in each account. You could create long-term credentials 72// to access resources in each account. You could create long-term credentials
73// in each account to access those resources. However, managing all those credentials 73// in each account to access those resources. However, managing all those credentials
74// and remembering which one can access which account can be time consuming. 74// and remembering which one can access which account can be time consuming.
75// Instead, you can create one set of long-term credentials in one account and 75// Instead, you can create one set of long-term credentials in one account.
76// then use temporary security credentials to access all the other accounts 76// Then use temporary security credentials to access all the other accounts
77// by assuming roles in those accounts. For more information about roles, see 77// by assuming roles in those accounts. For more information about roles, see
78// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) 78// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
79// in the IAM User Guide.
80//
81// For federation, you can, for example, grant single sign-on access to the
82// AWS Management Console. If you already have an identity and authentication
83// system in your corporate network, you don't have to recreate user identities
84// in AWS in order to grant those user identities access to AWS. Instead, after
85// a user has been authenticated, you call AssumeRole (and specify the role
86// with the appropriate permissions) to get temporary security credentials for
87// that user. With those temporary security credentials, you construct a sign-in
88// URL that users can use to access the console. For more information, see Common
89// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
90// in the IAM User Guide. 79// in the IAM User Guide.
91// 80//
92// By default, the temporary security credentials created by AssumeRole last 81// By default, the temporary security credentials created by AssumeRole last
@@ -95,69 +84,73 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
95// seconds (15 minutes) up to the maximum session duration setting for the role. 84// seconds (15 minutes) up to the maximum session duration setting for the role.
96// This setting can have a value from 1 hour to 12 hours. To learn how to view 85// This setting can have a value from 1 hour to 12 hours. To learn how to view
97// the maximum value for your role, see View the Maximum Session Duration Setting 86// the maximum value for your role, see View the Maximum Session Duration Setting
98// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) 87// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
99// in the IAM User Guide. The maximum session duration limit applies when you 88// in the IAM User Guide. The maximum session duration limit applies when you
100// use the AssumeRole* API operations or the assume-role* CLI operations but 89// use the AssumeRole* API operations or the assume-role* CLI commands. However,
101// does not apply when you use those operations to create a console URL. For 90// the limit does not apply when you use those operations to create a console
102// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) 91// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
103// in the IAM User Guide. 92// in the IAM User Guide.
104// 93//
105// The temporary security credentials created by AssumeRole can be used to make 94// The temporary security credentials created by AssumeRole can be used to make
106// API calls to any AWS service with the following exception: you cannot call 95// API calls to any AWS service with the following exception: You cannot call
107// the STS service's GetFederationToken or GetSessionToken APIs. 96// the AWS STS GetFederationToken or GetSessionToken API operations.
108// 97//
109// Optionally, you can pass an IAM access policy to this operation. If you choose 98// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
110// not to pass a policy, the temporary security credentials that are returned 99// to this operation. You can pass a single JSON policy document to use as an
111// by the operation have the permissions that are defined in the access policy 100// inline session policy. You can also specify up to 10 managed policies to
112// of the role that is being assumed. If you pass a policy to this operation, 101// use as managed session policies. The plain text that you use for both inline
113// the temporary security credentials that are returned by the operation have 102// and managed session policies shouldn't exceed 2048 characters. Passing policies
114// the permissions that are allowed by both the access policy of the role that 103// to this operation returns new temporary credentials. The resulting session's
115// is being assumed, and the policy that you pass. This gives you a way to further 104// permissions are the intersection of the role's identity-based policy and
116// restrict the permissions for the resulting temporary security credentials. 105// the session policies. You can use the role's temporary credentials in subsequent
117// You cannot use the passed policy to grant permissions that are in excess 106// AWS API calls to access resources in the account that owns the role. You
118// of those allowed by the access policy of the role that is being assumed. 107// cannot use session policies to grant more permissions than those allowed
119// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, 108// by the identity-based policy of the role that is being assumed. For more
120// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) 109// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
121// in the IAM User Guide. 110// in the IAM User Guide.
122// 111//
123// To assume a role, your AWS account must be trusted by the role. The trust 112// To assume a role from a different account, your AWS account must be trusted
124// relationship is defined in the role's trust policy when the role is created. 113// by the role. The trust relationship is defined in the role's trust policy
125// That trust policy states which accounts are allowed to delegate access to 114// when the role is created. That trust policy states which accounts are allowed
126// this account's role. 115// to delegate that access to users in the account.
127// 116//
128// The user who wants to access the role must also have permissions delegated 117// A user who wants to access a role in a different account must also have permissions
129// from the role's administrator. If the user is in a different account than 118// that are delegated from the user account administrator. The administrator
130// the role, then the user's administrator must attach a policy that allows 119// must attach a policy that allows the user to call AssumeRole for the ARN
131// the user to call AssumeRole on the ARN of the role in the other account. 120// of the role in the other account. If the user is in the same account as the
132// If the user is in the same account as the role, then you can either attach 121// role, then you can do either of the following:
133// a policy to the user (identical to the previous different account user), 122//
134// or you can add the user as a principal directly in the role's trust policy. 123// * Attach a policy to the user (identical to the previous user in a different
135// In this case, the trust policy acts as the only resource-based policy in 124// account).
136// IAM, and users in the same account as the role do not need explicit permission 125//
137// to assume the role. For more information about trust policies and resource-based 126// * Add the user as a principal directly in the role's trust policy.
138// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) 127//
128// In this case, the trust policy acts as an IAM resource-based policy. Users
129// in the same account as the role do not need explicit permission to assume
130// the role. For more information about trust policies and resource-based policies,
131// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
139// in the IAM User Guide. 132// in the IAM User Guide.
140// 133//
141// Using MFA with AssumeRole 134// Using MFA with AssumeRole
142// 135//
143// You can optionally include multi-factor authentication (MFA) information 136// (Optional) You can include multi-factor authentication (MFA) information
144// when you call AssumeRole. This is useful for cross-account scenarios in which 137// when you call AssumeRole. This is useful for cross-account scenarios to ensure
145// you want to make sure that the user who is assuming the role has been authenticated 138// that the user that assumes the role has been authenticated with an AWS MFA
146// using an AWS MFA device. In that scenario, the trust policy of the role being 139// device. In that scenario, the trust policy of the role being assumed includes
147// assumed includes a condition that tests for MFA authentication; if the caller 140// a condition that tests for MFA authentication. If the caller does not include
148// does not include valid MFA information, the request to assume the role is 141// valid MFA information, the request to assume the role is denied. The condition
149// denied. The condition in a trust policy that tests for MFA authentication 142// in a trust policy that tests for MFA authentication might look like the following
150// might look like the following example. 143// example.
151// 144//
152// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} 145// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
153// 146//
154// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) 147// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
155// in the IAM User Guide. 148// in the IAM User Guide.
156// 149//
157// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode 150// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
158// parameters. The SerialNumber value identifies the user's hardware or virtual 151// parameters. The SerialNumber value identifies the user's hardware or virtual
159// MFA device. The TokenCode is the time-based one-time password (TOTP) that 152// MFA device. The TokenCode is the time-based one-time password (TOTP) that
160// the MFA devices produces. 153// the MFA device produces.
161// 154//
162// Returns awserr.Error for service API and SDK errors. Use runtime type assertions 155// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
163// with awserr.Error's Code and Message methods to get detailed information about 156// with awserr.Error's Code and Message methods to get detailed information about
@@ -180,7 +173,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
180// STS is not activated in the requested region for the account that is being 173// STS is not activated in the requested region for the account that is being
181// asked to generate credentials. The account administrator must use the IAM 174// asked to generate credentials. The account administrator must use the IAM
182// console to activate STS in that region. For more information, see Activating 175// console to activate STS in that region. For more information, see Activating
183// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 176// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
184// in the IAM User Guide. 177// in the IAM User Guide.
185// 178//
186// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole 179// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
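A minimal AssumeRole call matching the documentation above (illustration only; the role ARN and session name are placeholders):

// Assumes the aws, session, and sts packages from aws-sdk-go are imported.
func assumeExampleRole() (*sts.AssumeRoleOutput, error) {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)
	return svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),                        // placeholder
		DurationSeconds: aws.Int64(900), // the documented 15-minute minimum
	})
}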
@@ -254,9 +247,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
254// via a SAML authentication response. This operation provides a mechanism for 247// via a SAML authentication response. This operation provides a mechanism for
255// tying an enterprise identity store or directory to role-based AWS access 248// tying an enterprise identity store or directory to role-based AWS access
256// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML 249// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
257// with the other APIs that produce temporary credentials, see Requesting Temporary 250// with the other API operations that produce temporary credentials, see Requesting
258// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) 251// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
259// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) 252// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
260// in the IAM User Guide. 253// in the IAM User Guide.
261// 254//
262// The temporary security credentials returned by this operation consist of 255// The temporary security credentials returned by this operation consist of
@@ -271,37 +264,36 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
271// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session 264// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
272// duration setting for the role. This setting can have a value from 1 hour 265// duration setting for the role. This setting can have a value from 1 hour
273// to 12 hours. To learn how to view the maximum value for your role, see View 266// to 12 hours. To learn how to view the maximum value for your role, see View
274// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) 267// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
275// in the IAM User Guide. The maximum session duration limit applies when you 268// in the IAM User Guide. The maximum session duration limit applies when you
276// use the AssumeRole* API operations or the assume-role* CLI operations but 269// use the AssumeRole* API operations or the assume-role* CLI commands. However,
277// does not apply when you use those operations to create a console URL. For 270// the limit does not apply when you use those operations to create a console
278// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) 271// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
279// in the IAM User Guide. 272// in the IAM User Guide.
280// 273//
281// The temporary security credentials created by AssumeRoleWithSAML can be used 274// The temporary security credentials created by AssumeRoleWithSAML can be used
282// to make API calls to any AWS service with the following exception: you cannot 275// to make API calls to any AWS service with the following exception: you cannot
283// call the STS service's GetFederationToken or GetSessionToken APIs. 276// call the STS GetFederationToken or GetSessionToken API operations.
284// 277//
285// Optionally, you can pass an IAM access policy to this operation. If you choose 278// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
286// not to pass a policy, the temporary security credentials that are returned 279// to this operation. You can pass a single JSON policy document to use as an
287// by the operation have the permissions that are defined in the access policy 280// inline session policy. You can also specify up to 10 managed policies to
288// of the role that is being assumed. If you pass a policy to this operation, 281// use as managed session policies. The plain text that you use for both inline
289// the temporary security credentials that are returned by the operation have 282// and managed session policies shouldn't exceed 2048 characters. Passing policies
290// the permissions that are allowed by the intersection of both the access policy 283// to this operation returns new temporary credentials. The resulting session's
291// of the role that is being assumed, and the policy that you pass. This means 284// permissions are the intersection of the role's identity-based policy and
292// that both policies must grant the permission for the action to be allowed. 285// the session policies. You can use the role's temporary credentials in subsequent
293// This gives you a way to further restrict the permissions for the resulting 286// AWS API calls to access resources in the account that owns the role. You
294// temporary security credentials. You cannot use the passed policy to grant 287// cannot use session policies to grant more permissions than those allowed
295// permissions that are in excess of those allowed by the access policy of the 288// by the identity-based policy of the role that is being assumed. For more
296// role that is being assumed. For more information, see Permissions for AssumeRole, 289// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
297// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
298// in the IAM User Guide. 290// in the IAM User Guide.
299// 291//
300// Before your application can call AssumeRoleWithSAML, you must configure your 292// Before your application can call AssumeRoleWithSAML, you must configure your
301// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, 293// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
302// you must use AWS Identity and Access Management (IAM) to create a SAML provider 294// you must use AWS Identity and Access Management (IAM) to create a SAML provider
303// entity in your AWS account that represents your identity provider, and create 295// entity in your AWS account that represents your identity provider. You must
304// an IAM role that specifies this SAML provider in its trust policy. 296// also create an IAM role that specifies this SAML provider in its trust policy.
305// 297//
306// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. 298// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
307// The identity of the caller is validated by using keys in the metadata document 299// The identity of the caller is validated by using keys in the metadata document
@@ -315,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
315// 307//
316// For more information, see the following resources: 308// For more information, see the following resources:
317// 309//
318// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) 310// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
319// in the IAM User Guide. 311// in the IAM User Guide.
320// 312//
321// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) 313// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
322// in the IAM User Guide. 314// in the IAM User Guide.
323// 315//
324// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) 316// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
325// in the IAM User Guide. 317// in the IAM User Guide.
326// 318//
327// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) 319// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
328// in the IAM User Guide. 320// in the IAM User Guide.
329// 321//
330// Returns awserr.Error for service API and SDK errors. Use runtime type assertions 322// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -363,7 +355,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
363// STS is not activated in the requested region for the account that is being 355// STS is not activated in the requested region for the account that is being
364// asked to generate credentials. The account administrator must use the IAM 356// asked to generate credentials. The account administrator must use the IAM
365// console to activate STS in that region. For more information, see Activating 357// console to activate STS in that region. For more information, see Activating
366// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 358// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
367// in the IAM User Guide. 359// in the IAM User Guide.
368// 360//
369// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML 361// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
@@ -434,35 +426,35 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
434// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. 426// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
435// 427//
436// Returns a set of temporary security credentials for users who have been authenticated 428// Returns a set of temporary security credentials for users who have been authenticated
437// in a mobile or web application with a web identity provider, such as Amazon 429// in a mobile or web application with a web identity provider. Example providers
438// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible 430// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
439// identity provider. 431// Connect-compatible identity provider.
440// 432//
441// For mobile applications, we recommend that you use Amazon Cognito. You can 433// For mobile applications, we recommend that you use Amazon Cognito. You can
442// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) 434// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
443// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely 435// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
444// identify a user and supply the user with a consistent identity throughout 436// to uniquely identify a user. You can also supply the user with a consistent
445// the lifetime of an application. 437// identity throughout the lifetime of an application.
446// 438//
447// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) 439// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
448// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview 440// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
449// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
450// in the AWS SDK for iOS Developer Guide. 441// in the AWS SDK for iOS Developer Guide.
451// 442//
452// Calling AssumeRoleWithWebIdentity does not require the use of AWS security 443// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
453// credentials. Therefore, you can distribute an application (for example, on 444// credentials. Therefore, you can distribute an application (for example, on
454// mobile devices) that requests temporary security credentials without including 445// mobile devices) that requests temporary security credentials without including
455// long-term AWS credentials in the application, and without deploying server-based 446// long-term AWS credentials in the application. You also don't need to deploy
456// proxy services that use long-term AWS credentials. Instead, the identity 447// server-based proxy services that use long-term AWS credentials. Instead,
457// of the caller is validated by using a token from the web identity provider. 448// the identity of the caller is validated by using a token from the web identity
458// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce 449// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
459// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) 450// operations that produce temporary credentials, see Requesting Temporary Security
460// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) 451// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
452// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
461// in the IAM User Guide. 453// in the IAM User Guide.
462// 454//
463// The temporary security credentials returned by this API consist of an access 455// The temporary security credentials returned by this API consist of an access
464// key ID, a secret access key, and a security token. Applications can use these 456// key ID, a secret access key, and a security token. Applications can use these
465// temporary security credentials to sign calls to AWS service APIs. 457// temporary security credentials to sign calls to AWS service API operations.
466// 458//
467// By default, the temporary security credentials created by AssumeRoleWithWebIdentity 459// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
468// last for one hour. However, you can use the optional DurationSeconds parameter 460// last for one hour. However, you can use the optional DurationSeconds parameter
@@ -470,29 +462,29 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
470// seconds (15 minutes) up to the maximum session duration setting for the role. 462// seconds (15 minutes) up to the maximum session duration setting for the role.
471// This setting can have a value from 1 hour to 12 hours. To learn how to view 463// This setting can have a value from 1 hour to 12 hours. To learn how to view
472// the maximum value for your role, see View the Maximum Session Duration Setting 464// the maximum value for your role, see View the Maximum Session Duration Setting
473// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) 465// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
474// in the IAM User Guide. The maximum session duration limit applies when you 466// in the IAM User Guide. The maximum session duration limit applies when you
475// use the AssumeRole* API operations or the assume-role* CLI operations but 467// use the AssumeRole* API operations or the assume-role* CLI commands. However,
476// does not apply when you use those operations to create a console URL. For 468// the limit does not apply when you use those operations to create a console
477// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) 469// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
478// in the IAM User Guide. 470// in the IAM User Guide.
479// 471//
480// The temporary security credentials created by AssumeRoleWithWebIdentity can 472// The temporary security credentials created by AssumeRoleWithWebIdentity can
481// be used to make API calls to any AWS service with the following exception: 473// be used to make API calls to any AWS service with the following exception:
482// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. 474// you cannot call the STS GetFederationToken or GetSessionToken API operations.
483// 475//
484// Optionally, you can pass an IAM access policy to this operation. If you choose 476// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
485// not to pass a policy, the temporary security credentials that are returned 477// to this operation. You can pass a single JSON policy document to use as an
486// by the operation have the permissions that are defined in the access policy 478// inline session policy. You can also specify up to 10 managed policies to
487// of the role that is being assumed. If you pass a policy to this operation, 479// use as managed session policies. The plain text that you use for both inline
488// the temporary security credentials that are returned by the operation have 480// and managed session policies shouldn't exceed 2048 characters. Passing policies
489// the permissions that are allowed by both the access policy of the role that 481// to this operation returns new temporary credentials. The resulting session's
490// is being assumed, and the policy that you pass. This gives you a way to further 482// permissions are the intersection of the role's identity-based policy and
491// restrict the permissions for the resulting temporary security credentials. 483// the session policies. You can use the role's temporary credentials in subsequent
492// You cannot use the passed policy to grant permissions that are in excess 484// AWS API calls to access resources in the account that owns the role. You
493// of those allowed by the access policy of the role that is being assumed. 485// cannot use session policies to grant more permissions than those allowed
494// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, 486// by the identity-based policy of the role that is being assumed. For more
495// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) 487// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
496// in the IAM User Guide. 488// in the IAM User Guide.
497// 489//
498// Before your application can call AssumeRoleWithWebIdentity, you must have 490// Before your application can call AssumeRoleWithWebIdentity, you must have
@@ -511,21 +503,19 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
511// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity 503// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
512// API, see the following resources: 504// API, see the following resources:
513// 505//
514// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) 506// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
515// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). 507// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
516// 508//
509// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
510// Walk through the process of authenticating through Login with Amazon,
511// Facebook, or Google, getting temporary security credentials, and then
512// using those credentials to make a request to AWS.
517// 513//
518// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). 514// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and
519// This interactive website lets you walk through the process of authenticating 515// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
520// via Login with Amazon, Facebook, or Google, getting temporary security 516// These toolkits contain sample apps that show how to invoke the identity
521// credentials, and then using those credentials to make a request to AWS. 517// providers, and then how to use the information from these providers to
522// 518// get and use temporary security credentials.
523//
524// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
525// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
526// apps that show how to invoke the identity providers, and then how to use
527// the information from these providers to get and use temporary security
528// credentials.
529// 519//
530// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). 520// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
531// This article discusses web identity federation and shows an example of 521// This article discusses web identity federation and shows an example of
@@ -575,7 +565,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
575// STS is not activated in the requested region for the account that is being 565// STS is not activated in the requested region for the account that is being
576// asked to generate credentials. The account administrator must use the IAM 566// asked to generate credentials. The account administrator must use the IAM
577// console to activate STS in that region. For more information, see Activating 567// console to activate STS in that region. For more information, see Activating
578// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 568// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
579// in the IAM User Guide. 569// in the IAM User Guide.
580// 570//
581// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity 571// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
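Per the documentation above, AssumeRoleWithWebIdentity requires no AWS credentials of its own, only the provider-issued token. A hedged sketch (all values are placeholders):

// Assumes the aws, credentials, session, and sts packages are imported;
// token holds the token issued by the web identity provider.
func assumeWithWebIdentity(token string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithCredentials(credentials.AnonymousCredentials)))
	return sts.New(sess).AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity"), // placeholder
		RoleSessionName:  aws.String("app-user"),                                    // placeholder
		WebIdentityToken: aws.String(token),
	})
}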
@@ -647,17 +637,17 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
647// Decodes additional information about the authorization status of a request 637// Decodes additional information about the authorization status of a request
648// from an encoded message returned in response to an AWS request. 638// from an encoded message returned in response to an AWS request.
649// 639//
650// For example, if a user is not authorized to perform an action that he or 640// For example, if a user is not authorized to perform an operation that he
651// she has requested, the request returns a Client.UnauthorizedOperation response 641// or she has requested, the request returns a Client.UnauthorizedOperation
652// (an HTTP 403 response). Some AWS actions additionally return an encoded message 642// response (an HTTP 403 response). Some AWS operations additionally return
653// that can provide details about this authorization failure. 643// an encoded message that can provide details about this authorization failure.
654// 644//
655// Only certain AWS actions return an encoded authorization message. The documentation 645// Only certain AWS operations return an encoded authorization message. The
656// for an individual action indicates whether that action returns an encoded 646// documentation for an individual operation indicates whether that operation
657// message in addition to returning an HTTP code. 647// returns an encoded message in addition to returning an HTTP code.
658// 648//
659// The message is encoded because the details of the authorization status can 649// The message is encoded because the details of the authorization status can
660// constitute privileged information that the user who requested the action 650// constitute privileged information that the user who requested the operation
661// should not see. To decode an authorization status message, a user must be 651// should not see. To decode an authorization status message, a user must be
662// granted permissions via an IAM policy to request the DecodeAuthorizationMessage 652// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
663// (sts:DecodeAuthorizationMessage) action. 653// (sts:DecodeAuthorizationMessage) action.
@@ -666,7 +656,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
666// 656//
667// * Whether the request was denied due to an explicit deny or due to the 657// * Whether the request was denied due to an explicit deny or due to the
668// absence of an explicit allow. For more information, see Determining Whether 658// absence of an explicit allow. For more information, see Determining Whether
669// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) 659// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
670// in the IAM User Guide. 660// in the IAM User Guide.
671// 661//
672// * The principal who made the request. 662// * The principal who made the request.
@@ -712,6 +702,102 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco
712 return out, req.Send() 702 return out, req.Send()
713} 703}
714 704
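For context, a minimal sketch of calling the DecodeAuthorizationMessage operation documented above. The client setup and the encodedMsg value are illustrative assumptions; EncodedMessage and DecodedMessage are the operation's generated input/output fields.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Assumes region and credentials come from the default chain.
	svc := sts.New(session.Must(session.NewSession()))

	// encodedMsg stands in for the encoded message returned alongside a
	// Client.UnauthorizedOperation (HTTP 403) response.
	encodedMsg := "..."

	// The caller must be allowed the sts:DecodeAuthorizationMessage action.
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encodedMsg),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DecodedMessage))
}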
705const opGetAccessKeyInfo = "GetAccessKeyInfo"
706
707// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
708// client's request for the GetAccessKeyInfo operation. The "output" return
709// value will be populated with the request's response once the request completes
710// successfully.
711//
712// Use "Send" method on the returned Request to send the API call to the service.
713// the "output" return value is not valid until after Send returns without error.
714//
715// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
716// API call, and error handling.
717//
718// This method is useful when you want to inject custom logic or configuration
719// into the SDK's request lifecycle, such as custom headers or retry logic.
720//
721//
722// // Example sending a request using the GetAccessKeyInfoRequest method.
723// req, resp := client.GetAccessKeyInfoRequest(params)
724//
725// err := req.Send()
726// if err == nil { // resp is now filled
727// fmt.Println(resp)
728// }
729//
730// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
731func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
732 op := &request.Operation{
733 Name: opGetAccessKeyInfo,
734 HTTPMethod: "POST",
735 HTTPPath: "/",
736 }
737
738 if input == nil {
739 input = &GetAccessKeyInfoInput{}
740 }
741
742 output = &GetAccessKeyInfoOutput{}
743 req = c.newRequest(op, input, output)
744 return
745}
746
747// GetAccessKeyInfo API operation for AWS Security Token Service.
748//
749// Returns the account identifier for the specified access key ID.
750//
751// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
752// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
753// For more information about access keys, see Managing Access Keys for IAM
754// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
755// in the IAM User Guide.
756//
757// When you pass an access key ID to this operation, it returns the ID of the
758// AWS account to which the keys belong. Access key IDs beginning with AKIA
759// are long-term credentials for an IAM user or the AWS account root user. Access
760// key IDs beginning with ASIA are temporary credentials that are created using
761// STS operations. If the account in the response belongs to you, you can sign
762// in as the root user and review your root user access keys. Then, you can
763// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report)
764// to learn which IAM user owns the keys. To learn who requested the temporary
765// credentials for an ASIA access key, view the STS events in your CloudTrail
766// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration).
767//
768// This operation does not indicate the state of the access key. The key might
769// be active, inactive, or deleted. Active keys might not have permissions to
770// perform an operation. Providing a deleted access key might return an error
771// that the key doesn't exist.
772//
773// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
774// with awserr.Error's Code and Message methods to get detailed information about
775// the error.
776//
777// See the AWS API reference guide for AWS Security Token Service's
778// API operation GetAccessKeyInfo for usage and error information.
779// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
780func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
781 req, out := c.GetAccessKeyInfoRequest(input)
782 return out, req.Send()
783}
784
785// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
786// the ability to pass a context and additional request options.
787//
788// See GetAccessKeyInfo for details on how to use this API operation.
789//
790// The context must be non-nil and will be used for request cancellation. If
791// the context is nil a panic will occur. In the future the SDK may create
792// sub-contexts for http.Requests. See https://golang.org/pkg/context/
793// for more information on using Contexts.
794func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
795 req, out := c.GetAccessKeyInfoRequest(input)
796 req.SetContext(ctx)
797 req.ApplyOptions(opts...)
798 return out, req.Send()
799}
800
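A short sketch of the GetAccessKeyInfo operation added above, reusing a client built as in the earlier example. The hypothetical helper name accountForKey is illustrative; AccessKeyId and Account are the operation's generated input/output fields, and the example key ID is the documentation's own placeholder.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// accountForKey looks up which AWS account an access key ID belongs to.
// Per the docs above, this does not reveal whether the key is active.
func accountForKey(svc *sts.STS, keyID string) (string, error) {
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String(keyID), // e.g. "AKIAIOSFODNN7EXAMPLE"
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Account), nil
}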
715const opGetCallerIdentity = "GetCallerIdentity" 801const opGetCallerIdentity = "GetCallerIdentity"
716 802
717// GetCallerIdentityRequest generates a "aws/request.Request" representing the 803// GetCallerIdentityRequest generates a "aws/request.Request" representing the
@@ -834,81 +920,65 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
834// Returns a set of temporary security credentials (consisting of an access 920// Returns a set of temporary security credentials (consisting of an access
835// key ID, a secret access key, and a security token) for a federated user. 921// key ID, a secret access key, and a security token) for a federated user.
836// A typical use is in a proxy application that gets temporary security credentials 922// A typical use is in a proxy application that gets temporary security credentials
837// on behalf of distributed applications inside a corporate network. Because 923// on behalf of distributed applications inside a corporate network. You must
838// you must call the GetFederationToken action using the long-term security 924// call the GetFederationToken operation using the long-term security credentials
839// credentials of an IAM user, this call is appropriate in contexts where those 925// of an IAM user. As a result, this call is appropriate in contexts where those
840// credentials can be safely stored, usually in a server-based application. 926// credentials can be safely stored, usually in a server-based application.
841// For a comparison of GetFederationToken with the other APIs that produce temporary 927// For a comparison of GetFederationToken with the other API operations that
842// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) 928// produce temporary credentials, see Requesting Temporary Security Credentials
843// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) 929// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
930// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
844// in the IAM User Guide. 931// in the IAM User Guide.
845// 932//
846// If you are creating a mobile-based or browser-based app that can authenticate 933// You can create a mobile-based or browser-based app that can authenticate
847// users using a web identity provider like Login with Amazon, Facebook, Google, 934// users using a web identity provider like Login with Amazon, Facebook, Google,
848// or an OpenID Connect-compatible identity provider, we recommend that you 935// or an OpenID Connect-compatible identity provider. In this case, we recommend
849// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. 936// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
850// For more information, see Federation Through a Web-based Identity Provider 937// For more information, see Federation Through a Web-based Identity Provider
851// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). 938// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
852// 939//
853// The GetFederationToken action must be called by using the long-term AWS security 940// You can also call GetFederationToken using the security credentials of an
854// credentials of an IAM user. You can also call GetFederationToken using the 941// AWS account root user, but we do not recommend it. Instead, we recommend
855// security credentials of an AWS root account, but we do not recommended it. 942// that you create an IAM user for the purpose of the proxy application. Then
856// Instead, we recommend that you create an IAM user for the purpose of the 943// attach a policy to the IAM user that limits federated users to only the actions
857// proxy application and then attach a policy to the IAM user that limits federated 944// and resources that they need to access. For more information, see IAM Best
858// users to only the actions and resources that they need access to. For more 945// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
859// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
860// in the IAM User Guide. 946// in the IAM User Guide.
861// 947//
862// The temporary security credentials that are obtained by using the long-term 948// The temporary credentials are valid for the specified duration, from 900
863// credentials of an IAM user are valid for the specified duration, from 900 949// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
864// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). The default 950// is 43,200 seconds (12 hours). Temporary credentials that are obtained by
865// is 43200 seconds (12 hours). Temporary credentials that are obtained by using 951// using AWS account root user credentials have a maximum duration of 3,600
866// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). 952// seconds (1 hour).
867// 953//
868// The temporary security credentials created by GetFederationToken can be used 954// The temporary security credentials created by GetFederationToken can be used
869// to make API calls to any AWS service with the following exceptions: 955// to make API calls to any AWS service with the following exceptions:
870// 956//
871// * You cannot use these credentials to call any IAM APIs. 957// * You cannot use these credentials to call any IAM API operations.
872// 958//
873// * You cannot call any STS APIs except GetCallerIdentity. 959// * You cannot call any STS API operations except GetCallerIdentity.
874// 960//
875// Permissions 961// Permissions
876// 962//
877// The permissions for the temporary security credentials returned by GetFederationToken 963// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
878// are determined by a combination of the following: 964// to this operation. You can pass a single JSON policy document to use as an
879// 965// inline session policy. You can also specify up to 10 managed policies to
880// * The policy or policies that are attached to the IAM user whose credentials 966// use as managed session policies. The plain text that you use for both inline
881// are used to call GetFederationToken. 967// and managed session policies shouldn't exceed 2048 characters.
882// 968//
883// * The policy that is passed as a parameter in the call. 969// Though the session policy parameters are optional, if you do not pass a policy,
884// 970// then the resulting federated user session has no permissions. The only exception
885// The passed policy is attached to the temporary security credentials that 971// is when the credentials are used to access a resource that has a resource-based
886// result from the GetFederationToken API call--that is, to the federated user. 972// policy that specifically references the federated user session in the Principal
887// When the federated user makes an AWS request, AWS evaluates the policy attached 973// element of the policy. When you pass session policies, the session permissions
888// to the federated user in combination with the policy or policies attached 974// are the intersection of the IAM user policies and the session policies that
889// to the IAM user whose credentials were used to call GetFederationToken. AWS 975// you pass. This gives you a way to further restrict the permissions for a
890// allows the federated user's request only when both the federated user and 976// federated user. You cannot use session policies to grant more permissions
891// the IAM user are explicitly allowed to perform the requested action. The 977// than those that are defined in the permissions policy of the IAM user. For
892// passed policy cannot grant more permissions than those that are defined in 978// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
893// the IAM user policy. 979// in the IAM User Guide. For information about using GetFederationToken to
894// 980// create temporary security credentials, see GetFederationToken—Federation
895// A typical use case is that the permissions of the IAM user whose credentials 981// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
896// are used to call GetFederationToken are designed to allow access to all the
897// actions and resources that any federated user will need. Then, for individual
898// users, you pass a policy to the operation that scopes down the permissions
899// to a level that's appropriate to that individual user, using a policy that
900// allows only a subset of permissions that are granted to the IAM user.
901//
902// If you do not pass a policy, the resulting temporary security credentials
903// have no effective permissions. The only exception is when the temporary security
904// credentials are used to access a resource that has a resource-based policy
905// that specifically allows the federated user to access the resource.
906//
907// For more information about how permissions work, see Permissions for GetFederationToken
908// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
909// For information about using GetFederationToken to create temporary security
910// credentials, see GetFederationToken—Federation Through a Custom Identity
911// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
912// 982//
913// Returns awserr.Error for service API and SDK errors. Use runtime type assertions 983// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
914// with awserr.Error's Code and Message methods to get detailed information about 984// with awserr.Error's Code and Message methods to get detailed information about
@@ -931,7 +1001,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
931// STS is not activated in the requested region for the account that is being 1001// STS is not activated in the requested region for the account that is being
932// asked to generate credentials. The account administrator must use the IAM 1002// asked to generate credentials. The account administrator must use the IAM
933// console to activate STS in that region. For more information, see Activating 1003// console to activate STS in that region. For more information, see Activating
934// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 1004// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
935// in the IAM User Guide. 1005// in the IAM User Guide.
936// 1006//
937// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken 1007// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
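To make the session-policy intersection described above concrete, a sketch of a proxy-style GetFederationToken call. The helper name, federated user name, and policy document are illustrative assumptions; Name, Policy, and DurationSeconds are the generated input fields.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// federatedCreds returns temporary credentials whose permissions are the
// intersection of the calling IAM user's policies and the session policy.
func federatedCreds(svc *sts.STS) (*sts.Credentials, error) {
	// Without a session policy the federated session would have no
	// permissions, per the documentation above.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600), // 900 to 129,600 seconds for IAM users
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}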
@@ -1003,48 +1073,47 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
1003// Returns a set of temporary credentials for an AWS account or IAM user. The 1073// Returns a set of temporary credentials for an AWS account or IAM user. The
1004// credentials consist of an access key ID, a secret access key, and a security 1074// credentials consist of an access key ID, a secret access key, and a security
1005// token. Typically, you use GetSessionToken if you want to use MFA to protect 1075// token. Typically, you use GetSessionToken if you want to use MFA to protect
1006// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled 1076// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
1007// IAM users would need to call GetSessionToken and submit an MFA code that 1077// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
1008// is associated with their MFA device. Using the temporary security credentials 1078// code that is associated with their MFA device. Using the temporary security
1009// that are returned from the call, IAM users can then make programmatic calls 1079// credentials that are returned from the call, IAM users can then make programmatic
1010// to APIs that require MFA authentication. If you do not supply a correct MFA 1080// calls to API operations that require MFA authentication. If you do not supply
1011// code, then the API returns an access denied error. For a comparison of GetSessionToken 1081// a correct MFA code, then the API returns an access denied error. For a comparison
1012// with the other APIs that produce temporary credentials, see Requesting Temporary 1082// of GetSessionToken with the other API operations that produce temporary credentials,
1013// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) 1083// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
1014// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) 1084// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
1015// in the IAM User Guide. 1085// in the IAM User Guide.
1016// 1086//
1017// The GetSessionToken action must be called by using the long-term AWS security 1087// The GetSessionToken operation must be called by using the long-term AWS security
1018// credentials of the AWS account or an IAM user. Credentials that are created 1088// credentials of the AWS account root user or an IAM user. Credentials that
1019// by IAM users are valid for the duration that you specify, from 900 seconds 1089// are created by IAM users are valid for the duration that you specify. This
1020// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default 1090// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
1021// of 43200 seconds (12 hours); credentials that are created by using account 1091// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
1022// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 1092// based on account credentials can range from 900 seconds (15 minutes) up to
1023// seconds (1 hour), with a default of 1 hour. 1093// 3,600 seconds (1 hour), with a default of 1 hour.
1024// 1094//
1025// The temporary security credentials created by GetSessionToken can be used 1095// The temporary security credentials created by GetSessionToken can be used
1026// to make API calls to any AWS service with the following exceptions: 1096// to make API calls to any AWS service with the following exceptions:
1027// 1097//
1028// * You cannot call any IAM APIs unless MFA authentication information is 1098// * You cannot call any IAM API operations unless MFA authentication information
1029// included in the request. 1099// is included in the request.
1030// 1100//
1031// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. 1101// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
1032// 1102//
1033// We recommend that you do not call GetSessionToken with root account credentials. 1103// We recommend that you do not call GetSessionToken with AWS account root user
1034// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) 1104// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
1035// by creating one or more IAM users, giving them the necessary permissions, 1105// by creating one or more IAM users, giving them the necessary permissions,
1036// and using IAM users for everyday interaction with AWS. 1106// and using IAM users for everyday interaction with AWS.
1037// 1107//
1038// The permissions associated with the temporary security credentials returned 1108// The credentials that are returned by GetSessionToken are based on permissions
1039// by GetSessionToken are based on the permissions associated with account or 1109// associated with the user whose credentials were used to call the operation.
1040// IAM user whose credentials are used to call the action. If GetSessionToken 1110// If GetSessionToken is called using AWS account root user credentials, the
1041// is called using root account credentials, the temporary credentials have 1111// temporary credentials have root user permissions. Similarly, if GetSessionToken
1042// root account permissions. Similarly, if GetSessionToken is called using the 1112// is called using the credentials of an IAM user, the temporary credentials
1043// credentials of an IAM user, the temporary credentials have the same permissions 1113// have the same permissions as the IAM user.
1044// as the IAM user.
1045// 1114//
1046// For more information about using GetSessionToken to create temporary credentials, 1115// For more information about using GetSessionToken to create temporary credentials,
1047// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) 1116// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
1048// in the IAM User Guide. 1117// in the IAM User Guide.
1049// 1118//
1050// Returns awserr.Error for service API and SDK errors. Use runtime type assertions 1119// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1059,7 +1128,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
1059// STS is not activated in the requested region for the account that is being 1128// STS is not activated in the requested region for the account that is being
1060// asked to generate credentials. The account administrator must use the IAM 1129// asked to generate credentials. The account administrator must use the IAM
1061// console to activate STS in that region. For more information, see Activating 1130// console to activate STS in that region. For more information, see Activating
1062// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 1131// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
1063// in the IAM User Guide. 1132// in the IAM User Guide.
1064// 1133//
1065// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken 1134// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
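A sketch of the MFA-protected GetSessionToken flow described above. The helper name and MFA device ARN are placeholders; SerialNumber, TokenCode, and DurationSeconds are the generated input fields.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// mfaSessionCreds trades an MFA code for temporary credentials that can
// then call API operations requiring MFA authentication.
func mfaSessionCreds(svc *sts.STS, mfaCode string) (*sts.Credentials, error) {
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(43200), // the 12-hour default for IAM users
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:       aws.String(mfaCode),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}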
@@ -1094,7 +1163,7 @@ type AssumeRoleInput struct {
1094 // a session duration of 12 hours, but your administrator set the maximum session 1163 // a session duration of 12 hours, but your administrator set the maximum session
1095 // duration to 6 hours, your operation fails. To learn how to view the maximum 1164 // duration to 6 hours, your operation fails. To learn how to view the maximum
1096 // value for your role, see View the Maximum Session Duration Setting for a 1165 // value for your role, see View the Maximum Session Duration Setting for a
1097 // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) 1166 // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
1098 // in the IAM User Guide. 1167 // in the IAM User Guide.
1099 // 1168 //
1100 // By default, the value is set to 3600 seconds. 1169 // By default, the value is set to 3600 seconds.
@@ -1104,51 +1173,77 @@ type AssumeRoleInput struct {
1104 // to the federation endpoint for a console sign-in token takes a SessionDuration 1173 // to the federation endpoint for a console sign-in token takes a SessionDuration
1105 // parameter that specifies the maximum length of the console session. For more 1174 // parameter that specifies the maximum length of the console session. For more
1106 // information, see Creating a URL that Enables Federated Users to Access the 1175 // information, see Creating a URL that Enables Federated Users to Access the
1107 // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) 1176 // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1108 // in the IAM User Guide. 1177 // in the IAM User Guide.
1109 DurationSeconds *int64 `min:"900" type:"integer"` 1178 DurationSeconds *int64 `min:"900" type:"integer"`
1110 1179
1111 // A unique identifier that is used by third parties when assuming roles in 1180 // A unique identifier that might be required when you assume a role in another
1112 // their customers' accounts. For each role that the third party can assume, 1181 // account. If the administrator of the account to which the role belongs provided
1113 // they should instruct their customers to ensure the role's trust policy checks 1182 // you with an external ID, then provide that value in the ExternalId parameter.
1114 // for the external ID that the third party generated. Each time the third party 1183 // This value can be any string, such as a passphrase or account number. A cross-account
1115 // assumes the role, they should pass the customer's external ID. The external 1184 // role is usually set up to trust everyone in an account. Therefore, the administrator
1116 // ID is useful in order to help third parties bind a role to the customer who 1185 // of the trusting account might send an external ID to the administrator of
1117 // created it. For more information about the external ID, see How to Use an 1186 // the trusted account. That way, only someone with the ID can assume the role,
1118 // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) 1187 // rather than everyone in the account. For more information about the external
1188 // ID, see How to Use an External ID When Granting Access to Your AWS Resources
1189 // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
1119 // in the IAM User Guide. 1190 // in the IAM User Guide.
1120 // 1191 //
1121 // The regex used to validated this parameter is a string of characters consisting 1192 // The regex used to validate this parameter is a string of characters consisting
1122 // of upper- and lower-case alphanumeric characters with no spaces. You can 1193 // of upper- and lower-case alphanumeric characters with no spaces. You can
1123 // also include underscores or any of the following characters: =,.@:/- 1194 // also include underscores or any of the following characters: =,.@:/-
1124 ExternalId *string `min:"2" type:"string"` 1195 ExternalId *string `min:"2" type:"string"`
1125 1196
1126 // An IAM policy in JSON format. 1197 // An IAM policy in JSON format that you want to use as an inline session policy.
1127 // 1198 //
1128 // This parameter is optional. If you pass a policy, the temporary security 1199 // This parameter is optional. Passing policies to this operation returns new
1129 // credentials that are returned by the operation have the permissions that 1200 // temporary credentials. The resulting session's permissions are the intersection
1130 // are allowed by both (the intersection of) the access policy of the role that 1201 // of the role's identity-based policy and the session policies. You can use
1131 // is being assumed, and the policy that you pass. This gives you a way to further 1202 // the role's temporary credentials in subsequent AWS API calls to access resources
1132 // restrict the permissions for the resulting temporary security credentials. 1203 // in the account that owns the role. You cannot use session policies to grant
1133 // You cannot use the passed policy to grant permissions that are in excess 1204 // more permissions than those allowed by the identity-based policy of the role
1134 // of those allowed by the access policy of the role that is being assumed. 1205 // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
1135 // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
1136 // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
1137 // in the IAM User Guide. 1206 // in the IAM User Guide.
1138 // 1207 //
1139 // The format for this parameter, as described by its regex pattern, is a string 1208 // The plain text that you use for both inline and managed session policies
1140 // of characters up to 2048 characters in length. The characters can be any 1209 // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
1141 // ASCII character from the space character to the end of the valid character 1210 // character from the space character to the end of the valid character list
1142 // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), 1211 // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
1143 // and carriage return (\u000D) characters. 1212 // and carriage return (\u000D) characters.
1144 // 1213 //
1145 // The policy plain text must be 2048 bytes or shorter. However, an internal 1214 // The characters in this parameter count towards the 2048 character session
1146 // conversion compresses it into a packed binary format with a separate limit. 1215 // policy guideline. However, an AWS conversion compresses the session policies
1147 // The PackedPolicySize response element indicates by percentage how close to 1216 // into a packed binary format that has a separate limit. This is the enforced
1148 // the upper size limit the policy is, with 100% equaling the maximum allowed 1217 // limit. The PackedPolicySize response element indicates by percentage how
1149 // size. 1218 // close the policy is to the upper size limit.
1150 Policy *string `min:"1" type:"string"` 1219 Policy *string `min:"1" type:"string"`
1151 1220
1221 // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
1222 // to use as managed session policies. The policies must exist in the same account
1223 // as the role.
1224 //
1225 // This parameter is optional. You can provide up to 10 managed policy ARNs.
1226 // However, the plain text that you use for both inline and managed session
1227 // policies shouldn't exceed 2048 characters. For more information about ARNs,
1228 // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
1229 // in the AWS General Reference.
1230 //
1231 // The characters in this parameter count towards the 2048 character session
1232 // policy guideline. However, an AWS conversion compresses the session policies
1233 // into a packed binary format that has a separate limit. This is the enforced
1234 // limit. The PackedPolicySize response element indicates by percentage how
1235 // close the policy is to the upper size limit.
1236 //
1237 // Passing policies to this operation returns new temporary credentials. The
1238 // resulting session's permissions are the intersection of the role's identity-based
1239 // policy and the session policies. You can use the role's temporary credentials
1240 // in subsequent AWS API calls to access resources in the account that owns
1241 // the role. You cannot use session policies to grant more permissions than
1242 // those allowed by the identity-based policy of the role that is being assumed.
1243 // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
1244 // in the IAM User Guide.
1245 PolicyArns []*PolicyDescriptorType `type:"list"`
1246
1152 // The Amazon Resource Name (ARN) of the role to assume. 1247 // The Amazon Resource Name (ARN) of the role to assume.
1153 // 1248 //
1154 // RoleArn is a required field 1249 // RoleArn is a required field
@@ -1161,8 +1256,8 @@ type AssumeRoleInput struct {
1161 // scenarios, the role session name is visible to, and can be logged by the 1256 // scenarios, the role session name is visible to, and can be logged by the
1162 // account that owns the role. The role session name is also used in the ARN 1257 // account that owns the role. The role session name is also used in the ARN
1163 // of the assumed role principal. This means that subsequent cross-account API 1258 // of the assumed role principal. This means that subsequent cross-account API
1164 // requests using the temporary security credentials will expose the role session 1259 // requests that use the temporary security credentials will expose the role
1165 // name to the external account in their CloudTrail logs. 1260 // session name to the external account in their AWS CloudTrail logs.
1166 // 1261 //
1167 // The regex used to validate this parameter is a string of characters consisting 1262 // The regex used to validate this parameter is a string of characters consisting
1168 // of upper- and lower-case alphanumeric characters with no spaces. You can 1263 // of upper- and lower-case alphanumeric characters with no spaces. You can
@@ -1232,6 +1327,16 @@ func (s *AssumeRoleInput) Validate() error {
1232 if s.TokenCode != nil && len(*s.TokenCode) < 6 { 1327 if s.TokenCode != nil && len(*s.TokenCode) < 6 {
1233 invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) 1328 invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
1234 } 1329 }
1330 if s.PolicyArns != nil {
1331 for i, v := range s.PolicyArns {
1332 if v == nil {
1333 continue
1334 }
1335 if err := v.Validate(); err != nil {
1336 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
1337 }
1338 }
1339 }
1235 1340
1236 if invalidParams.Len() > 0 { 1341 if invalidParams.Len() > 0 {
1237 return invalidParams 1342 return invalidParams
@@ -1257,6 +1362,12 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
1257 return s 1362 return s
1258} 1363}
1259 1364
1365// SetPolicyArns sets the PolicyArns field's value.
1366func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
1367 s.PolicyArns = v
1368 return s
1369}
1370
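The new SetPolicyArns setter composes with the existing fluent setters; a sketch with placeholder ARNs and an illustrative helper name. PolicyDescriptorType and its Arn field are defined elsewhere in this diff.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithSessionPolicies assumes a role while attaching a managed
// session policy, so the session receives the intersection of the role's
// identity-based policy and the listed policy.
func assumeWithSessionPolicies(svc *sts.STS) (*sts.AssumeRoleOutput, error) {
	input := (&sts.AssumeRoleInput{}).
		SetRoleArn("arn:aws:iam::123456789012:role/example-role").
		SetRoleSessionName("example-session").
		SetExternalId("example-external-id"). // only when the role's trust policy requires one
		SetPolicyArns([]*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::123456789012:policy/example-policy")},
		})
	return svc.AssumeRole(input)
}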
1260// SetRoleArn sets the RoleArn field's value. 1371// SetRoleArn sets the RoleArn field's value.
1261func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { 1372func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
1262 s.RoleArn = &v 1373 s.RoleArn = &v
@@ -1296,10 +1407,8 @@ type AssumeRoleOutput struct {
1296 // The temporary security credentials, which include an access key ID, a secret 1407 // The temporary security credentials, which include an access key ID, a secret
1297 // access key, and a security (or session) token. 1408 // access key, and a security (or session) token.
1298 // 1409 //
1299 // Note: The size of the security token that STS APIs return is not fixed. We 1410 // The size of the security token that STS API operations return is not fixed.
1300 // strongly recommend that you make no assumptions about the maximum size. As 1411 // We strongly recommend that you make no assumptions about the maximum size.
1301 // of this writing, the typical size is less than 4096 bytes, but that can vary.
1302 // Also, future updates to AWS might require larger sizes.
1303 Credentials *Credentials `type:"structure"` 1412 Credentials *Credentials `type:"structure"`
1304 1413
1305 // A percentage value that indicates the size of the policy in packed form. 1414 // A percentage value that indicates the size of the policy in packed form.
@@ -1349,7 +1458,7 @@ type AssumeRoleWithSAMLInput struct {
1349 // specify a session duration of 12 hours, but your administrator set the maximum 1458 // specify a session duration of 12 hours, but your administrator set the maximum
1350 // session duration to 6 hours, your operation fails. To learn how to view the 1459 // session duration to 6 hours, your operation fails. To learn how to view the
1351 // maximum value for your role, see View the Maximum Session Duration Setting 1460 // maximum value for your role, see View the Maximum Session Duration Setting
1352 // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) 1461 // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
1353 // in the IAM User Guide. 1462 // in the IAM User Guide.
1354 // 1463 //
1355 // By default, the value is set to 3600 seconds. 1464 // By default, the value is set to 3600 seconds.
@@ -1359,36 +1468,60 @@ type AssumeRoleWithSAMLInput struct {
1359 // to the federation endpoint for a console sign-in token takes a SessionDuration 1468 // to the federation endpoint for a console sign-in token takes a SessionDuration
1360 // parameter that specifies the maximum length of the console session. For more 1469 // parameter that specifies the maximum length of the console session. For more
1361 // information, see Creating a URL that Enables Federated Users to Access the 1470 // information, see Creating a URL that Enables Federated Users to Access the
1362 // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) 1471 // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1363 // in the IAM User Guide. 1472 // in the IAM User Guide.
1364 DurationSeconds *int64 `min:"900" type:"integer"` 1473 DurationSeconds *int64 `min:"900" type:"integer"`
1365 1474
1366 // An IAM policy in JSON format. 1475 // An IAM policy in JSON format that you want to use as an inline session policy.
1367 // 1476 //
1368 // The policy parameter is optional. If you pass a policy, the temporary security 1477 // This parameter is optional. Passing policies to this operation returns new
1369 // credentials that are returned by the operation have the permissions that 1478 // temporary credentials. The resulting session's permissions are the intersection
1370 // are allowed by both the access policy of the role that is being assumed, 1479 // of the role's identity-based policy and the session policies. You can use
1371 // and the policy that you pass. This gives you a way to further restrict the 1480 // the role's temporary credentials in subsequent AWS API calls to access resources
1372 // permissions for the resulting temporary security credentials. You cannot 1481 // in the account that owns the role. You cannot use session policies to grant
1373 // use the passed policy to grant permissions that are in excess of those allowed 1482 // more permissions than those allowed by the identity-based policy of the role
1374 // by the access policy of the role that is being assumed. For more information, 1483 // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
1375 // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
1376 // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
1377 // in the IAM User Guide. 1484 // in the IAM User Guide.
1378 // 1485 //
1379 // The format for this parameter, as described by its regex pattern, is a string 1486 // The plain text that you use for both inline and managed session policies
1380 // of characters up to 2048 characters in length. The characters can be any 1487 // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
1381 // ASCII character from the space character to the end of the valid character 1488 // character from the space character to the end of the valid character list
1382 // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), 1489 // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
1383 // and carriage return (\u000D) characters. 1490 // and carriage return (\u000D) characters.
1384 // 1491 //
1385 // The policy plain text must be 2048 bytes or shorter. However, an internal 1492 // The characters in this parameter count towards the 2048 character session
1386 // conversion compresses it into a packed binary format with a separate limit. 1493 // policy guideline. However, an AWS conversion compresses the session policies
1387 // The PackedPolicySize response element indicates by percentage how close to 1494 // into a packed binary format that has a separate limit. This is the enforced
1388 // the upper size limit the policy is, with 100% equaling the maximum allowed 1495 // limit. The PackedPolicySize response element indicates by percentage how
1389 // size. 1496 // close the policy is to the upper size limit.
1390 Policy *string `min:"1" type:"string"` 1497 Policy *string `min:"1" type:"string"`
1391 1498
1499 // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
1500 // to use as managed session policies. The policies must exist in the same account
1501 // as the role.
1502 //
1503 // This parameter is optional. You can provide up to 10 managed policy ARNs.
1504 // However, the plain text that you use for both inline and managed session
1505 // policies shouldn't exceed 2048 characters. For more information about ARNs,
1506 // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
1507 // in the AWS General Reference.
1508 //
1509 // The characters in this parameter count towards the 2048 character session
1510 // policy guideline. However, an AWS conversion compresses the session policies
1511 // into a packed binary format that has a separate limit. This is the enforced
1512 // limit. The PackedPolicySize response element indicates by percentage how
1513 // close the policy is to the upper size limit.
1514 //
1515 // Passing policies to this operation returns new temporary credentials. The
1516 // resulting session's permissions are the intersection of the role's identity-based
1517 // policy and the session policies. You can use the role's temporary credentials
1518 // in subsequent AWS API calls to access resources in the account that owns
1519 // the role. You cannot use session policies to grant more permissions than
1520 // those allowed by the identity-based policy of the role that is being assumed.
1521 // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
1522 // in the IAM User Guide.
1523 PolicyArns []*PolicyDescriptorType `type:"list"`
1524
1392 // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes 1525 // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
1393 // the IdP. 1526 // the IdP.
1394 // 1527 //
@@ -1402,8 +1535,8 @@ type AssumeRoleWithSAMLInput struct {
1402 1535
1403 // The base-64 encoded SAML authentication response provided by the IdP. 1536 // The base-64 encoded SAML authentication response provided by the IdP.
1404 // 1537 //
1405 // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) 1538 // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
1406 // in the Using IAM guide. 1539 // in the IAM User Guide.
1407 // 1540 //
1408 // SAMLAssertion is a required field 1541 // SAMLAssertion is a required field
1409 SAMLAssertion *string `min:"4" type:"string" required:"true"` 1542 SAMLAssertion *string `min:"4" type:"string" required:"true"`
@@ -1446,6 +1579,16 @@ func (s *AssumeRoleWithSAMLInput) Validate() error {
1446 if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { 1579 if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
1447 invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) 1580 invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
1448 } 1581 }
1582 if s.PolicyArns != nil {
1583 for i, v := range s.PolicyArns {
1584 if v == nil {
1585 continue
1586 }
1587 if err := v.Validate(); err != nil {
1588 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
1589 }
1590 }
1591 }
1449 1592
1450 if invalidParams.Len() > 0 { 1593 if invalidParams.Len() > 0 {
1451 return invalidParams 1594 return invalidParams
@@ -1465,6 +1608,12 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
1465 return s 1608 return s
1466} 1609}
1467 1610
1611// SetPolicyArns sets the PolicyArns field's value.
1612func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput {
1613 s.PolicyArns = v
1614 return s
1615}
1616
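A sketch of the SAML variant; the helper name and ARNs are placeholders, and samlAssertion stands in for the base64-encoded SAMLResponse from the IdP.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeViaSAML exchanges a base64-encoded SAML assertion for temporary
// credentials for the given role.
func assumeViaSAML(svc *sts.STS, samlAssertion string) (*sts.AssumeRoleWithSAMLOutput, error) {
	return svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-role"),
		SAMLAssertion: aws.String(samlAssertion),
	})
}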
1468// SetPrincipalArn sets the PrincipalArn field's value. 1617// SetPrincipalArn sets the PrincipalArn field's value.
1469func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { 1618func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
1470 s.PrincipalArn = &v 1619 s.PrincipalArn = &v
@@ -1499,10 +1648,8 @@ type AssumeRoleWithSAMLOutput struct {
1499 // The temporary security credentials, which include an access key ID, a secret 1648 // The temporary security credentials, which include an access key ID, a secret
1500 // access key, and a security (or session) token. 1649 // access key, and a security (or session) token.
1501 // 1650 //
1502 // Note: The size of the security token that STS APIs return is not fixed. We 1651 // The size of the security token that STS API operations return is not fixed.
1503 // strongly recommend that you make no assumptions about the maximum size. As 1652 // We strongly recommend that you make no assumptions about the maximum size.
1504 // of this writing, the typical size is less than 4096 bytes, but that can vary.
1505 // Also, future updates to AWS might require larger sizes.
1506 Credentials *Credentials `type:"structure"` 1653 Credentials *Credentials `type:"structure"`
1507 1654
1508 // The value of the Issuer element of the SAML assertion. 1655 // The value of the Issuer element of the SAML assertion.
@@ -1606,7 +1753,7 @@ type AssumeRoleWithWebIdentityInput struct {
1606 // a session duration of 12 hours, but your administrator set the maximum session 1753 // a session duration of 12 hours, but your administrator set the maximum session
1607 // duration to 6 hours, your operation fails. To learn how to view the maximum 1754 // duration to 6 hours, your operation fails. To learn how to view the maximum
1608 // value for your role, see View the Maximum Session Duration Setting for a 1755 // value for your role, see View the Maximum Session Duration Setting for a
1609 // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) 1756 // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
1610 // in the IAM User Guide. 1757 // in the IAM User Guide.
1611 // 1758 //
1612 // By default, the value is set to 3600 seconds. 1759 // By default, the value is set to 3600 seconds.
@@ -1616,35 +1763,60 @@ type AssumeRoleWithWebIdentityInput struct {
1616 // to the federation endpoint for a console sign-in token takes a SessionDuration 1763 // to the federation endpoint for a console sign-in token takes a SessionDuration
1617 // parameter that specifies the maximum length of the console session. For more 1764 // parameter that specifies the maximum length of the console session. For more
1618 // information, see Creating a URL that Enables Federated Users to Access the 1765 // information, see Creating a URL that Enables Federated Users to Access the
1619 // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) 1766 // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1620 // in the IAM User Guide. 1767 // in the IAM User Guide.
1621 DurationSeconds *int64 `min:"900" type:"integer"` 1768 DurationSeconds *int64 `min:"900" type:"integer"`
1622 1769
1623 // An IAM policy in JSON format. 1770 // An IAM policy in JSON format that you want to use as an inline session policy.
1624 // 1771 //
1625 // The policy parameter is optional. If you pass a policy, the temporary security 1772 // This parameter is optional. Passing policies to this operation returns new
1626 // credentials that are returned by the operation have the permissions that 1773 // temporary credentials. The resulting session's permissions are the intersection
1627 // are allowed by both the access policy of the role that is being assumed, 1774 // of the role's identity-based policy and the session policies. You can use
1628 // and the policy that you pass. This gives you a way to further restrict the 1775 // the role's temporary credentials in subsequent AWS API calls to access resources
1629 // permissions for the resulting temporary security credentials. You cannot 1776 // in the account that owns the role. You cannot use session policies to grant
1630 // use the passed policy to grant permissions that are in excess of those allowed 1777 // more permissions than those allowed by the identity-based policy of the role
1631 // by the access policy of the role that is being assumed. For more information, 1778 // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
1632 // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
1633 // in the IAM User Guide. 1779 // in the IAM User Guide.
1634 // 1780 //
1635 // The format for this parameter, as described by its regex pattern, is a string 1781 // The plain text that you use for both inline and managed session policies
1636 // of characters up to 2048 characters in length. The characters can be any 1782 // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
1637 // ASCII character from the space character to the end of the valid character 1783 // character from the space character to the end of the valid character list
1638 // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), 1784 // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
1639 // and carriage return (\u000D) characters. 1785 // and carriage return (\u000D) characters.
1640 // 1786 //
1641 // The policy plain text must be 2048 bytes or shorter. However, an internal 1787 // The characters in this parameter count towards the 2048 character session
1642 // conversion compresses it into a packed binary format with a separate limit. 1788 // policy guideline. However, an AWS conversion compresses the session policies
1643 // The PackedPolicySize response element indicates by percentage how close to 1789 // into a packed binary format that has a separate limit. This is the enforced
1644 // the upper size limit the policy is, with 100% equaling the maximum allowed 1790 // limit. The PackedPolicySize response element indicates by percentage how
1645 // size. 1791 // close the policy is to the upper size limit.
1646 Policy *string `min:"1" type:"string"` 1792 Policy *string `min:"1" type:"string"`
1647 1793
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+	// to use as managed session policies. The policies must exist in the same account
+	// as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies shouldn't exceed 2048 characters. For more information about ARNs,
+	// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	//
+	// The characters in this parameter count towards the 2048 character session
+	// policy guideline. However, an AWS conversion compresses the session policies
+	// into a packed binary format that has a separate limit. This is the enforced
+	// limit. The PackedPolicySize response element indicates by percentage how
+	// close the policy is to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*PolicyDescriptorType `type:"list"`
+
 	// The fully qualified host component of the domain name of the identity provider.
 	//
 	// Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
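The new Policy and PolicyArns fields compose: the session's effective permissions are the intersection described above. A minimal sketch of exercising both from this SDK version (not part of the vendored diff; the role ARN, token, and policy ARN are placeholders):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		svc := sts.New(session.Must(session.NewSession()))
		out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
			RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"), // placeholder
			RoleSessionName:  aws.String("web-identity-demo"),
			WebIdentityToken: aws.String("<oidc-token>"), // placeholder
			// Inline session policy plus one managed session policy.
			Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
			PolicyArns: []*sts.PolicyDescriptorType{
				{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
			},
		})
		if err != nil {
			fmt.Println("assume role failed:", err)
			return
		}
		fmt.Println("expires:", aws.TimeValue(out.Credentials.Expiration))
	}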
@@ -1721,6 +1893,16 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error {
 	if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
 		invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
 	}
+	if s.PolicyArns != nil {
+		for i, v := range s.PolicyArns {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -1740,6 +1922,12 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI
 	return s
 }

+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput {
+	s.PolicyArns = v
+	return s
+}
+
 // SetProviderId sets the ProviderId field's value.
 func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
 	s.ProviderId = &v
@@ -1784,10 +1972,8 @@ type AssumeRoleWithWebIdentityOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security token.
 	//
-	// Note: The size of the security token that STS APIs return is not fixed. We
-	// strongly recommend that you make no assumptions about the maximum size. As
-	// of this writing, the typical size is less than 4096 bytes, but that can vary.
-	// Also, future updates to AWS might require larger sizes.
+	// The size of the security token that STS API operations return is not fixed.
+	// We strongly recommend that you make no assumptions about the maximum size.
 	Credentials *Credentials `type:"structure"`

 	// A percentage value that indicates the size of the policy in packed form.
@@ -1796,7 +1982,7 @@ type AssumeRoleWithWebIdentityOutput struct {
 	PackedPolicySize *int64 `type:"integer"`

 	// The issuing authority of the web identity token presented. For OpenID Connect
-	// ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+	// ID tokens, this contains the value of the iss field. For OAuth 2.0 access
 	// tokens, this contains the value of the ProviderId parameter that was passed
 	// in the AssumeRoleWithWebIdentity request.
 	Provider *string `type:"string"`
@@ -1863,7 +2049,7 @@ type AssumedRoleUser struct {

 	// The ARN of the temporary security credentials that are returned from the
 	// AssumeRole action. For more information about ARNs and how to use them in
-	// policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
 	// in Using IAM.
 	//
 	// Arn is a required field
@@ -2031,7 +2217,7 @@ type FederatedUser struct {

 	// The ARN that specifies the federated user that is associated with the credentials.
 	// For more information about ARNs and how to use them in policies, see IAM
-	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
 	// in Using IAM.
 	//
 	// Arn is a required field
@@ -2066,6 +2252,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
 	return s
 }

+type GetAccessKeyInfoInput struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier of an access key.
+	//
+	// This parameter allows (through its regex pattern) a string of characters
+	// that can consist of any upper- or lowercased letter or digit.
+	//
+	// AccessKeyId is a required field
+	AccessKeyId *string `min:"16" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAccessKeyInfoInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccessKeyInfoInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAccessKeyInfoInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"}
+	if s.AccessKeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
+	}
+	if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 {
+		invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput {
+	s.AccessKeyId = &v
+	return s
+}
+
+type GetAccessKeyInfoOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The number used to identify the AWS account.
+	Account *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetAccessKeyInfoOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccessKeyInfoOutput) GoString() string {
+	return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput {
+	s.Account = &v
+	return s
+}
+
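GetAccessKeyInfo, added above, maps an access key ID to the account that owns it. A minimal sketch (the key ID is AWS's documented example value):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		svc := sts.New(session.Must(session.NewSession()))
		out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
			AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID
		})
		if err != nil {
			fmt.Println("lookup failed:", err)
			return
		}
		fmt.Println("owning account:", aws.StringValue(out.Account))
	}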
 type GetCallerIdentityInput struct {
 	_ struct{} `type:"structure"`
 }
@@ -2093,8 +2346,8 @@ type GetCallerIdentityOutput struct {
 	Arn *string `min:"20" type:"string"`

 	// The unique identifier of the calling entity. The exact value depends on the
-	// type of entity making the call. The values returned are those listed in the
-	// aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+	// type of entity that is making the call. The values returned are those listed
+	// in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
 	// found on the Policy Variables reference page in the IAM User Guide.
 	UserId *string `type:"string"`
 }
@@ -2131,12 +2384,11 @@ type GetFederationTokenInput struct {
 	_ struct{} `type:"structure"`

 	// The duration, in seconds, that the session should last. Acceptable durations
-	// for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
-	// (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
-	// using AWS account (root) credentials are restricted to a maximum of 3600
-	// seconds (one hour). If the specified duration is longer than one hour, the
-	// session obtained by using AWS account (root) credentials defaults to one
-	// hour.
+	// for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+	// (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+	// using AWS account root user credentials are restricted to a maximum of 3,600
+	// seconds (one hour). If the specified duration is longer than one hour, the
+	// session obtained by using root user credentials defaults to one hour.
 	DurationSeconds *int64 `min:"900" type:"integer"`

 	// The name of the federated user. The name is used as an identifier for the
@@ -2151,36 +2403,73 @@ type GetFederationTokenInput struct {
 	// Name is a required field
 	Name *string `min:"2" type:"string" required:"true"`

-	// An IAM policy in JSON format that is passed with the GetFederationToken call
-	// and evaluated along with the policy or policies that are attached to the
-	// IAM user whose credentials are used to call GetFederationToken. The passed
-	// policy is used to scope down the permissions that are available to the IAM
-	// user, by allowing only a subset of the permissions that are granted to the
-	// IAM user. The passed policy cannot grant more permissions than those granted
-	// to the IAM user. The final permissions for the federated user are the most
-	// restrictive set based on the intersection of the passed policy and the IAM
-	// user policy.
-	//
-	// If you do not pass a policy, the resulting temporary security credentials
-	// have no effective permissions. The only exception is when the temporary security
-	// credentials are used to access a resource that has a resource-based policy
-	// that specifically allows the federated user to access the resource.
-	//
-	// The format for this parameter, as described by its regex pattern, is a string
-	// of characters up to 2048 characters in length. The characters can be any
-	// ASCII character from the space character to the end of the valid character
-	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
-	// and carriage return (\u000D) characters.
-	//
-	// The policy plain text must be 2048 bytes or shorter. However, an internal
-	// conversion compresses it into a packed binary format with a separate limit.
-	// The PackedPolicySize response element indicates by percentage how close to
-	// the upper size limit the policy is, with 100% equaling the maximum allowed
-	// size.
-	//
-	// For more information about how permissions work, see Permissions for GetFederationToken
-	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	//
+	// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// to this operation. You can pass a single JSON policy document to use as an
+	// inline session policy. You can also specify up to 10 managed policies to
+	// use as managed session policies.
+	//
+	// This parameter is optional. However, if you do not pass any session policies,
+	// then the resulting federated user session has no permissions. The only exception
+	// is when the credentials are used to access a resource that has a resource-based
+	// policy that specifically references the federated user session in the Principal
+	// element of the policy.
+	//
+	// When you pass session policies, the session permissions are the intersection
+	// of the IAM user policies and the session policies that you pass. This gives
+	// you a way to further restrict the permissions for a federated user. You cannot
+	// use session policies to grant more permissions than those that are defined
+	// in the permissions policy of the IAM user. For more information, see Session
+	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	//
+	// The plain text that you use for both inline and managed session policies
+	// shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+	// character from the space character to the end of the valid character list
+	// (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+	//
+	// The characters in this parameter count towards the 2048 character session
+	// policy guideline. However, an AWS conversion compresses the session policies
+	// into a packed binary format that has a separate limit. This is the enforced
+	// limit. The PackedPolicySize response element indicates by percentage how
+	// close the policy is to the upper size limit.
 	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+	// to use as a managed session policy. The policies must exist in the same account
+	// as the IAM user that is requesting federated access.
+	//
+	// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// to this operation. You can pass a single JSON policy document to use as an
+	// inline session policy. You can also specify up to 10 managed policies to
+	// use as managed session policies. The plain text that you use for both inline
+	// and managed session policies shouldn't exceed 2048 characters. You can provide
+	// up to 10 managed policy ARNs. For more information about ARNs, see Amazon
+	// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	//
+	// This parameter is optional. However, if you do not pass any session policies,
+	// then the resulting federated user session has no permissions. The only exception
+	// is when the credentials are used to access a resource that has a resource-based
+	// policy that specifically references the federated user session in the Principal
+	// element of the policy.
+	//
+	// When you pass session policies, the session permissions are the intersection
+	// of the IAM user policies and the session policies that you pass. This gives
+	// you a way to further restrict the permissions for a federated user. You cannot
+	// use session policies to grant more permissions than those that are defined
+	// in the permissions policy of the IAM user. For more information, see Session
+	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	//
+	// The characters in this parameter count towards the 2048 character session
+	// policy guideline. However, an AWS conversion compresses the session policies
+	// into a packed binary format that has a separate limit. This is the enforced
+	// limit. The PackedPolicySize response element indicates by percentage how
+	// close the policy is to the upper size limit.
+	PolicyArns []*PolicyDescriptorType `type:"list"`
 }

 // String returns the string representation
@@ -2208,6 +2497,16 @@ func (s *GetFederationTokenInput) Validate() error {
 	if s.Policy != nil && len(*s.Policy) < 1 {
 		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
 	}
+	if s.PolicyArns != nil {
+		for i, v := range s.PolicyArns {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -2233,6 +2532,12 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
 	return s
 }

+// SetPolicyArns sets the PolicyArns field's value.
+func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput {
+	s.PolicyArns = v
+	return s
+}
+
 // Contains the response to a successful GetFederationToken request, including
 // temporary AWS credentials that can be used to make AWS requests.
 type GetFederationTokenOutput struct {
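A sketch of a GetFederationToken call that supplies a managed session policy; without at least one inline or managed policy the federated session has no permissions (the user name and policy ARN are placeholders):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		// Requires an IAM user's long-term credentials in the default chain.
		svc := sts.New(session.Must(session.NewSession()))
		out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
			Name:            aws.String("demo-federated-user"),
			DurationSeconds: aws.Int64(3600),
			PolicyArns: []*sts.PolicyDescriptorType{
				{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
			},
		})
		if err != nil {
			fmt.Println("GetFederationToken failed:", err)
			return
		}
		fmt.Println("federated user:", aws.StringValue(out.FederatedUser.Arn))
	}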
@@ -2241,10 +2546,8 @@ type GetFederationTokenOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	// Note: The size of the security token that STS APIs return is not fixed. We
-	// strongly recommend that you make no assumptions about the maximum size. As
-	// of this writing, the typical size is less than 4096 bytes, but that can vary.
-	// Also, future updates to AWS might require larger sizes.
+	// The size of the security token that STS API operations return is not fixed.
+	// We strongly recommend that you make no assumptions about the maximum size.
 	Credentials *Credentials `type:"structure"`

 	// Identifiers for the federated user associated with the credentials (such
@@ -2291,11 +2594,11 @@ type GetSessionTokenInput struct {
 	_ struct{} `type:"structure"`

 	// The duration, in seconds, that the credentials should remain valid. Acceptable
-	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
-	// seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
-	// for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
-	// If the duration is longer than one hour, the session for AWS account owners
-	// defaults to one hour.
+	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+	// seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
+	// for AWS account owners are restricted to a maximum of 3,600 seconds (one
+	// hour). If the duration is longer than one hour, the session for AWS account
+	// owners defaults to one hour.
 	DurationSeconds *int64 `min:"900" type:"integer"`

 	// The identification number of the MFA device that is associated with the IAM
@@ -2306,16 +2609,16 @@ type GetSessionTokenInput struct {
 	// You can find the device for an IAM user by going to the AWS Management Console
 	// and viewing the user's security credentials.
 	//
-	// The regex used to validated this parameter is a string of characters consisting
+	// The regex used to validate this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
 	// also include underscores or any of the following characters: =,.@:/-
 	SerialNumber *string `min:"9" type:"string"`

 	// The value provided by the MFA device, if MFA is required. If any policy requires
 	// the IAM user to submit an MFA code, specify this value. If MFA authentication
-	// is required, and the user does not provide a code when requesting a set of
-	// temporary security credentials, the user will receive an "access denied"
-	// response when requesting resources that require MFA authentication.
+	// is required, the user must provide a code when requesting a set of temporary
+	// security credentials. A user who fails to provide the code receives an "access
+	// denied" response when requesting resources that require MFA authentication.
 	//
 	// The format for this parameter, as described by its regex pattern, is a sequence
 	// of six numeric digits.
@@ -2377,10 +2680,8 @@ type GetSessionTokenOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	// Note: The size of the security token that STS APIs return is not fixed. We
-	// strongly recommend that you make no assumptions about the maximum size. As
-	// of this writing, the typical size is less than 4096 bytes, but that can vary.
-	// Also, future updates to AWS might require larger sizes.
+	// The size of the security token that STS API operations return is not fixed.
+	// We strongly recommend that you make no assumptions about the maximum size.
 	Credentials *Credentials `type:"structure"`
 }

@@ -2399,3 +2700,44 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO
 	s.Credentials = v
 	return s
 }
+
+// A reference to the IAM managed policy that is passed as a session policy
+// for a role session or a federated user session.
+type PolicyDescriptorType struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+	// policy for the role. For more information about ARNs, see Amazon Resource
+	// Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	Arn *string `locationName:"arn" min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyDescriptorType) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyDescriptorType) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PolicyDescriptorType) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"}
+	if s.Arn != nil && len(*s.Arn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("Arn", 20))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetArn sets the Arn field's value.
+func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
+	s.Arn = &v
+	return s
+}
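Since PolicyDescriptorType.Validate enforces the 20-character minimum on Arn, a malformed ARN is rejected client-side before the request is signed. A small sketch with a deliberately short placeholder ARN:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		in := &sts.GetFederationTokenInput{
			Name: aws.String("demo"),
			// "arn:too:short" is under the 20-character minimum.
			PolicyArns: []*sts.PolicyDescriptorType{{Arn: aws.String("arn:too:short")}},
		}
		if err := in.Validate(); err != nil {
			fmt.Println(err) // reports PolicyArns[0] as invalid
		}
	}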
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
index ef681ab..fcb720d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -7,22 +7,14 @@
 // request temporary, limited-privilege credentials for AWS Identity and Access
 // Management (IAM) users or for users that you authenticate (federated users).
 // This guide provides descriptions of the STS API. For more detailed information
-// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
-//
-// As an alternative to using the API, you can use one of the AWS SDKs, which
-// consist of libraries and sample code for various programming languages and
-// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
-// way to create programmatic access to STS. For example, the SDKs take care
-// of cryptographically signing requests, managing errors, and retrying requests
-// automatically. For information about the AWS SDKs, including how to download
-// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
 //
 // For information about setting up signatures and authorization through the
-// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
 // in the AWS General Reference. For general information about the Query API,
-// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
 // in Using IAM. For information about using security tokens with other AWS
-// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
 // in the IAM User Guide.
 //
 // If you're new to AWS and need additional technical information about a specific
@@ -31,14 +23,38 @@
 //
 // Endpoints
 //
-// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
-// that maps to the US East (N. Virginia) region. Additional regions are available
-// and are activated by default. For more information, see Activating and Deactivating
-// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// By default, AWS Security Token Service (STS) is available as a global service,
+// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
+// Global requests map to the US East (N. Virginia) region. AWS recommends using
+// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
+// build in redundancy, and increase session token validity. For more information,
+// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Most AWS Regions are enabled for operations in all AWS services by default.
+// Those Regions are automatically activated for use with AWS STS. Some Regions,
+// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
+// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
+// in the AWS General Reference. When you enable these AWS Regions, they are
+// automatically activated for use with AWS STS. You cannot activate the STS
+// endpoint for a Region that is disabled. Tokens that are valid in all AWS
+// Regions are longer than tokens that are valid in Regions that are enabled
+// by default. Changing this setting might affect existing systems where you
+// temporarily store tokens. For more information, see Managing Global Endpoint
+// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
 // in the IAM User Guide.
 //
-// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
-// in the AWS General Reference.
+// After you activate a Region for use with AWS STS, you can direct AWS STS
+// API calls to that Region. AWS STS recommends that you provide both the Region
+// and endpoint when you make calls to a Regional endpoint. You can provide
+// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
+// Kong). In this case, the calls are directed to the STS Regional endpoint.
+// However, if you provide the Region alone for Regions enabled by default,
+// the calls are directed to the global endpoint of https://sts.amazonaws.com.
+//
+// To view the list of AWS STS endpoints and whether they are active by default,
+// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
+// in the IAM User Guide.
 //
 // Recording API requests
 //
@@ -46,8 +62,28 @@
 // your AWS account and delivers log files to an Amazon S3 bucket. By using
 // information collected by CloudTrail, you can determine what requests were
 // successfully made to STS, who made the request, when it was made, and so
-// on. To learn more about CloudTrail, including how to turn it on and find
-// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+// on.
+//
+// If you activate AWS STS endpoints in Regions other than the default global
+// endpoint, then you must also turn on CloudTrail logging in those Regions.
+// This is necessary to record any AWS STS API calls that are made in those
+// Regions. For more information, see Turning On CloudTrail in Additional Regions
+// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
+// in the AWS CloudTrail User Guide.
+//
+// AWS Security Token Service (STS) is a global service with a single endpoint
+// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
+// to a global service. However, because this endpoint is physically located
+// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
+// Region. CloudTrail does not write these logs to the US East (Ohio) Region
+// unless you choose to include global service logs in that Region. CloudTrail
+// writes calls to all Regional endpoints to their respective Regions. For example,
+// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
+// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
+// (Frankfurt) Region.
+//
+// To learn more about CloudTrail, including how to turn it on and find your
+// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
 //
 // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
 //
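A minimal sketch of pinning a client to a Regional endpoint, as the rewritten doc text recommends; the region and endpoint values are examples:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		// Provide both Region and Endpoint so calls go to the Regional
		// endpoint instead of the global https://sts.amazonaws.com.
		sess := session.Must(session.NewSession(&aws.Config{
			Region:   aws.String("eu-central-1"),
			Endpoint: aws.String("https://sts.eu-central-1.amazonaws.com"),
		}))
		out, err := sts.New(sess).GetCallerIdentity(&sts.GetCallerIdentityInput{})
		if err != nil {
			fmt.Println("call failed:", err)
			return
		}
		fmt.Println("caller:", aws.StringValue(out.Arn))
	}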
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
index e24884e..41ea09c 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -67,7 +67,7 @@ const (
 	// STS is not activated in the requested region for the account that is being
 	// asked to generate credentials. The account administrator must use the IAM
 	// console to activate STS in that region. For more information, see Activating
-	// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 	// in the IAM User Guide.
 	ErrCodeRegionDisabledException = "RegionDisabledException"
 )
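Code calling STS in a manually enabled Region typically checks for this error code. A sketch using the SDK's awserr package:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/awserr"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sts"
	)

	func main() {
		svc := sts.New(session.Must(session.NewSession()))
		_, err := svc.GetSessionToken(&sts.GetSessionTokenInput{})
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sts.ErrCodeRegionDisabledException {
			fmt.Println("STS is not activated in this region:", aerr.Message())
		}
	}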
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 0000000..e2e1d6e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operation,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or using
+// tooling to generate mocks to satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
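A compiling variant of the mocking pattern sketched in the doc comment above; the accountID helper and mockSTS type are illustrative, not part of the package:

	package myapp

	import (
		"testing"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/sts"
		"github.com/aws/aws-sdk-go/service/sts/stsiface"
	)

	// accountID depends only on the interface, so tests can substitute a mock.
	func accountID(svc stsiface.STSAPI) (string, error) {
		out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
		if err != nil {
			return "", err
		}
		return aws.StringValue(out.Account), nil
	}

	// mockSTS embeds the interface so unimplemented methods simply panic if called.
	type mockSTS struct {
		stsiface.STSAPI
	}

	func (m *mockSTS) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) {
		return &sts.GetCallerIdentityOutput{Account: aws.String("123456789012")}, nil
	}

	func TestAccountID(t *testing.T) {
		got, err := accountID(&mockSTS{})
		if err != nil || got != "123456789012" {
			t.Fatalf("accountID() = %q, %v", got, err)
		}
	}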
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
index 7e215f2..2133562 100644
--- a/vendor/github.com/google/go-cmp/cmp/compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -29,26 +29,17 @@ package cmp
 import (
 	"fmt"
 	"reflect"
+	"strings"

 	"github.com/google/go-cmp/cmp/internal/diff"
+	"github.com/google/go-cmp/cmp/internal/flags"
 	"github.com/google/go-cmp/cmp/internal/function"
 	"github.com/google/go-cmp/cmp/internal/value"
 )

-// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
-// the reflection package's inability to retrieve such entries. Equal will panic
-// anytime it comes across a NaN key, but this behavior may change.
-//
-// See https://golang.org/issue/11104 for more details.
-
-var nothing = reflect.Value{}
-
 // Equal reports whether x and y are equal by recursively applying the
 // following rules in the given order to x and y and all of their sub-values:
 //
-// • If two values are not of the same type, then they are never equal
-// and the overall result is false.
-//
 // • Let S be the set of all Ignore, Transformer, and Comparer options that
 // remain after applying all path filters, value filters, and type filters.
 // If at least one Ignore exists in S, then the comparison is ignored.
@@ -61,43 +52,79 @@ var nothing = reflect.Value{}
 //
 // • If the values have an Equal method of the form "(T) Equal(T) bool" or
 // "(T) Equal(I) bool" where T is assignable to I, then use the result of
-// x.Equal(y) even if x or y is nil.
-// Otherwise, no such method exists and evaluation proceeds to the next rule.
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
 //
 // • Lastly, try to compare x and y based on their basic kinds.
 // Simple kinds like booleans, integers, floats, complex numbers, strings, and
 // channels are compared using the equivalent of the == operator in Go.
 // Functions are only equal if they are both nil, otherwise they are unequal.
-// Pointers are equal if the underlying values they point to are also equal.
-// Interfaces are equal if their underlying concrete values are also equal.
 //
-// Structs are equal if all of their fields are equal. If a struct contains
-// unexported fields, Equal panics unless the AllowUnexported option is used or
-// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
+// Structs are equal if recursively calling Equal on all fields report equal.
+// If a struct contains unexported fields, Equal panics unless an Ignore option
+// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported
+// option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements report equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using cmpopts.EquateEmpty.
 //
-// Arrays, slices, and maps are equal if they are both nil or both non-nil
-// with the same length and the elements at each index or key are equal.
-// Note that a non-nil empty slice and a nil slice are not equal.
-// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries report equal.
 // Map keys are equal according to the == operator.
 // To use custom comparisons for map keys, consider using cmpopts.SortMaps.
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using cmpopts.EquateEmpty.
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
 func Equal(x, y interface{}, opts ...Option) bool {
+	vx := reflect.ValueOf(x)
+	vy := reflect.ValueOf(y)
+
+	// If the inputs are different types, auto-wrap them in an empty interface
+	// so that they have the same parent type.
+	var t reflect.Type
+	if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+		t = reflect.TypeOf((*interface{})(nil)).Elem()
+		if vx.IsValid() {
+			vvx := reflect.New(t).Elem()
+			vvx.Set(vx)
+			vx = vvx
+		}
+		if vy.IsValid() {
+			vvy := reflect.New(t).Elem()
+			vvy.Set(vy)
+			vy = vvy
+		}
+	} else {
+		t = vx.Type()
+	}
+
 	s := newState(opts)
-	s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
+	s.compareAny(&pathStep{t, vx, vy})
 	return s.result.Equal()
 }

 // Diff returns a human-readable report of the differences between two values.
 // It returns an empty string if and only if Equal returns true for the same
-// input values and options. The output string will use the "-" symbol to
-// indicate elements removed from x, and the "+" symbol to indicate elements
-// added to y.
+// input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix indicates an element added to y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more human-readable
+// outputs. In such cases, the string is prefixed with either an 's' or 'e'
+// character, respectively, to indicate that the method was called.
 //
-// Do not depend on this output being stable.
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
 func Diff(x, y interface{}, opts ...Option) string {
 	r := new(defaultReporter)
-	opts = Options{Options(opts), r}
-	eq := Equal(x, y, opts...)
+	eq := Equal(x, y, Options(opts), Reporter(r))
 	d := r.String()
 	if (d == "") != eq {
 		panic("inconsistent difference and equality results")
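The rewritten doc comment tightens the slice and map rules: nil and empty are distinct unless an option equates them. A short sketch of the documented behavior:

	package main

	import (
		"fmt"

		"github.com/google/go-cmp/cmp"
		"github.com/google/go-cmp/cmp/cmpopts"
	)

	type T struct {
		Name string
		Tags []string
	}

	func main() {
		x := T{Name: "a", Tags: []string{}} // empty, non-nil slice
		y := T{Name: "a"}                   // nil slice
		fmt.Println(cmp.Equal(x, y))                        // false
		fmt.Println(cmp.Equal(x, y, cmpopts.EquateEmpty())) // true
		fmt.Println(cmp.Diff(x, y))                         // "-" removed from x, "+" added to y
	}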
@@ -108,9 +135,13 @@ func Diff(x, y interface{}, opts ...Option) string {
 type state struct {
 	// These fields represent the "comparison state".
 	// Calling statelessCompare must not result in observable changes to these.
 	result    diff.Result // The current result of comparison
 	curPath   Path        // The current path in the value tree
-	reporter  reporter    // Optional reporter used for difference formatting
+	reporters []reporter  // Optional reporters
+
+	// recChecker checks for infinite cycles applying the same set of
+	// transformers upon the output of itself.
+	recChecker recChecker

 	// dynChecker triggers pseudo-random checks for option correctness.
 	// It is safe for statelessCompare to mutate this value.
@@ -122,10 +153,9 @@ type state struct {
 }

 func newState(opts []Option) *state {
-	s := new(state)
-	for _, opt := range opts {
-		s.processOption(opt)
-	}
+	// Always ensure a validator option exists to validate the inputs.
+	s := &state{opts: Options{validator{}}}
+	s.processOption(Options(opts))
 	return s
 }

@@ -152,10 +182,7 @@ func (s *state) processOption(opt Option) {
 			s.exporters[t] = true
 		}
 	case reporter:
-		if s.reporter != nil {
-			panic("difference reporter already registered")
-		}
-		s.reporter = opt
+		s.reporters = append(s.reporters, opt)
 	default:
 		panic(fmt.Sprintf("unknown option %T", opt))
 	}
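With reporters held in a slice, several Reporter options can be registered on one comparison. A sketch of a custom reporter built on the exported PushStep/Report/PopStep hooks; the diffReporter type is illustrative:

	package main

	import (
		"fmt"

		"github.com/google/go-cmp/cmp"
	)

	// diffReporter records every leaf difference it is notified about.
	type diffReporter struct {
		path  cmp.Path
		diffs []string
	}

	func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

	func (r *diffReporter) Report(rs cmp.Result) {
		if !rs.Equal() {
			vx, vy := r.path.Last().Values()
			r.diffs = append(r.diffs, fmt.Sprintf("%#v: %+v != %+v", r.path, vx, vy))
		}
	}

	func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

	func main() {
		var r diffReporter
		cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
		for _, d := range r.diffs {
			fmt.Println(d)
		}
	}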
@@ -164,153 +191,88 @@ func (s *state) processOption(opt Option) {
 // statelessCompare compares two values and returns the result.
 // This function is stateless in that it does not alter the current result,
 // or output to any registered reporters.
-func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
+func (s *state) statelessCompare(step PathStep) diff.Result {
 	// We do not save and restore the curPath because all of the compareX
 	// methods should properly push and pop from the path.
 	// It is an implementation bug if the contents of curPath differs from
 	// when calling this function to when returning from it.

-	oldResult, oldReporter := s.result, s.reporter
+	oldResult, oldReporters := s.result, s.reporters
 	s.result = diff.Result{} // Reset result
-	s.reporter = nil         // Remove reporter to avoid spurious printouts
-	s.compareAny(vx, vy)
+	s.reporters = nil        // Remove reporters to avoid spurious printouts
+	s.compareAny(step)
 	res := s.result
-	s.result, s.reporter = oldResult, oldReporter
+	s.result, s.reporters = oldResult, oldReporters
 	return res
 }

-func (s *state) compareAny(vx, vy reflect.Value) {
-	// TODO: Support cyclic data structures.
-
-	// Rule 0: Differing types are never equal.
-	if !vx.IsValid() || !vy.IsValid() {
-		s.report(vx.IsValid() == vy.IsValid(), vx, vy)
-		return
-	}
-	if vx.Type() != vy.Type() {
-		s.report(false, vx, vy) // Possible for path to be empty
-		return
-	}
-	t := vx.Type()
-	if len(s.curPath) == 0 {
-		s.curPath.push(&pathStep{typ: t})
-		defer s.curPath.pop()
+func (s *state) compareAny(step PathStep) {
+	// Update the path stack.
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for _, r := range s.reporters {
+		r.PushStep(step)
+		defer r.PopStep()
 	}
-	vx, vy = s.tryExporting(vx, vy)
+	s.recChecker.Check(s.curPath)
+
+	// Obtain the current type and values.
+	t := step.Type()
+	vx, vy := step.Values()

 	// Rule 1: Check whether an option applies on this node in the value tree.
-	if s.tryOptions(vx, vy, t) {
+	if s.tryOptions(t, vx, vy) {
 		return
 	}

 	// Rule 2: Check whether the type has a valid Equal method.
-	if s.tryMethod(vx, vy, t) {
+	if s.tryMethod(t, vx, vy) {
 		return
 	}

-	// Rule 3: Recursively descend into each value's underlying kind.
+	// Rule 3: Compare based on the underlying kind.
 	switch t.Kind() {
 	case reflect.Bool:
-		s.report(vx.Bool() == vy.Bool(), vx, vy)
-		return
+		s.report(vx.Bool() == vy.Bool(), 0)
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		s.report(vx.Int() == vy.Int(), vx, vy)
-		return
+		s.report(vx.Int() == vy.Int(), 0)
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		s.report(vx.Uint() == vy.Uint(), vx, vy)
-		return
+		s.report(vx.Uint() == vy.Uint(), 0)
 	case reflect.Float32, reflect.Float64:
-		s.report(vx.Float() == vy.Float(), vx, vy)
-		return
+		s.report(vx.Float() == vy.Float(), 0)
 	case reflect.Complex64, reflect.Complex128:
-		s.report(vx.Complex() == vy.Complex(), vx, vy)
-		return
+		s.report(vx.Complex() == vy.Complex(), 0)
 	case reflect.String:
-		s.report(vx.String() == vy.String(), vx, vy)
-		return
+		s.report(vx.String() == vy.String(), 0)
 	case reflect.Chan, reflect.UnsafePointer:
-		s.report(vx.Pointer() == vy.Pointer(), vx, vy)
-		return
+		s.report(vx.Pointer() == vy.Pointer(), 0)
 	case reflect.Func:
-		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-		return
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+	case reflect.Struct:
+		s.compareStruct(t, vx, vy)
+	case reflect.Slice, reflect.Array:
+		s.compareSlice(t, vx, vy)
+	case reflect.Map:
+		s.compareMap(t, vx, vy)
 	case reflect.Ptr:
-		if vx.IsNil() || vy.IsNil() {
-			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-			return
-		}
-		s.curPath.push(&indirect{pathStep{t.Elem()}})
-		defer s.curPath.pop()
-		s.compareAny(vx.Elem(), vy.Elem())
-		return
+		s.comparePtr(t, vx, vy)
 	case reflect.Interface:
-		if vx.IsNil() || vy.IsNil() {
-			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-			return
-		}
-		if vx.Elem().Type() != vy.Elem().Type() {
-			s.report(false, vx.Elem(), vy.Elem())
-			return
-		}
-		s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
-		defer s.curPath.pop()
-		s.compareAny(vx.Elem(), vy.Elem())
-		return
-	case reflect.Slice:
-		if vx.IsNil() || vy.IsNil() {
-			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-			return
-		}
-		fallthrough
-	case reflect.Array:
-		s.compareArray(vx, vy, t)
-		return
-	case reflect.Map:
-		s.compareMap(vx, vy, t)
-		return
-	case reflect.Struct:
-		s.compareStruct(vx, vy, t)
-		return
+		s.compareInterface(t, vx, vy)
 	default:
 		panic(fmt.Sprintf("%v kind not handled", t.Kind()))
 	}
 }

-func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
-	if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
-		if sf.force {
-			// Use unsafe pointer arithmetic to get read-write access to an
-			// unexported field in the struct.
-			vx = unsafeRetrieveField(sf.pvx, sf.field)
-			vy = unsafeRetrieveField(sf.pvy, sf.field)
-		} else {
-			// We are not allowed to export the value, so invalidate them
-			// so that tryOptions can panic later if not explicitly ignored.
-			vx = nothing
-			vy = nothing
-		}
-	}
-	return vx, vy
-}
-
-func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
-	// If there were no FilterValues, we will not detect invalid inputs,
-	// so manually check for them and append invalid if necessary.
-	// We still evaluate the options since an ignore can override invalid.
-	opts := s.opts
-	if !vx.IsValid() || !vy.IsValid() {
-		opts = Options{opts, invalid{}}
-	}
-
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
 	// Evaluate all filters and apply the remaining options.
-	if opt := opts.filter(s, vx, vy, t); opt != nil {
+	if opt := s.opts.filter(s, t, vx, vy); opt != nil {
 		opt.apply(s, vx, vy)
 		return true
 	}
 	return false
 }

-func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
 	// Check if this type even has an Equal method.
 	m, ok := t.MethodByName("Equal")
 	if !ok || !function.IsType(m.Type, function.EqualAssignable) {
@@ -318,11 +280,11 @@ func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
318 } 280 }
319 281
320 eq := s.callTTBFunc(m.Func, vx, vy) 282 eq := s.callTTBFunc(m.Func, vx, vy)
321 s.report(eq, vx, vy) 283 s.report(eq, reportByMethod)
322 return true 284 return true
323} 285}
324 286
325func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { 287func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
326 v = sanitizeValue(v, f.Type().In(0)) 288 v = sanitizeValue(v, f.Type().In(0))
327 if !s.dynChecker.Next() { 289 if !s.dynChecker.Next() {
328 return f.Call([]reflect.Value{v})[0] 290 return f.Call([]reflect.Value{v})[0]
@@ -333,15 +295,15 @@ func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
333 // unsafe mutations to the input. 295 // unsafe mutations to the input.
334 c := make(chan reflect.Value) 296 c := make(chan reflect.Value)
335 go detectRaces(c, f, v) 297 go detectRaces(c, f, v)
298 got := <-c
336 want := f.Call([]reflect.Value{v})[0] 299 want := f.Call([]reflect.Value{v})[0]
337 if got := <-c; !s.statelessCompare(got, want).Equal() { 300 if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
338 // To avoid false-positives with non-reflexive equality operations, 301 // To avoid false-positives with non-reflexive equality operations,
339 // we sanity check whether a value is equal to itself. 302 // we sanity check whether a value is equal to itself.
340 if !s.statelessCompare(want, want).Equal() { 303 if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
341 return want 304 return want
342 } 305 }
343 fn := getFuncName(f.Pointer()) 306 panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
344 panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
345 } 307 }
346 return want 308 return want
347} 309}
@@ -359,10 +321,10 @@ func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
359 // unsafe mutations to the input. 321 // unsafe mutations to the input.
360 c := make(chan reflect.Value) 322 c := make(chan reflect.Value)
361 go detectRaces(c, f, y, x) 323 go detectRaces(c, f, y, x)
324 got := <-c
362 want := f.Call([]reflect.Value{x, y})[0].Bool() 325 want := f.Call([]reflect.Value{x, y})[0].Bool()
363 if got := <-c; !got.IsValid() || got.Bool() != want { 326 if !got.IsValid() || got.Bool() != want {
364 fn := getFuncName(f.Pointer()) 327 panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
365 panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
366 } 328 }
367 return want 329 return want
368} 330}
@@ -380,140 +342,241 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
380// assuming that T is assignable to R. 342// assuming that T is assignable to R.
381// Otherwise, it returns the input value as is. 343// Otherwise, it returns the input value as is.
382func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { 344func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
383 // TODO(dsnet): Remove this hacky workaround. 345 // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143).
384 // See https://golang.org/issue/22143 346 if !flags.AtLeastGo110 {
385 if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { 347 if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
386 return reflect.New(t).Elem() 348 return reflect.New(t).Elem()
349 }
387 } 350 }
388 return v 351 return v
389} 352}
390 353
391func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { 354func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
392 step := &sliceIndex{pathStep{t.Elem()}, 0, 0} 355 var vax, vay reflect.Value // Addressable versions of vx and vy
393 s.curPath.push(step)
394 356
395 // Compute an edit-script for slices vx and vy. 357 step := StructField{&structField{}}
396 es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { 358 for i := 0; i < t.NumField(); i++ {
397 step.xkey, step.ykey = ix, iy 359 step.typ = t.Field(i).Type
398 return s.statelessCompare(vx.Index(ix), vy.Index(iy)) 360 step.vx = vx.Field(i)
399 }) 361 step.vy = vy.Field(i)
362 step.name = t.Field(i).Name
363 step.idx = i
364 step.unexported = !isExported(step.name)
365 if step.unexported {
366 if step.name == "_" {
367 continue
368 }
369 // Defer checking of unexported fields until later to give an
370 // Ignore a chance to ignore the field.
371 if !vax.IsValid() || !vay.IsValid() {
372 // For retrieveUnexportedField to work, the parent struct must
373 // be addressable. Create a new copy of the values if
374 // necessary to make them addressable.
375 vax = makeAddressable(vx)
376 vay = makeAddressable(vy)
377 }
378 step.mayForce = s.exporters[t]
379 step.pvx = vax
380 step.pvy = vay
381 step.field = t.Field(i)
382 }
383 s.compareAny(step)
384 }
385}
400 386
401 // Report the entire slice as is if the arrays are of primitive kind, 387func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
402 // and the arrays are different enough. 388 isSlice := t.Kind() == reflect.Slice
403 isPrimitive := false 389 if isSlice && (vx.IsNil() || vy.IsNil()) {
404 switch t.Elem().Kind() { 390 s.report(vx.IsNil() && vy.IsNil(), 0)
405 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
406 reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
407 reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
408 isPrimitive = true
409 }
410 if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
411 s.curPath.pop() // Pop first since we are reporting the whole slice
412 s.report(false, vx, vy)
413 return 391 return
414 } 392 }
415 393
416 // Replay the edit-script. 394 // TODO: Support cyclic data structures.
395
396 step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}}
397 withIndexes := func(ix, iy int) SliceIndex {
398 if ix >= 0 {
399 step.vx, step.xkey = vx.Index(ix), ix
400 } else {
401 step.vx, step.xkey = reflect.Value{}, -1
402 }
403 if iy >= 0 {
404 step.vy, step.ykey = vy.Index(iy), iy
405 } else {
406 step.vy, step.ykey = reflect.Value{}, -1
407 }
408 return step
409 }
410
411 // Ignore options are able to ignore missing elements in a slice.
412 // However, detecting these reliably requires an optimal differencing
413 // algorithm, for which diff.Difference is not.
414 //
415 // Instead, we first iterate through both slices to detect which elements
416 // would be ignored if standing alone. The index of non-discarded elements
417 // are stored in a separate slice, which diffing is then performed on.
418 var indexesX, indexesY []int
419 var ignoredX, ignoredY []bool
420 for ix := 0; ix < vx.Len(); ix++ {
421 ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
422 if !ignored {
423 indexesX = append(indexesX, ix)
424 }
425 ignoredX = append(ignoredX, ignored)
426 }
427 for iy := 0; iy < vy.Len(); iy++ {
428 ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
429 if !ignored {
430 indexesY = append(indexesY, iy)
431 }
432 ignoredY = append(ignoredY, ignored)
433 }
434
435 // Compute an edit-script for slices vx and vy (excluding ignored elements).
436 edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
437 return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
438 })
439
440 // Replay the ignore-scripts and the edit-script.
417 var ix, iy int 441 var ix, iy int
418 for _, e := range es { 442 for ix < vx.Len() || iy < vy.Len() {
443 var e diff.EditType
444 switch {
445 case ix < len(ignoredX) && ignoredX[ix]:
446 e = diff.UniqueX
447 case iy < len(ignoredY) && ignoredY[iy]:
448 e = diff.UniqueY
449 default:
450 e, edits = edits[0], edits[1:]
451 }
419 switch e { 452 switch e {
420 case diff.UniqueX: 453 case diff.UniqueX:
421 step.xkey, step.ykey = ix, -1 454 s.compareAny(withIndexes(ix, -1))
422 s.report(false, vx.Index(ix), nothing)
423 ix++ 455 ix++
424 case diff.UniqueY: 456 case diff.UniqueY:
425 step.xkey, step.ykey = -1, iy 457 s.compareAny(withIndexes(-1, iy))
426 s.report(false, nothing, vy.Index(iy))
427 iy++ 458 iy++
428 default: 459 default:
429 step.xkey, step.ykey = ix, iy 460 s.compareAny(withIndexes(ix, iy))
430 if e == diff.Identity {
431 s.report(true, vx.Index(ix), vy.Index(iy))
432 } else {
433 s.compareAny(vx.Index(ix), vy.Index(iy))
434 }
435 ix++ 461 ix++
436 iy++ 462 iy++
437 } 463 }
438 } 464 }
439 s.curPath.pop()
440 return
441} 465}
442 466
443func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) { 467func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
444 if vx.IsNil() || vy.IsNil() { 468 if vx.IsNil() || vy.IsNil() {
445 s.report(vx.IsNil() && vy.IsNil(), vx, vy) 469 s.report(vx.IsNil() && vy.IsNil(), 0)
446 return 470 return
447 } 471 }
448 472
473 // TODO: Support cyclic data structures.
474
449 // We combine and sort the two map keys so that we can perform the 475 // We combine and sort the two map keys so that we can perform the
450 // comparisons in a deterministic order. 476 // comparisons in a deterministic order.
451 step := &mapIndex{pathStep: pathStep{t.Elem()}} 477 step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
452 s.curPath.push(step)
453 defer s.curPath.pop()
454 for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { 478 for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
479 step.vx = vx.MapIndex(k)
480 step.vy = vy.MapIndex(k)
455 step.key = k 481 step.key = k
456 vvx := vx.MapIndex(k) 482 if !step.vx.IsValid() && !step.vy.IsValid() {
457 vvy := vy.MapIndex(k) 483 // It is possible for both vx and vy to be invalid if the
458 switch { 484 // key contained a NaN value in it.
459 case vvx.IsValid() && vvy.IsValid(): 485 //
460 s.compareAny(vvx, vvy) 486 // Even with the ability to retrieve NaN keys in Go 1.12,
461 case vvx.IsValid() && !vvy.IsValid(): 487 // there still isn't a sensible way to compare the values since
462 s.report(false, vvx, nothing) 488 // a NaN key may map to multiple unordered values.
463 case !vvx.IsValid() && vvy.IsValid(): 489 // The most reasonable way to compare NaNs would be to compare the
464 s.report(false, nothing, vvy) 490 // set of values. However, this is impossible to do efficiently
465 default: 491 // since set equality is provably an O(n^2) operation given only
466 // It is possible for both vvx and vvy to be invalid if the 492 // an Equal function. If we had a Less function or Hash function,
467 // key contained a NaN value in it. There is no way in 493 // this could be done in O(n*log(n)) or O(n), respectively.
468 // reflection to be able to retrieve these values. 494 //
469 // See https://golang.org/issue/11104 495 // Rather than adding complex logic to deal with NaNs, make it
470 panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath)) 496 // the user's responsibility to compare such obscure maps.
497 const help = "consider providing a Comparer to compare the map"
498 panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
471 } 499 }
500 s.compareAny(step)
472 } 501 }
473} 502}
474 503
475func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) { 504func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
476 var vax, vay reflect.Value // Addressable versions of vx and vy 505 if vx.IsNil() || vy.IsNil() {
506 s.report(vx.IsNil() && vy.IsNil(), 0)
507 return
508 }
477 509
478 step := &structField{} 510 // TODO: Support cyclic data structures.
479 s.curPath.push(step) 511
480 defer s.curPath.pop() 512 vx, vy = vx.Elem(), vy.Elem()
481 for i := 0; i < t.NumField(); i++ { 513 s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
482 vvx := vx.Field(i) 514}
483 vvy := vy.Field(i) 515
484 step.typ = t.Field(i).Type 516func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
485 step.name = t.Field(i).Name 517 if vx.IsNil() || vy.IsNil() {
486 step.idx = i 518 s.report(vx.IsNil() && vy.IsNil(), 0)
487 step.unexported = !isExported(step.name) 519 return
488 if step.unexported { 520 }
489 // Defer checking of unexported fields until later to give an 521 vx, vy = vx.Elem(), vy.Elem()
490 // Ignore a chance to ignore the field. 522 if vx.Type() != vy.Type() {
491 if !vax.IsValid() || !vay.IsValid() { 523 s.report(false, 0)
492 // For unsafeRetrieveField to work, the parent struct must 524 return
493 // be addressable. Create a new copy of the values if 525 }
494 // necessary to make them addressable. 526 s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
495 vax = makeAddressable(vx) 527}
496 vay = makeAddressable(vy) 528
497 } 529func (s *state) report(eq bool, rf resultFlags) {
498 step.force = s.exporters[t] 530 if rf&reportByIgnore == 0 {
499 step.pvx = vax 531 if eq {
500 step.pvy = vay 532 s.result.NumSame++
501 step.field = t.Field(i) 533 rf |= reportEqual
534 } else {
535 s.result.NumDiff++
536 rf |= reportUnequal
502 } 537 }
503 s.compareAny(vvx, vvy) 538 }
539 for _, r := range s.reporters {
540 r.Report(Result{flags: rf})
504 } 541 }
505} 542}
506 543
507// report records the result of a single comparison. 544// recChecker tracks the state needed to periodically perform checks that
508// It also calls Report if any reporter is registered. 545// user provided transformers are not stuck in an infinitely recursive cycle.
509func (s *state) report(eq bool, vx, vy reflect.Value) { 546type recChecker struct{ next int }
510 if eq { 547
511 s.result.NSame++ 548// Check scans the Path for any recursive transformers and panics when any
512 } else { 549// recursive transformers are detected. Note that the presence of a
513 s.result.NDiff++ 550// recursive Transformer does not necessarily imply an infinite cycle.
551// As such, this check only activates after some minimal number of path steps.
552func (rc *recChecker) Check(p Path) {
553 const minLen = 1 << 16
554 if rc.next == 0 {
555 rc.next = minLen
556 }
557 if len(p) < rc.next {
558 return
559 }
560 rc.next <<= 1
561
562 // Check whether the same transformer has appeared at least twice.
563 var ss []string
564 m := map[Option]int{}
565 for _, ps := range p {
566 if t, ok := ps.(Transform); ok {
567 t := t.Option()
568 if m[t] == 1 { // Transformer was used exactly once before
569 tf := t.(*transformer).fnc.Type()
570 ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
571 }
572 m[t]++
573 }
514 } 574 }
515 if s.reporter != nil { 575 if len(ss) > 0 {
516 s.reporter.Report(vx, vy, eq, s.curPath) 576 const warning = "recursive set of Transformers detected"
577 const help = "consider using cmpopts.AcyclicTransformer"
578 set := strings.Join(ss, "\n\t")
579 panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
517 } 580 }
518} 581}
519 582
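[editorial note] The rewritten compareMap panics on maps with NaN keys and now points users at a Comparer. A minimal sketch of such an option, assuming only the public go-cmp API (cmp.Comparer and cmp.Equal are real; the skip-NaN-keys policy below is purely illustrative and caller-defined):

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	nan := math.NaN()
	x := map[float64]string{nan: "a", 1: "b"}
	y := map[float64]string{nan: "a", 1: "b"}

	// Illustrative policy (not part of go-cmp): ignore NaN-keyed entries
	// and compare the rest. Without this option, cmp.Equal would panic.
	opt := cmp.Comparer(func(a, b map[float64]string) bool {
		if len(a) != len(b) {
			return false
		}
		for k, va := range a {
			if math.IsNaN(k) {
				continue
			}
			if vb, ok := b[k]; !ok || va != vb {
				return false
			}
		}
		return true
	})
	fmt.Println(cmp.Equal(x, y, opt)) // true under this policy
}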
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go
index d1518eb..abc3a1c 100644
--- a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
+++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build purego appengine js
+// +build purego
 
 package cmp
 
@@ -10,6 +10,6 @@ import "reflect"
 
 const supportAllowUnexported = false
 
-func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
-	panic("unsafeRetrieveField is not implemented")
+func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value {
+	panic("retrieveUnexportedField is not implemented")
 }
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
index 579b655..59d4ee9 100644
--- a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
+++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build !purego,!appengine,!js
+// +build !purego
 
 package cmp
 
@@ -13,11 +13,11 @@ import (
 
 const supportAllowUnexported = true
 
-// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
-// such that the value has read-write permissions.
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
 //
 // The parent struct, v, must be addressable, while f must be a StructField
 // describing the field to retrieve.
-func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value {
 	return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
 }
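[editorial note] retrieveUnexportedField only works when the parent struct is addressable, which is why the new compareStruct calls makeAddressable first. A standalone sketch of the same reflect.NewAt technique, under the assumption that the secret type below is a made-up example:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type secret struct{ hidden int } // illustrative type with an unexported field

func main() {
	v := reflect.ValueOf(secret{hidden: 42})

	// Copy into addressable memory, mirroring makeAddressable in compare.go.
	addr := reflect.New(v.Type()).Elem()
	addr.Set(v)

	f, _ := v.Type().FieldByName("hidden")
	// Same trick as retrieveUnexportedField: a read-write view of the field.
	rw := reflect.NewAt(f.Type, unsafe.Pointer(addr.UnsafeAddr()+f.Offset)).Elem()
	fmt.Println(rw.Int()) // 42
}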
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
index 42afa49..fe98dcc 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build !debug
+// +build !cmp_debug
 
 package diff
 
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
index fd9f7f1..597b6ae 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build debug
+// +build cmp_debug
 
 package diff
 
@@ -14,7 +14,7 @@ import (
 )
 
 // The algorithm can be seen running in real-time by enabling debugging:
-//	go test -tags=debug -v
+//	go test -tags=cmp_debug -v
 //
 // Example output:
 //	=== RUN   TestDifference/#34
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
index 260befe..3d2e426 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -85,22 +85,31 @@ func (es EditScript) LenY() int { return len(es) - es.stats().NX }
 type EqualFunc func(ix int, iy int) Result
 
 // Result is the result of comparison.
-// NSame is the number of sub-elements that are equal.
-// NDiff is the number of sub-elements that are not equal.
-type Result struct{ NSame, NDiff int }
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+	if b {
+		return Result{NumSame: 1} // Equal, Similar
+	} else {
+		return Result{NumDiff: 2} // Not Equal, not Similar
+	}
+}
 
 // Equal indicates whether the symbols are equal. Two symbols are equal
-// if and only if NDiff == 0. If Equal, then they are also Similar.
-func (r Result) Equal() bool { return r.NDiff == 0 }
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
 
 // Similar indicates whether two symbols are similar and may be represented
 // by using the Modified type. As a special case, we consider binary comparisons
 // (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
 //
-// The exact ratio of NSame to NDiff to determine similarity may change.
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
 func (r Result) Similar() bool {
-	// Use NSame+1 to offset NSame so that binary comparisons are similar.
-	return r.NSame+1 >= r.NDiff
+	// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+	return r.NumSame+1 >= r.NumDiff
 }
 
 // Difference reports whether two lists of lengths nx and ny are equal
@@ -191,9 +200,9 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
 	// that two lists commonly differ because elements were added to the front
 	// or end of the other list.
 	//
-	// Running the tests with the "debug" build tag prints a visualization of
-	// the algorithm running in real-time. This is educational for understanding
-	// how the algorithm works. See debug_enable.go.
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
 	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
 	for {
 		// Forward search from the beginning.
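[editorial note] With the NumSame/NumDiff rename, BoolResult's constants line up with Similar's threshold. A quick standalone check of that arithmetic; the result type below is a local copy for illustration, since internal/diff is not importable:

package main

import "fmt"

// Local copy of the Result logic shown above, for illustration only.
type result struct{ numSame, numDiff int }

func (r result) equal() bool   { return r.numDiff == 0 }
func (r result) similar() bool { return r.numSame+1 >= r.numDiff }

func main() {
	fmt.Println(result{1, 0}.similar()) // true: binary equal comparison
	fmt.Println(result{0, 1}.similar()) // true: binary unequal is still similar
	fmt.Println(result{0, 2}.similar()) // false: BoolResult(false) is not similar
}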
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 0000000..a9e7fc0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
new file mode 100644
index 0000000..01aed0a
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
@@ -0,0 +1,10 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !go1.10
+
+package flags
+
+// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
+const AtLeastGo110 = false
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
new file mode 100644
index 0000000..c0b667f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
@@ -0,0 +1,10 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build go1.10
+
+package flags
+
+// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
+const AtLeastGo110 = true
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
index 4c35ff1..ace1dbe 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -2,25 +2,34 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// Package function identifies function types.
+// Package function provides functionality for identifying function types.
 package function
 
-import "reflect"
+import (
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+)
 
 type funcType int
 
 const (
 	_ funcType = iota
 
+	tbFunc  // func(T) bool
 	ttbFunc // func(T, T) bool
+	trbFunc // func(T, R) bool
 	tibFunc // func(T, I) bool
 	trFunc  // func(T) R
 
 	Equal           = ttbFunc // func(T, T) bool
 	EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
 	Transformer     = trFunc  // func(T) R
 	ValueFilter     = ttbFunc // func(T, T) bool
 	Less            = ttbFunc // func(T, T) bool
+	ValuePredicate    = tbFunc  // func(T) bool
+	KeyValuePredicate = trbFunc // func(T, R) bool
 )
 
 var boolType = reflect.TypeOf(true)
@@ -32,10 +41,18 @@ func IsType(t reflect.Type, ft funcType) bool {
 	}
 	ni, no := t.NumIn(), t.NumOut()
 	switch ft {
+	case tbFunc: // func(T) bool
+		if ni == 1 && no == 1 && t.Out(0) == boolType {
+			return true
+		}
 	case ttbFunc: // func(T, T) bool
 		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
 			return true
 		}
+	case trbFunc: // func(T, R) bool
+		if ni == 2 && no == 1 && t.Out(0) == boolType {
+			return true
+		}
 	case tibFunc: // func(T, I) bool
 		if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
 			return true
@@ -47,3 +64,36 @@ func IsType(t reflect.Type, ft funcType) bool {
 	}
 	return false
 }
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+	fnc := runtime.FuncForPC(v.Pointer())
+	if fnc == nil {
+		return "<unknown>"
+	}
+	fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+	// Method closures have a "-fm" suffix.
+	fullName = strings.TrimSuffix(fullName, "-fm")
+
+	var name string
+	for len(fullName) > 0 {
+		inParen := strings.HasSuffix(fullName, ")")
+		fullName = strings.TrimSuffix(fullName, ")")
+
+		s := lastIdentRx.FindString(fullName)
+		if s == "" {
+			break
+		}
+		name = s + "." + name
+		fullName = strings.TrimSuffix(fullName, s)
+
+		if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+			fullName = fullName[:i]
+		}
+		fullName = strings.TrimSuffix(fullName, ".")
+	}
+	return strings.TrimSuffix(name, ".")
+}
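[editorial note] function.NameOf drives the friendlier panic and String output seen throughout this change. A minimal demonstration of the runtime.FuncForPC call it builds on; the equalFold helper is just an example function, and the printed name depends on the package:

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

func equalFold(x, y string) bool { return strings.EqualFold(x, y) }

func main() {
	v := reflect.ValueOf(equalFold)
	// NameOf refines this raw name with the trimming loop shown above.
	fnc := runtime.FuncForPC(v.Pointer())
	fmt.Println(fnc.Name()) // e.g., "main.equalFold"
}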
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
deleted file mode 100644
index 657e508..0000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-// Package value provides functionality for reflect.Value types.
-package value
-
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-	"unicode"
-)
-
-var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-
-// Format formats the value v as a string.
-//
-// This is similar to fmt.Sprintf("%+v", v) except this:
-//	* Prints the type unless it can be elided
-//	* Avoids printing struct fields that are zero
-//	* Prints a nil-slice as being nil, not empty
-//	* Prints map entries in deterministic order
-func Format(v reflect.Value, conf FormatConfig) string {
-	conf.printType = true
-	conf.followPointers = true
-	conf.realPointers = true
-	return formatAny(v, conf, nil)
-}
-
-type FormatConfig struct {
-	UseStringer        bool // Should the String method be used if available?
-	printType          bool // Should we print the type before the value?
-	PrintPrimitiveType bool // Should we print the type of primitives?
-	followPointers     bool // Should we recursively follow pointers?
-	realPointers       bool // Should we print the real address of pointers?
-}
-
-func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
-	// TODO: Should this be a multi-line printout in certain situations?
-
-	if !v.IsValid() {
-		return "<non-existent>"
-	}
-	if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
-		if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
-			return "<nil>"
-		}
-
-		const stringerPrefix = "s" // Indicates that the String method was used
-		s := v.Interface().(fmt.Stringer).String()
-		return stringerPrefix + formatString(s)
-	}
-
-	switch v.Kind() {
-	case reflect.Bool:
-		return formatPrimitive(v.Type(), v.Bool(), conf)
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return formatPrimitive(v.Type(), v.Int(), conf)
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
-			// Unnamed uints are usually bytes or words, so use hexadecimal.
-			return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
-		}
-		return formatPrimitive(v.Type(), v.Uint(), conf)
-	case reflect.Float32, reflect.Float64:
-		return formatPrimitive(v.Type(), v.Float(), conf)
-	case reflect.Complex64, reflect.Complex128:
-		return formatPrimitive(v.Type(), v.Complex(), conf)
-	case reflect.String:
-		return formatPrimitive(v.Type(), formatString(v.String()), conf)
-	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		return formatPointer(v, conf)
-	case reflect.Ptr:
-		if v.IsNil() {
-			if conf.printType {
-				return fmt.Sprintf("(%v)(nil)", v.Type())
-			}
-			return "<nil>"
-		}
-		if visited[v.Pointer()] || !conf.followPointers {
-			return formatPointer(v, conf)
-		}
-		visited = insertPointer(visited, v.Pointer())
-		return "&" + formatAny(v.Elem(), conf, visited)
-	case reflect.Interface:
-		if v.IsNil() {
-			if conf.printType {
-				return fmt.Sprintf("%v(nil)", v.Type())
-			}
-			return "<nil>"
-		}
-		return formatAny(v.Elem(), conf, visited)
-	case reflect.Slice:
-		if v.IsNil() {
-			if conf.printType {
-				return fmt.Sprintf("%v(nil)", v.Type())
-			}
-			return "<nil>"
-		}
-		if visited[v.Pointer()] {
-			return formatPointer(v, conf)
-		}
-		visited = insertPointer(visited, v.Pointer())
-		fallthrough
-	case reflect.Array:
-		var ss []string
-		subConf := conf
-		subConf.printType = v.Type().Elem().Kind() == reflect.Interface
-		for i := 0; i < v.Len(); i++ {
-			s := formatAny(v.Index(i), subConf, visited)
-			ss = append(ss, s)
-		}
-		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
-		if conf.printType {
-			return v.Type().String() + s
-		}
-		return s
-	case reflect.Map:
-		if v.IsNil() {
-			if conf.printType {
-				return fmt.Sprintf("%v(nil)", v.Type())
-			}
-			return "<nil>"
-		}
-		if visited[v.Pointer()] {
-			return formatPointer(v, conf)
-		}
-		visited = insertPointer(visited, v.Pointer())
-
-		var ss []string
-		keyConf, valConf := conf, conf
-		keyConf.printType = v.Type().Key().Kind() == reflect.Interface
-		keyConf.followPointers = false
-		valConf.printType = v.Type().Elem().Kind() == reflect.Interface
-		for _, k := range SortKeys(v.MapKeys()) {
-			sk := formatAny(k, keyConf, visited)
-			sv := formatAny(v.MapIndex(k), valConf, visited)
-			ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
-		}
-		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
-		if conf.printType {
-			return v.Type().String() + s
-		}
-		return s
-	case reflect.Struct:
-		var ss []string
-		subConf := conf
-		subConf.printType = true
-		for i := 0; i < v.NumField(); i++ {
-			vv := v.Field(i)
-			if isZero(vv) {
-				continue // Elide zero value fields
-			}
-			name := v.Type().Field(i).Name
-			subConf.UseStringer = conf.UseStringer
-			s := formatAny(vv, subConf, visited)
-			ss = append(ss, fmt.Sprintf("%s: %s", name, s))
-		}
-		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
-		if conf.printType {
-			return v.Type().String() + s
-		}
-		return s
-	default:
-		panic(fmt.Sprintf("%v kind not handled", v.Kind()))
-	}
-}
-
-func formatString(s string) string {
-	// Use quoted string if it the same length as a raw string literal.
-	// Otherwise, attempt to use the raw string form.
-	qs := strconv.Quote(s)
-	if len(qs) == 1+len(s)+1 {
-		return qs
-	}
-
-	// Disallow newlines to ensure output is a single line.
-	// Only allow printable runes for readability purposes.
-	rawInvalid := func(r rune) bool {
-		return r == '`' || r == '\n' || !unicode.IsPrint(r)
-	}
-	if strings.IndexFunc(s, rawInvalid) < 0 {
-		return "`" + s + "`"
-	}
-	return qs
-}
-
-func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
-	if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
-		return fmt.Sprintf("%v(%v)", t, v)
-	}
-	return fmt.Sprintf("%v", v)
-}
-
-func formatPointer(v reflect.Value, conf FormatConfig) string {
-	p := v.Pointer()
-	if !conf.realPointers {
-		p = 0 // For deterministic printing purposes
-	}
-	s := formatHex(uint64(p))
-	if conf.printType {
-		return fmt.Sprintf("(%v)(%s)", v.Type(), s)
-	}
-	return s
-}
-
-func formatHex(u uint64) string {
-	var f string
-	switch {
-	case u <= 0xff:
-		f = "0x%02x"
-	case u <= 0xffff:
-		f = "0x%04x"
-	case u <= 0xffffff:
-		f = "0x%06x"
-	case u <= 0xffffffff:
-		f = "0x%08x"
-	case u <= 0xffffffffff:
-		f = "0x%010x"
-	case u <= 0xffffffffffff:
-		f = "0x%012x"
-	case u <= 0xffffffffffffff:
-		f = "0x%014x"
-	case u <= 0xffffffffffffffff:
-		f = "0x%016x"
-	}
-	return fmt.Sprintf(f, u)
-}
-
-// insertPointer insert p into m, allocating m if necessary.
-func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
-	if m == nil {
-		m = make(map[uintptr]bool)
-	}
-	m[p] = true
-	return m
-}
-
-// isZero reports whether v is the zero value.
-// This does not rely on Interface and so can be used on unexported fields.
-func isZero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return v.Bool() == false
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Complex64, reflect.Complex128:
-		return v.Complex() == 0
-	case reflect.String:
-		return v.String() == ""
-	case reflect.UnsafePointer:
-		return v.Pointer() == 0
-	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
-		return v.IsNil()
-	case reflect.Array:
-		for i := 0; i < v.Len(); i++ {
-			if !isZero(v.Index(i)) {
-				return false
-			}
-		}
-		return true
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			if !isZero(v.Field(i)) {
-				return false
-			}
-		}
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
new file mode 100644
index 0000000..0a01c47
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
@@ -0,0 +1,23 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build purego
+
+package value
+
+import "reflect"
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+	p uintptr
+	t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+	// NOTE: Storing a pointer as an uintptr is technically incorrect as it
+	// assumes that the GC implementation does not use a moving collector.
+	return Pointer{v.Pointer(), v.Type()}
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
new file mode 100644
index 0000000..da134ae
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
@@ -0,0 +1,26 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !purego
+
+package value
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+	p unsafe.Pointer
+	t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+	// The proper representation of a pointer is unsafe.Pointer,
+	// which is necessary if the GC ever uses a moving collector.
+	return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
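[editorial note] Both Pointer implementations exist so that a pointer plus its type can serve as a comparable map key without holding a reflect.Value. A sketch of that pattern as a visited set for cycle detection, which the "Support cyclic data structures" TODOs above hint at; the lowercase pointer/pointerOf names below mirror the unsafe variant and are illustrative only:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// Local mirror of the unsafe PointerOf shown above, for illustration.
type pointer struct {
	p unsafe.Pointer
	t reflect.Type
}

func pointerOf(v reflect.Value) pointer {
	return pointer{unsafe.Pointer(v.Pointer()), v.Type()}
}

func main() {
	type node struct{ Next *node }
	n := &node{}
	n.Next = n // deliberate cycle

	// Because pointer is a comparable struct, it can key a visited set.
	visited := map[pointer]bool{}
	v := reflect.ValueOf(n)
	for v.Kind() == reflect.Ptr && !v.IsNil() {
		p := pointerOf(v)
		if visited[p] {
			fmt.Println("cycle detected")
			break
		}
		visited[p] = true
		v = v.Elem().Field(0)
	}
}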
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
index fe8aa27..938f646 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -19,7 +19,7 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
 	}
 
 	// Sort the map keys.
-	sort.Sort(valueSorter(vs))
+	sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
 
 	// Deduplicate keys (fails for NaNs).
 	vs2 := vs[:1]
@@ -31,13 +31,6 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
 	return vs2
 }
 
-// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
-type valueSorter []reflect.Value
-
-func (vs valueSorter) Len() int           { return len(vs) }
-func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
-func (vs valueSorter) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }
-
 // isLess is a generic function for sorting arbitrary map keys.
 // The inputs must be of the same type and must be comparable.
 func isLess(x, y reflect.Value) bool {
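[editorial note] Dropping valueSorter is the usual sort.Interface to sort.Slice migration; sort.Slice has been available since Go 1.8, which is why the AppEngine TODO could be retired. The same pattern on a plain slice:

package main

import (
	"fmt"
	"sort"
)

func main() {
	vs := []int{3, 1, 2}
	// Before: sort.Sort over a named type implementing sort.Interface.
	// After: an inline less function, as sort.go now does for map keys.
	sort.Slice(vs, func(i, j int) bool { return vs[i] < vs[j] })
	fmt.Println(vs) // [1 2 3]
}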
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
new file mode 100644
index 0000000..d13a12c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
@@ -0,0 +1,45 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import "reflect"
+
+// IsZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func IsZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return v.Bool() == false
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.String:
+		return v.String() == ""
+	case reflect.UnsafePointer:
+		return v.Pointer() == 0
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if !IsZero(v.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if !IsZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
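[editorial note] value.IsZero is internal to go-cmp, but the logic above is self-contained. A trimmed local copy exercising the recursive array and struct cases; the pair type is a made-up example:

package main

import (
	"fmt"
	"reflect"
)

// Trimmed local copy of the IsZero logic above, for illustration only.
func isZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Int:
		return v.Int() == 0
	case reflect.String:
		return v.String() == ""
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if !isZero(v.Index(i)) {
				return false
			}
		}
		return true
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if !isZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	type pair struct {
		N int
		S string
	}
	fmt.Println(isZero(reflect.ValueOf(pair{})))     // true
	fmt.Println(isZero(reflect.ValueOf(pair{N: 1}))) // false
	fmt.Println(isZero(reflect.ValueOf([2]pair{})))  // true
}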
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index 91d4b06..7934481 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -7,7 +7,7 @@ package cmp
7import ( 7import (
8 "fmt" 8 "fmt"
9 "reflect" 9 "reflect"
10 "runtime" 10 "regexp"
11 "strings" 11 "strings"
12 12
13 "github.com/google/go-cmp/cmp/internal/function" 13 "github.com/google/go-cmp/cmp/internal/function"
@@ -29,11 +29,11 @@ type Option interface {
29 // An Options is returned only if multiple comparers or transformers 29 // An Options is returned only if multiple comparers or transformers
30 // can apply simultaneously and will only contain values of those types 30 // can apply simultaneously and will only contain values of those types
31 // or sub-Options containing values of those types. 31 // or sub-Options containing values of those types.
32 filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption 32 filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
33} 33}
34 34
35// applicableOption represents the following types: 35// applicableOption represents the following types:
36// Fundamental: ignore | invalid | *comparer | *transformer 36// Fundamental: ignore | validator | *comparer | *transformer
37// Grouping: Options 37// Grouping: Options
38type applicableOption interface { 38type applicableOption interface {
39 Option 39 Option
@@ -43,7 +43,7 @@ type applicableOption interface {
43} 43}
44 44
45// coreOption represents the following types: 45// coreOption represents the following types:
46// Fundamental: ignore | invalid | *comparer | *transformer 46// Fundamental: ignore | validator | *comparer | *transformer
47// Filters: *pathFilter | *valuesFilter 47// Filters: *pathFilter | *valuesFilter
48type coreOption interface { 48type coreOption interface {
49 Option 49 Option
@@ -63,19 +63,19 @@ func (core) isCore() {}
63// on all individual options held within. 63// on all individual options held within.
64type Options []Option 64type Options []Option
65 65
66func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { 66func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
67 for _, opt := range opts { 67 for _, opt := range opts {
68 switch opt := opt.filter(s, vx, vy, t); opt.(type) { 68 switch opt := opt.filter(s, t, vx, vy); opt.(type) {
69 case ignore: 69 case ignore:
70 return ignore{} // Only ignore can short-circuit evaluation 70 return ignore{} // Only ignore can short-circuit evaluation
71 case invalid: 71 case validator:
72 out = invalid{} // Takes precedence over comparer or transformer 72 out = validator{} // Takes precedence over comparer or transformer
73 case *comparer, *transformer, Options: 73 case *comparer, *transformer, Options:
74 switch out.(type) { 74 switch out.(type) {
75 case nil: 75 case nil:
76 out = opt 76 out = opt
77 case invalid: 77 case validator:
78 // Keep invalid 78 // Keep validator
79 case *comparer, *transformer, Options: 79 case *comparer, *transformer, Options:
80 out = Options{out, opt} // Conflicting comparers or transformers 80 out = Options{out, opt} // Conflicting comparers or transformers
81 } 81 }
@@ -106,6 +106,11 @@ func (opts Options) String() string {
106// FilterPath returns a new Option where opt is only evaluated if filter f 106// FilterPath returns a new Option where opt is only evaluated if filter f
107// returns true for the current Path in the value tree. 107// returns true for the current Path in the value tree.
108// 108//
109// This filter is called even if a slice element or map entry is missing and
110// provides an opportunity to ignore such cases. The filter function must be
111// symmetric such that the filter result is identical regardless of whether the
112// missing value is from x or y.
113//
109// The option passed in may be an Ignore, Transformer, Comparer, Options, or 114// The option passed in may be an Ignore, Transformer, Comparer, Options, or
110// a previously filtered Option. 115// a previously filtered Option.
111func FilterPath(f func(Path) bool, opt Option) Option { 116func FilterPath(f func(Path) bool, opt Option) Option {
@@ -124,22 +129,22 @@ type pathFilter struct {
124 opt Option 129 opt Option
125} 130}
126 131
127func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { 132func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
128 if f.fnc(s.curPath) { 133 if f.fnc(s.curPath) {
129 return f.opt.filter(s, vx, vy, t) 134 return f.opt.filter(s, t, vx, vy)
130 } 135 }
131 return nil 136 return nil
132} 137}
133 138
134func (f pathFilter) String() string { 139func (f pathFilter) String() string {
135 fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) 140 return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
136 return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
137} 141}
138 142
139// FilterValues returns a new Option where opt is only evaluated if filter f, 143// FilterValues returns a new Option where opt is only evaluated if filter f,
140// which is a function of the form "func(T, T) bool", returns true for the 144// which is a function of the form "func(T, T) bool", returns true for the
141// current pair of values being compared. If the type of the values is not 145// current pair of values being compared. If either value is invalid or
142// assignable to T, then this filter implicitly returns false. 146// the type of the values is not assignable to T, then this filter implicitly
147// returns false.
143// 148//
144// The filter function must be 149// The filter function must be
145// symmetric (i.e., agnostic to the order of the inputs) and 150// symmetric (i.e., agnostic to the order of the inputs) and
@@ -171,19 +176,18 @@ type valuesFilter struct {
171 opt Option 176 opt Option
172} 177}
173 178
174func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { 179func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
175 if !vx.IsValid() || !vy.IsValid() { 180 if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
176 return invalid{} 181 return nil
177 } 182 }
178 if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { 183 if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
179 return f.opt.filter(s, vx, vy, t) 184 return f.opt.filter(s, t, vx, vy)
180 } 185 }
181 return nil 186 return nil
182} 187}
183 188
184func (f valuesFilter) String() string { 189func (f valuesFilter) String() string {
185 fn := getFuncName(f.fnc.Pointer()) 190 return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
186 return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
187} 191}
188 192
189// Ignore is an Option that causes all comparisons to be ignored. 193// Ignore is an Option that causes all comparisons to be ignored.
@@ -194,20 +198,45 @@ func Ignore() Option { return ignore{} }
194type ignore struct{ core } 198type ignore struct{ core }
195 199
196func (ignore) isFiltered() bool { return false } 200func (ignore) isFiltered() bool { return false }
197func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } 201func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
198func (ignore) apply(_ *state, _, _ reflect.Value) { return } 202func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
199func (ignore) String() string { return "Ignore()" } 203func (ignore) String() string { return "Ignore()" }
200 204
201// invalid is a sentinel Option type to indicate that some options could not 205// validator is a sentinel Option type to indicate that some options could not
202// be evaluated due to unexported fields. 206// be evaluated due to unexported fields, missing slice elements, or
203type invalid struct{ core } 207// missing map entries. Both values are validator only for unexported fields.
208type validator struct{ core }
209
210func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
211 if !vx.IsValid() || !vy.IsValid() {
212 return validator{}
213 }
214 if !vx.CanInterface() || !vy.CanInterface() {
215 return validator{}
216 }
217 return nil
218}
219func (validator) apply(s *state, vx, vy reflect.Value) {
220 // Implies missing slice element or map entry.
221 if !vx.IsValid() || !vy.IsValid() {
222 s.report(vx.IsValid() == vy.IsValid(), 0)
223 return
224 }
225
226 // Unable to Interface implies unexported field without visibility access.
227 if !vx.CanInterface() || !vy.CanInterface() {
228 const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported"
229 panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
230 }
204 231
205func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } 232 panic("not reachable")
206func (invalid) apply(s *state, _, _ reflect.Value) {
207 const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
208 panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
209} 233}
210 234
235// identRx represents a valid identifier according to the Go specification.
236const identRx = `[_\p{L}][_\p{L}\p{N}]*`
237
238var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
239
211// Transformer returns an Option that applies a transformation function that 240// Transformer returns an Option that applies a transformation function that
212// converts values of a certain type into that of another. 241// converts values of a certain type into that of another.
213// 242//
@@ -220,18 +249,25 @@ func (invalid) apply(s *state, _, _ reflect.Value) {
220// input and output types are the same), an implicit filter is added such that 249// input and output types are the same), an implicit filter is added such that
221// a transformer is applicable only if that exact transformer is not already 250// a transformer is applicable only if that exact transformer is not already
222// in the tail of the Path since the last non-Transform step. 251// in the tail of the Path since the last non-Transform step.
252// For situations where the implicit filter is still insufficient,
253// consider using cmpopts.AcyclicTransformer, which adds a filter
254// to prevent the transformer from being recursively applied upon itself.
223// 255//
224// The name is a user provided label that is used as the Transform.Name in the 256// The name is a user provided label that is used as the Transform.Name in the
225// transformation PathStep. If empty, an arbitrary name is used. 257// transformation PathStep (and eventually shown in the Diff output).
258// The name must be a valid identifier or qualified identifier in Go syntax.
259// If empty, an arbitrary name is used.
226func Transformer(name string, f interface{}) Option { 260func Transformer(name string, f interface{}) Option {
227 v := reflect.ValueOf(f) 261 v := reflect.ValueOf(f)
228 if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { 262 if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
229 panic(fmt.Sprintf("invalid transformer function: %T", f)) 263 panic(fmt.Sprintf("invalid transformer function: %T", f))
230 } 264 }
231 if name == "" { 265 if name == "" {
232 name = "λ" // Lambda-symbol as place-holder for anonymous transformer 266 name = function.NameOf(v)
233 } 267 if !identsRx.MatchString(name) {
234 if !isValid(name) { 268 name = "λ" // Lambda-symbol as placeholder name
269 }
270 } else if !identsRx.MatchString(name) {
235 panic(fmt.Sprintf("invalid name: %q", name)) 271 panic(fmt.Sprintf("invalid name: %q", name))
236 } 272 }
237 tr := &transformer{name: name, fnc: reflect.ValueOf(f)} 273 tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
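A minimal usage sketch of Transformer; the transformer name and function are illustrative:

	package main

	import (
		"fmt"
		"strings"

		"github.com/google/go-cmp/cmp"
	)

	func main() {
		// "FoldCase" becomes Transform.Name in the path and in diff output.
		foldCase := cmp.Transformer("FoldCase", func(s string) string {
			return strings.ToLower(s)
		})
		// Input and output types match, so the implicit filter described above
		// prevents the transformer from immediately reapplying to its own output.
		fmt.Println(cmp.Equal("Hello", "hELLO", foldCase)) // true
	}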
@@ -250,9 +286,9 @@ type transformer struct {
250 286
251func (tr *transformer) isFiltered() bool { return tr.typ != nil } 287func (tr *transformer) isFiltered() bool { return tr.typ != nil }
252 288
253func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption { 289func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
254 for i := len(s.curPath) - 1; i >= 0; i-- { 290 for i := len(s.curPath) - 1; i >= 0; i-- {
255 if t, ok := s.curPath[i].(*transform); !ok { 291 if t, ok := s.curPath[i].(Transform); !ok {
256 break // Hit most recent non-Transform step 292 break // Hit most recent non-Transform step
257 } else if tr == t.trans { 293 } else if tr == t.trans {
258 return nil // Cannot directly use same Transform 294 return nil // Cannot directly use same Transform
@@ -265,18 +301,15 @@ func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) appl
265} 301}
266 302
267func (tr *transformer) apply(s *state, vx, vy reflect.Value) { 303func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
268 // Update path before calling the Transformer so that dynamic checks 304 step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
269 // will use the updated path. 305 vvx := s.callTRFunc(tr.fnc, vx, step)
270 s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) 306 vvy := s.callTRFunc(tr.fnc, vy, step)
271 defer s.curPath.pop() 307 step.vx, step.vy = vvx, vvy
272 308 s.compareAny(step)
273 vx = s.callTRFunc(tr.fnc, vx)
274 vy = s.callTRFunc(tr.fnc, vy)
275 s.compareAny(vx, vy)
276} 309}
277 310
278func (tr transformer) String() string { 311func (tr transformer) String() string {
279 return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) 312 return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
280} 313}
281 314
282// Comparer returns an Option that determines whether two values are equal 315// Comparer returns an Option that determines whether two values are equal
@@ -311,7 +344,7 @@ type comparer struct {
311 344
312func (cm *comparer) isFiltered() bool { return cm.typ != nil } 345func (cm *comparer) isFiltered() bool { return cm.typ != nil }
313 346
314func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { 347func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
315 if cm.typ == nil || t.AssignableTo(cm.typ) { 348 if cm.typ == nil || t.AssignableTo(cm.typ) {
316 return cm 349 return cm
317 } 350 }
@@ -320,11 +353,11 @@ func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applica
320 353
321func (cm *comparer) apply(s *state, vx, vy reflect.Value) { 354func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
322 eq := s.callTTBFunc(cm.fnc, vx, vy) 355 eq := s.callTTBFunc(cm.fnc, vx, vy)
323 s.report(eq, vx, vy) 356 s.report(eq, reportByFunc)
324} 357}
325 358
326func (cm comparer) String() string { 359func (cm comparer) String() string {
327 return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) 360 return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
328} 361}
329 362
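A usage sketch of Comparer with an approximate float comparison (the tolerance is chosen arbitrarily; cmp requires the function to be symmetric and deterministic):

	package main

	import (
		"fmt"
		"math"

		"github.com/google/go-cmp/cmp"
	)

	func main() {
		approx := cmp.Comparer(func(x, y float64) bool {
			return math.Abs(x-y) < 1e-9
		})
		fmt.Println(cmp.Equal(1.0, 1.0+1e-12, approx)) // true
	}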
330// AllowUnexported returns an Option that forcibly allows operations on 363// AllowUnexported returns an Option that forcibly allows operations on
@@ -338,7 +371,7 @@ func (cm comparer) String() string {
338// defined in an internal package where the semantic meaning of an unexported 371// defined in an internal package where the semantic meaning of an unexported
339// field is in the control of the user. 372// field is in the control of the user.
340// 373//
341// For some cases, a custom Comparer should be used instead that defines 374// In many cases, a custom Comparer should be used instead that defines
342// equality as a function of the public API of a type rather than the underlying 375// equality as a function of the public API of a type rather than the underlying
343// unexported implementation. 376// unexported implementation.
344// 377//
@@ -370,27 +403,92 @@ func AllowUnexported(types ...interface{}) Option {
370 403
371type visibleStructs map[reflect.Type]bool 404type visibleStructs map[reflect.Type]bool
372 405
373func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { 406func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
374 panic("not implemented") 407 panic("not implemented")
375} 408}
376 409
377// reporter is an Option that configures how differences are reported. 410// Result represents the comparison result for a single node and
378type reporter interface { 411// is provided by cmp when calling Result (see Reporter).
379 // TODO: Not exported yet. 412type Result struct {
413 _ [0]func() // Make Result incomparable
414 flags resultFlags
415}
416
417// Equal reports whether the node was determined to be equal or not.
418// As a special case, ignored nodes are considered equal.
419func (r Result) Equal() bool {
420 return r.flags&(reportEqual|reportByIgnore) != 0
421}
422
423// ByIgnore reports whether the node is equal because it was ignored.
424// This never reports true if Equal reports false.
425func (r Result) ByIgnore() bool {
426 return r.flags&reportByIgnore != 0
427}
428
429// ByMethod reports whether the Equal method determined equality.
430func (r Result) ByMethod() bool {
431 return r.flags&reportByMethod != 0
432}
433
434// ByFunc reports whether a Comparer function determined equality.
435func (r Result) ByFunc() bool {
436 return r.flags&reportByFunc != 0
437}
438
439type resultFlags uint
440
441const (
442 _ resultFlags = (1 << iota) / 2
443
444 reportEqual
445 reportUnequal
446 reportByIgnore
447 reportByMethod
448 reportByFunc
449)
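For reference, the (1 << iota) / 2 trick above assigns zero to the blank identifier and then successive powers of two to the named flags (the names below are illustrative):

	const (
		_  uint = (1 << iota) / 2 // (1<<0)/2 = 0
		fA                        // (1<<1)/2 = 1
		fB                        // (1<<2)/2 = 2
		fC                        // (1<<3)/2 = 4
	)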
450
451// Reporter is an Option that can be passed to Equal. When Equal traverses
452// the value trees, it calls PushStep as it descends into each node in the
 453// tree and PopStep as it ascends out of the node. The leaves of the tree are

454// either compared (determined to be equal or not equal) or ignored and reported
455// as such by calling the Report method.
456func Reporter(r interface {
457 // PushStep is called when a tree-traversal operation is performed.
458 // The PathStep itself is only valid until the step is popped.
459 // The PathStep.Values are valid for the duration of the entire traversal
460 // and must not be mutated.
461 //
462 // Equal always calls PushStep at the start to provide an operation-less
463 // PathStep used to report the root values.
380 // 464 //
381 // Perhaps add PushStep and PopStep and change Report to only accept 465 // Within a slice, the exact set of inserted, removed, or modified elements
382 // a PathStep instead of the full-path? Adding a PushStep and PopStep makes 466 // is unspecified and may change in future implementations.
383 // it clear that we are traversing the value tree in a depth-first-search 467 // The entries of a map are iterated through in an unspecified order.
384 // manner, which has an effect on how values are printed. 468 PushStep(PathStep)
469
470 // Report is called exactly once on leaf nodes to report whether the
471 // comparison identified the node as equal, unequal, or ignored.
472 // A leaf node is one that is immediately preceded by and followed by
473 // a pair of PushStep and PopStep calls.
474 Report(Result)
475
476 // PopStep ascends back up the value tree.
477 // There is always a matching pop call for every push call.
478 PopStep()
479}) Option {
480 return reporter{r}
481}
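A sketch of a custom Reporter that records each difference and its path, akin to the reporter example in go-cmp's documentation (identifiers are illustrative; assumes the usual fmt and cmp imports):

	type diffReporter struct {
		path  cmp.Path
		diffs []string
	}

	func (r *diffReporter) PushStep(ps cmp.PathStep) {
		r.path = append(r.path, ps)
	}

	func (r *diffReporter) Report(rs cmp.Result) {
		if !rs.Equal() {
			vx, vy := r.path.Last().Values()
			r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v\n", r.path, vx, vy))
		}
	}

	func (r *diffReporter) PopStep() {
		r.path = r.path[:len(r.path)-1]
	}

	// Usage: cmp.Equal(x, y, cmp.Reporter(&diffReporter{}))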
385 482
386 Option 483type reporter struct{ reporterIface }
484type reporterIface interface {
485 PushStep(PathStep)
486 Report(Result)
487 PopStep()
488}
387 489
388 // Report is called for every comparison made and will be provided with 490func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
389 // the two values being compared, the equality result, and the 491 panic("not implemented")
390 // current path in the value tree. It is possible for x or y to be an
391 // invalid reflect.Value if one of the values is non-existent;
392 // which is possible with maps and slices.
393 Report(x, y reflect.Value, eq bool, p Path)
394} 492}
395 493
396// normalizeOption normalizes the input options such that all Options groups 494// normalizeOption normalizes the input options such that all Options groups
@@ -424,30 +522,3 @@ func flattenOptions(dst, src Options) Options {
424 } 522 }
425 return dst 523 return dst
426} 524}
427
428// getFuncName returns a short function name from the pointer.
429// The string parsing logic works up until Go1.9.
430func getFuncName(p uintptr) string {
431 fnc := runtime.FuncForPC(p)
432 if fnc == nil {
433 return "<unknown>"
434 }
435 name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
436 if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
437 // Strip the package name from method name.
438 name = strings.TrimSuffix(name, ")-fm")
439 name = strings.TrimSuffix(name, ")·fm")
440 if i := strings.LastIndexByte(name, '('); i >= 0 {
441 methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
442 if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
443 methodName = methodName[j+1:] // E.g., "myfunc"
444 }
445 name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
446 }
447 }
448 if i := strings.LastIndexByte(name, '/'); i >= 0 {
449 // Strip the package name.
450 name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
451 }
452 return name
453}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
index c08a3cf..96fffd2 100644
--- a/vendor/github.com/google/go-cmp/cmp/path.go
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -12,80 +12,52 @@ import (
12 "unicode/utf8" 12 "unicode/utf8"
13) 13)
14 14
15type ( 15// Path is a list of PathSteps describing the sequence of operations to get
16 // Path is a list of PathSteps describing the sequence of operations to get 16// from some root type to the current position in the value tree.
17 // from some root type to the current position in the value tree. 17// The first Path element is always an operation-less PathStep that exists
18 // The first Path element is always an operation-less PathStep that exists 18// simply to identify the initial type.
19 // simply to identify the initial type. 19//
20 // 20// When traversing structs with embedded structs, the embedded struct will
21 // When traversing structs with embedded structs, the embedded struct will 21// always be accessed as a field before traversing the fields of the
22 // always be accessed as a field before traversing the fields of the 22// embedded struct themselves. That is, an exported field from the
23 // embedded struct themselves. That is, an exported field from the 23// embedded struct will never be accessed directly from the parent struct.
24 // embedded struct will never be accessed directly from the parent struct. 24type Path []PathStep
25 Path []PathStep
26
27 // PathStep is a union-type for specific operations to traverse
28 // a value's tree structure. Users of this package never need to implement
29 // these types as values of this type will be returned by this package.
30 PathStep interface {
31 String() string
32 Type() reflect.Type // Resulting type after performing the path step
33 isPathStep()
34 }
35 25
36 // SliceIndex is an index operation on a slice or array at some index Key. 26// PathStep is a union-type for specific operations to traverse
37 SliceIndex interface { 27// a value's tree structure. Users of this package never need to implement
38 PathStep 28// these types as values of this type will be returned by this package.
39 Key() int // May return -1 if in a split state 29//
40 30// Implementations of this interface are
41 // SplitKeys returns the indexes for indexing into slices in the 31// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
42 // x and y values, respectively. These indexes may differ due to the 32type PathStep interface {
43 // insertion or removal of an element in one of the slices, causing 33 String() string
44 // all of the indexes to be shifted. If an index is -1, then that
45 // indicates that the element does not exist in the associated slice.
46 //
47 // Key is guaranteed to return -1 if and only if the indexes returned
48 // by SplitKeys are not the same. SplitKeys will never return -1 for
49 // both indexes.
50 SplitKeys() (x int, y int)
51
52 isSliceIndex()
53 }
54 // MapIndex is an index operation on a map at some index Key.
55 MapIndex interface {
56 PathStep
57 Key() reflect.Value
58 isMapIndex()
59 }
60 // TypeAssertion represents a type assertion on an interface.
61 TypeAssertion interface {
62 PathStep
63 isTypeAssertion()
64 }
65 // StructField represents a struct field access on a field called Name.
66 StructField interface {
67 PathStep
68 Name() string
69 Index() int
70 isStructField()
71 }
72 // Indirect represents pointer indirection on the parent type.
73 Indirect interface {
74 PathStep
75 isIndirect()
76 }
77 // Transform is a transformation from the parent type to the current type.
78 Transform interface {
79 PathStep
80 Name() string
81 Func() reflect.Value
82 34
83 // Option returns the originally constructed Transformer option. 35 // Type is the resulting type after performing the path step.
84 // The == operator can be used to detect the exact option used. 36 Type() reflect.Type
85 Option() Option
86 37
87 isTransform() 38 // Values is the resulting values after performing the path step.
88 } 39 // The type of each valid value is guaranteed to be identical to Type.
40 //
41 // In some cases, one or both may be invalid or have restrictions:
42 // • For StructField, both are not interface-able if the current field
43 // is unexported and the struct type is not explicitly permitted by
44 // AllowUnexported to traverse unexported fields.
45 // • For SliceIndex, one may be invalid if an element is missing from
46 // either the x or y slice.
47 // • For MapIndex, one may be invalid if an entry is missing from
48 // either the x or y map.
49 //
50 // The provided values must not be mutated.
51 Values() (vx, vy reflect.Value)
52}
53
54var (
55 _ PathStep = StructField{}
56 _ PathStep = SliceIndex{}
57 _ PathStep = MapIndex{}
58 _ PathStep = Indirect{}
59 _ PathStep = TypeAssertion{}
60 _ PathStep = Transform{}
89) 61)
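Because every concrete step implements PathStep, a Reporter (or any Path consumer) can type-switch on it; a small illustrative helper, assuming the cmp and fmt imports:

	func describe(ps cmp.PathStep) string {
		switch s := ps.(type) {
		case cmp.StructField:
			return fmt.Sprintf("field .%s", s.Name())
		case cmp.SliceIndex:
			return fmt.Sprintf("slice index %d", s.Key()) // -1 when in a split state
		case cmp.MapIndex:
			return fmt.Sprintf("map key %v", s.Key())
		case cmp.Indirect:
			return "pointer indirection"
		case cmp.TypeAssertion:
			return fmt.Sprintf("type assertion to %v", s.Type())
		case cmp.Transform:
			return fmt.Sprintf("transform %s()", s.Name())
		default:
			return ps.String() // e.g., the operation-less root step
		}
	}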
90 62
91func (pa *Path) push(s PathStep) { 63func (pa *Path) push(s PathStep) {
@@ -124,7 +96,7 @@ func (pa Path) Index(i int) PathStep {
124func (pa Path) String() string { 96func (pa Path) String() string {
125 var ss []string 97 var ss []string
126 for _, s := range pa { 98 for _, s := range pa {
127 if _, ok := s.(*structField); ok { 99 if _, ok := s.(StructField); ok {
128 ss = append(ss, s.String()) 100 ss = append(ss, s.String())
129 } 101 }
130 } 102 }
@@ -144,13 +116,13 @@ func (pa Path) GoString() string {
144 nextStep = pa[i+1] 116 nextStep = pa[i+1]
145 } 117 }
146 switch s := s.(type) { 118 switch s := s.(type) {
147 case *indirect: 119 case Indirect:
148 numIndirect++ 120 numIndirect++
149 pPre, pPost := "(", ")" 121 pPre, pPost := "(", ")"
150 switch nextStep.(type) { 122 switch nextStep.(type) {
151 case *indirect: 123 case Indirect:
152 continue // Next step is indirection, so let them batch up 124 continue // Next step is indirection, so let them batch up
153 case *structField: 125 case StructField:
154 numIndirect-- // Automatic indirection on struct fields 126 numIndirect-- // Automatic indirection on struct fields
155 case nil: 127 case nil:
156 pPre, pPost = "", "" // Last step; no need for parenthesis 128 pPre, pPost = "", "" // Last step; no need for parenthesis
@@ -161,19 +133,10 @@ func (pa Path) GoString() string {
161 } 133 }
162 numIndirect = 0 134 numIndirect = 0
163 continue 135 continue
164 case *transform: 136 case Transform:
165 ssPre = append(ssPre, s.trans.name+"(") 137 ssPre = append(ssPre, s.trans.name+"(")
166 ssPost = append(ssPost, ")") 138 ssPost = append(ssPost, ")")
167 continue 139 continue
168 case *typeAssertion:
169 // As a special-case, elide type assertions on anonymous types
170 // since they are typically generated dynamically and can be very
171 // verbose. For example, some transforms return interface{} because
172 // of Go's lack of generics, but typically take in and return the
173 // exact same concrete type.
174 if s.Type().PkgPath() == "" {
175 continue
176 }
177 } 140 }
178 ssPost = append(ssPost, s.String()) 141 ssPost = append(ssPost, s.String())
179 } 142 }
@@ -183,44 +146,13 @@ func (pa Path) GoString() string {
183 return strings.Join(ssPre, "") + strings.Join(ssPost, "") 146 return strings.Join(ssPre, "") + strings.Join(ssPost, "")
184} 147}
185 148
186type ( 149type pathStep struct {
187 pathStep struct { 150 typ reflect.Type
188 typ reflect.Type 151 vx, vy reflect.Value
189 } 152}
190
191 sliceIndex struct {
192 pathStep
193 xkey, ykey int
194 }
195 mapIndex struct {
196 pathStep
197 key reflect.Value
198 }
199 typeAssertion struct {
200 pathStep
201 }
202 structField struct {
203 pathStep
204 name string
205 idx int
206
207 // These fields are used for forcibly accessing an unexported field.
208 // pvx, pvy, and field are only valid if unexported is true.
209 unexported bool
210 force bool // Forcibly allow visibility
211 pvx, pvy reflect.Value // Parent values
212 field reflect.StructField // Field information
213 }
214 indirect struct {
215 pathStep
216 }
217 transform struct {
218 pathStep
219 trans *transformer
220 }
221)
222 153
223func (ps pathStep) Type() reflect.Type { return ps.typ } 154func (ps pathStep) Type() reflect.Type { return ps.typ }
155func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
224func (ps pathStep) String() string { 156func (ps pathStep) String() string {
225 if ps.typ == nil { 157 if ps.typ == nil {
226 return "<nil>" 158 return "<nil>"
@@ -232,7 +164,54 @@ func (ps pathStep) String() string {
232 return fmt.Sprintf("{%s}", s) 164 return fmt.Sprintf("{%s}", s)
233} 165}
234 166
235func (si sliceIndex) String() string { 167// StructField represents a struct field access on a field called Name.
168type StructField struct{ *structField }
169type structField struct {
170 pathStep
171 name string
172 idx int
173
174 // These fields are used for forcibly accessing an unexported field.
175 // pvx, pvy, and field are only valid if unexported is true.
176 unexported bool
177 mayForce bool // Forcibly allow visibility
178 pvx, pvy reflect.Value // Parent values
179 field reflect.StructField // Field information
180}
181
182func (sf StructField) Type() reflect.Type { return sf.typ }
183func (sf StructField) Values() (vx, vy reflect.Value) {
184 if !sf.unexported {
185 return sf.vx, sf.vy // CanInterface reports true
186 }
187
188 // Forcibly obtain read-write access to an unexported struct field.
189 if sf.mayForce {
190 vx = retrieveUnexportedField(sf.pvx, sf.field)
191 vy = retrieveUnexportedField(sf.pvy, sf.field)
192 return vx, vy // CanInterface reports true
193 }
194 return sf.vx, sf.vy // CanInterface reports false
195}
196func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
197
198// Name is the field name.
199func (sf StructField) Name() string { return sf.name }
200
201// Index is the index of the field in the parent struct type.
202// See reflect.Type.Field.
203func (sf StructField) Index() int { return sf.idx }
204
205// SliceIndex is an index operation on a slice or array at some index Key.
206type SliceIndex struct{ *sliceIndex }
207type sliceIndex struct {
208 pathStep
209 xkey, ykey int
210}
211
212func (si SliceIndex) Type() reflect.Type { return si.typ }
213func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
214func (si SliceIndex) String() string {
236 switch { 215 switch {
237 case si.xkey == si.ykey: 216 case si.xkey == si.ykey:
238 return fmt.Sprintf("[%d]", si.xkey) 217 return fmt.Sprintf("[%d]", si.xkey)
@@ -247,63 +226,83 @@ func (si sliceIndex) String() string {
247 return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) 226 return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
248 } 227 }
249} 228}
250func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
251func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
252func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) }
253func (in indirect) String() string { return "*" }
254func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
255 229
256func (si sliceIndex) Key() int { 230 // Key is the index key; it may return -1 if in a split state.
231func (si SliceIndex) Key() int {
257 if si.xkey != si.ykey { 232 if si.xkey != si.ykey {
258 return -1 233 return -1
259 } 234 }
260 return si.xkey 235 return si.xkey
261} 236}
262func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
263func (mi mapIndex) Key() reflect.Value { return mi.key }
264func (sf structField) Name() string { return sf.name }
265func (sf structField) Index() int { return sf.idx }
266func (tf transform) Name() string { return tf.trans.name }
267func (tf transform) Func() reflect.Value { return tf.trans.fnc }
268func (tf transform) Option() Option { return tf.trans }
269
270func (pathStep) isPathStep() {}
271func (sliceIndex) isSliceIndex() {}
272func (mapIndex) isMapIndex() {}
273func (typeAssertion) isTypeAssertion() {}
274func (structField) isStructField() {}
275func (indirect) isIndirect() {}
276func (transform) isTransform() {}
277 237
278var ( 238// SplitKeys are the indexes for indexing into slices in the
279 _ SliceIndex = sliceIndex{} 239// x and y values, respectively. These indexes may differ due to the
280 _ MapIndex = mapIndex{} 240// insertion or removal of an element in one of the slices, causing
281 _ TypeAssertion = typeAssertion{} 241// all of the indexes to be shifted. If an index is -1, then that
282 _ StructField = structField{} 242// indicates that the element does not exist in the associated slice.
283 _ Indirect = indirect{} 243//
284 _ Transform = transform{} 244// Key is guaranteed to return -1 if and only if the indexes returned
285 245// by SplitKeys are not the same. SplitKeys will never return -1 for
286 _ PathStep = sliceIndex{} 246// both indexes.
287 _ PathStep = mapIndex{} 247func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
288 _ PathStep = typeAssertion{} 248
289 _ PathStep = structField{} 249// MapIndex is an index operation on a map at some index Key.
290 _ PathStep = indirect{} 250type MapIndex struct{ *mapIndex }
291 _ PathStep = transform{} 251type mapIndex struct {
292) 252 pathStep
253 key reflect.Value
254}
255
256func (mi MapIndex) Type() reflect.Type { return mi.typ }
257func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
258func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
259
260// Key is the value of the map key.
261func (mi MapIndex) Key() reflect.Value { return mi.key }
262
263// Indirect represents pointer indirection on the parent type.
264type Indirect struct{ *indirect }
265type indirect struct {
266 pathStep
267}
268
269func (in Indirect) Type() reflect.Type { return in.typ }
270func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
271func (in Indirect) String() string { return "*" }
272
273// TypeAssertion represents a type assertion on an interface.
274type TypeAssertion struct{ *typeAssertion }
275type typeAssertion struct {
276 pathStep
277}
278
279func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
280func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
281func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
282
283// Transform is a transformation from the parent type to the current type.
284type Transform struct{ *transform }
285type transform struct {
286 pathStep
287 trans *transformer
288}
289
290func (tf Transform) Type() reflect.Type { return tf.typ }
291func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
292func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
293
294// Name is the name of the Transformer.
295func (tf Transform) Name() string { return tf.trans.name }
296
297// Func is the function pointer to the transformer function.
298func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
299
300// Option returns the originally constructed Transformer option.
301// The == operator can be used to detect the exact option used.
302func (tf Transform) Option() Option { return tf.trans }
293 303
294// isExported reports whether the identifier is exported. 304// isExported reports whether the identifier is exported.
295func isExported(id string) bool { 305func isExported(id string) bool {
296 r, _ := utf8.DecodeRuneInString(id) 306 r, _ := utf8.DecodeRuneInString(id)
297 return unicode.IsUpper(r) 307 return unicode.IsUpper(r)
298} 308}
299
300// isValid reports whether the identifier is valid.
301// Empty and underscore-only strings are not valid.
302func isValid(id string) bool {
303 ok := id != "" && id != "_"
304 for j, c := range id {
305 ok = ok && (j > 0 || !unicode.IsDigit(c))
306 ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
307 }
308 return ok
309}
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 0000000..6ddf299
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,51 @@
1// Copyright 2017, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7// defaultReporter implements the reporter interface.
8//
9// As Equal serially calls the PushStep, Report, and PopStep methods, the
10// defaultReporter constructs a tree-based representation of the compared value
11// and the result of each comparison (see valueNode).
12//
13// When the String method is called, the FormatDiff method transforms the
14// valueNode tree into a textNode tree, which is a tree-based representation
15// of the textual output (see textNode).
16//
17// Lastly, the textNode.String method produces the final report as a string.
18type defaultReporter struct {
19 root *valueNode
20 curr *valueNode
21}
22
23func (r *defaultReporter) PushStep(ps PathStep) {
24 r.curr = r.curr.PushStep(ps)
25 if r.root == nil {
26 r.root = r.curr
27 }
28}
29func (r *defaultReporter) Report(rs Result) {
30 r.curr.Report(rs)
31}
32func (r *defaultReporter) PopStep() {
33 r.curr = r.curr.PopStep()
34}
35
36// String provides a full report of the differences detected as a structured
37// literal in pseudo-Go syntax. String may only be called after the entire tree
38// has been traversed.
39func (r *defaultReporter) String() string {
40 assert(r.root != nil && r.curr == nil)
41 if r.root.NumDiff == 0 {
42 return ""
43 }
44 return formatOptions{}.FormatDiff(r.root).String()
45}
46
47func assert(ok bool) {
48 if !ok {
49 panic("assertion failure")
50 }
51}
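This defaultReporter is what backs cmp.Diff; the typical test-side usage is:

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("unexpected result (-want +got):\n%s", diff)
	}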
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 0000000..05efb99
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,296 @@
1// Copyright 2019, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7import (
8 "fmt"
9 "reflect"
10
11 "github.com/google/go-cmp/cmp/internal/value"
12)
13
14// TODO: Enforce limits?
15// * Enforce maximum number of records to print per node?
16// * Enforce maximum size in bytes allowed?
17// * As a heuristic, use less verbosity for equal nodes than unequal nodes.
18// TODO: Enforce unique outputs?
19// * Avoid Stringer methods if it results in same output?
20// * Print pointer address if outputs still equal?
21
22// numContextRecords is the number of surrounding equal records to print.
23const numContextRecords = 2
24
25type diffMode byte
26
27const (
28 diffUnknown diffMode = 0
29 diffIdentical diffMode = ' '
30 diffRemoved diffMode = '-'
31 diffInserted diffMode = '+'
32)
33
34type typeMode int
35
36const (
37 // emitType always prints the type.
38 emitType typeMode = iota
39 // elideType never prints the type.
40 elideType
41 // autoType prints the type only for composite kinds
42 // (i.e., structs, slices, arrays, and maps).
43 autoType
44)
45
46type formatOptions struct {
47 // DiffMode controls the output mode of FormatDiff.
48 //
49 // If diffUnknown, then produce a diff of the x and y values.
50 // If diffIdentical, then emit values as if they were equal.
51 // If diffRemoved, then only emit x values (ignoring y values).
52 // If diffInserted, then only emit y values (ignoring x values).
53 DiffMode diffMode
54
55 // TypeMode controls whether to print the type for the current node.
56 //
57 // As a general rule of thumb, we always print the type of the next node
58 // after an interface, and always elide the type of the next node after
59 // a slice or map node.
60 TypeMode typeMode
61
62 // formatValueOptions are options specific to printing reflect.Values.
63 formatValueOptions
64}
65
66func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
67 opts.DiffMode = d
68 return opts
69}
70func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
71 opts.TypeMode = t
72 return opts
73}
74
 75// FormatDiff converts a valueNode tree into a textNode tree, where the latter
76// is a textual representation of the differences detected in the former.
77func (opts formatOptions) FormatDiff(v *valueNode) textNode {
78 // Check whether we have specialized formatting for this node.
79 // This is not necessary, but helpful for producing more readable outputs.
80 if opts.CanFormatDiffSlice(v) {
81 return opts.FormatDiffSlice(v)
82 }
83
84 // For leaf nodes, format the value based on the reflect.Values alone.
85 if v.MaxDepth == 0 {
86 switch opts.DiffMode {
87 case diffUnknown, diffIdentical:
88 // Format Equal.
89 if v.NumDiff == 0 {
90 outx := opts.FormatValue(v.ValueX, visitedPointers{})
91 outy := opts.FormatValue(v.ValueY, visitedPointers{})
92 if v.NumIgnored > 0 && v.NumSame == 0 {
93 return textEllipsis
94 } else if outx.Len() < outy.Len() {
95 return outx
96 } else {
97 return outy
98 }
99 }
100
101 // Format unequal.
102 assert(opts.DiffMode == diffUnknown)
103 var list textList
104 outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{})
105 outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{})
106 if outx != nil {
107 list = append(list, textRecord{Diff: '-', Value: outx})
108 }
109 if outy != nil {
110 list = append(list, textRecord{Diff: '+', Value: outy})
111 }
112 return opts.WithTypeMode(emitType).FormatType(v.Type, list)
113 case diffRemoved:
114 return opts.FormatValue(v.ValueX, visitedPointers{})
115 case diffInserted:
116 return opts.FormatValue(v.ValueY, visitedPointers{})
117 default:
118 panic("invalid diff mode")
119 }
120 }
121
122 // Descend into the child value node.
123 if v.TransformerName != "" {
124 out := opts.WithTypeMode(emitType).FormatDiff(v.Value)
125 out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"}
126 return opts.FormatType(v.Type, out)
127 } else {
128 switch k := v.Type.Kind(); k {
129 case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
130 return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k))
131 case reflect.Ptr:
132 return textWrap{"&", opts.FormatDiff(v.Value), ""}
133 case reflect.Interface:
134 return opts.WithTypeMode(emitType).FormatDiff(v.Value)
135 default:
136 panic(fmt.Sprintf("%v cannot have children", k))
137 }
138 }
139}
140
141func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode {
142 // Derive record name based on the data structure kind.
143 var name string
144 var formatKey func(reflect.Value) string
145 switch k {
146 case reflect.Struct:
147 name = "field"
148 opts = opts.WithTypeMode(autoType)
149 formatKey = func(v reflect.Value) string { return v.String() }
150 case reflect.Slice, reflect.Array:
151 name = "element"
152 opts = opts.WithTypeMode(elideType)
153 formatKey = func(reflect.Value) string { return "" }
154 case reflect.Map:
155 name = "entry"
156 opts = opts.WithTypeMode(elideType)
157 formatKey = formatMapKey
158 }
159
160 // Handle unification.
161 switch opts.DiffMode {
162 case diffIdentical, diffRemoved, diffInserted:
163 var list textList
164 var deferredEllipsis bool // Add final "..." to indicate records were dropped
165 for _, r := range recs {
166 // Elide struct fields that are zero value.
167 if k == reflect.Struct {
168 var isZero bool
169 switch opts.DiffMode {
170 case diffIdentical:
 171 isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
172 case diffRemoved:
173 isZero = value.IsZero(r.Value.ValueX)
174 case diffInserted:
175 isZero = value.IsZero(r.Value.ValueY)
176 }
177 if isZero {
178 continue
179 }
180 }
181 // Elide ignored nodes.
182 if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
183 deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
184 if !deferredEllipsis {
185 list.AppendEllipsis(diffStats{})
186 }
187 continue
188 }
189 if out := opts.FormatDiff(r.Value); out != nil {
190 list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
191 }
192 }
193 if deferredEllipsis {
194 list.AppendEllipsis(diffStats{})
195 }
196 return textWrap{"{", list, "}"}
197 case diffUnknown:
198 default:
199 panic("invalid diff mode")
200 }
201
202 // Handle differencing.
203 var list textList
204 groups := coalesceAdjacentRecords(name, recs)
205 for i, ds := range groups {
206 // Handle equal records.
207 if ds.NumDiff() == 0 {
208 // Compute the number of leading and trailing records to print.
209 var numLo, numHi int
210 numEqual := ds.NumIgnored + ds.NumIdentical
211 for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
212 if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
213 break
214 }
215 numLo++
216 }
217 for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
218 if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
219 break
220 }
221 numHi++
222 }
223 if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
224 numHi++ // Avoid pointless coalescing of a single equal record
225 }
226
227 // Format the equal values.
228 for _, r := range recs[:numLo] {
229 out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
230 list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
231 }
232 if numEqual > numLo+numHi {
233 ds.NumIdentical -= numLo + numHi
234 list.AppendEllipsis(ds)
235 }
236 for _, r := range recs[numEqual-numHi : numEqual] {
237 out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
238 list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
239 }
240 recs = recs[numEqual:]
241 continue
242 }
243
244 // Handle unequal records.
245 for _, r := range recs[:ds.NumDiff()] {
246 switch {
247 case opts.CanFormatDiffSlice(r.Value):
248 out := opts.FormatDiffSlice(r.Value)
249 list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
250 case r.Value.NumChildren == r.Value.MaxDepth:
251 outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value)
252 outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value)
253 if outx != nil {
254 list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
255 }
256 if outy != nil {
257 list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
258 }
259 default:
260 out := opts.FormatDiff(r.Value)
261 list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
262 }
263 }
264 recs = recs[ds.NumDiff():]
265 }
266 assert(len(recs) == 0)
267 return textWrap{"{", list, "}"}
268}
269
270// coalesceAdjacentRecords coalesces the list of records into groups of
271// adjacent equal, or unequal counts.
272func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
273 var prevCase int // Arbitrary index into which case last occurred
274 lastStats := func(i int) *diffStats {
275 if prevCase != i {
276 groups = append(groups, diffStats{Name: name})
277 prevCase = i
278 }
279 return &groups[len(groups)-1]
280 }
281 for _, r := range recs {
282 switch rv := r.Value; {
283 case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
284 lastStats(1).NumIgnored++
285 case rv.NumDiff == 0:
286 lastStats(1).NumIdentical++
287 case rv.NumDiff > 0 && !rv.ValueY.IsValid():
288 lastStats(2).NumRemoved++
289 case rv.NumDiff > 0 && !rv.ValueX.IsValid():
290 lastStats(2).NumInserted++
291 default:
292 lastStats(2).NumModified++
293 }
294 }
295 return groups
296}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 0000000..5521c60
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,279 @@
1// Copyright 2019, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7import (
8 "fmt"
9 "reflect"
10 "strconv"
11 "strings"
12 "unicode"
13
14 "github.com/google/go-cmp/cmp/internal/flags"
15 "github.com/google/go-cmp/cmp/internal/value"
16)
17
18type formatValueOptions struct {
19 // AvoidStringer controls whether to avoid calling custom stringer
20 // methods like error.Error or fmt.Stringer.String.
21 AvoidStringer bool
22
23 // ShallowPointers controls whether to avoid descending into pointers.
24 // Useful when printing map keys, where pointer comparison is performed
25 // on the pointer address rather than the pointed-at value.
26 ShallowPointers bool
27
28 // PrintAddresses controls whether to print the address of all pointers,
29 // slice elements, and maps.
30 PrintAddresses bool
31}
32
33// FormatType prints the type as if it were wrapping s.
34// This may return s as-is depending on the current type and TypeMode mode.
35func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
36 // Check whether to emit the type or not.
37 switch opts.TypeMode {
38 case autoType:
39 switch t.Kind() {
40 case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
41 if s.Equal(textNil) {
42 return s
43 }
44 default:
45 return s
46 }
47 case elideType:
48 return s
49 }
50
51 // Determine the type label, applying special handling for unnamed types.
52 typeName := t.String()
53 if t.Name() == "" {
54 // According to Go grammar, certain type literals contain symbols that
 55 // do not strongly bind to the next lexical token (e.g., *T).
56 switch t.Kind() {
57 case reflect.Chan, reflect.Func, reflect.Ptr:
58 typeName = "(" + typeName + ")"
59 }
60 typeName = strings.Replace(typeName, "struct {", "struct{", -1)
61 typeName = strings.Replace(typeName, "interface {", "interface{", -1)
62 }
63
 64 // Avoid wrapping the value in parentheses if unnecessary.
65 if s, ok := s.(textWrap); ok {
66 hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")")
67 hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}")
68 if hasParens || hasBraces {
69 return textWrap{typeName, s, ""}
70 }
71 }
72 return textWrap{typeName + "(", s, ")"}
73}
74
75// FormatValue prints the reflect.Value, taking extra care to avoid descending
76// into pointers already in m. As pointers are visited, m is also updated.
77func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) {
78 if !v.IsValid() {
79 return nil
80 }
81 t := v.Type()
82
83 // Check whether there is an Error or String method to call.
84 if !opts.AvoidStringer && v.CanInterface() {
85 // Avoid calling Error or String methods on nil receivers since many
86 // implementations crash when doing so.
87 if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
88 switch v := v.Interface().(type) {
89 case error:
90 return textLine("e" + formatString(v.Error()))
91 case fmt.Stringer:
92 return textLine("s" + formatString(v.String()))
93 }
94 }
95 }
96
97 // Check whether to explicitly wrap the result with the type.
98 var skipType bool
99 defer func() {
100 if !skipType {
101 out = opts.FormatType(t, out)
102 }
103 }()
104
105 var ptr string
106 switch t.Kind() {
107 case reflect.Bool:
108 return textLine(fmt.Sprint(v.Bool()))
109 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
110 return textLine(fmt.Sprint(v.Int()))
111 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
112 // Unnamed uints are usually bytes or words, so use hexadecimal.
113 if t.PkgPath() == "" || t.Kind() == reflect.Uintptr {
114 return textLine(formatHex(v.Uint()))
115 }
116 return textLine(fmt.Sprint(v.Uint()))
117 case reflect.Float32, reflect.Float64:
118 return textLine(fmt.Sprint(v.Float()))
119 case reflect.Complex64, reflect.Complex128:
120 return textLine(fmt.Sprint(v.Complex()))
121 case reflect.String:
122 return textLine(formatString(v.String()))
123 case reflect.UnsafePointer, reflect.Chan, reflect.Func:
124 return textLine(formatPointer(v))
125 case reflect.Struct:
126 var list textList
127 for i := 0; i < v.NumField(); i++ {
128 vv := v.Field(i)
129 if value.IsZero(vv) {
130 continue // Elide fields with zero values
131 }
132 s := opts.WithTypeMode(autoType).FormatValue(vv, m)
133 list = append(list, textRecord{Key: t.Field(i).Name, Value: s})
134 }
135 return textWrap{"{", list, "}"}
136 case reflect.Slice:
137 if v.IsNil() {
138 return textNil
139 }
140 if opts.PrintAddresses {
141 ptr = formatPointer(v)
142 }
143 fallthrough
144 case reflect.Array:
145 var list textList
146 for i := 0; i < v.Len(); i++ {
147 vi := v.Index(i)
148 if vi.CanAddr() { // Check for cyclic elements
149 p := vi.Addr()
150 if m.Visit(p) {
151 var out textNode
152 out = textLine(formatPointer(p))
153 out = opts.WithTypeMode(emitType).FormatType(p.Type(), out)
154 out = textWrap{"*", out, ""}
155 list = append(list, textRecord{Value: out})
156 continue
157 }
158 }
159 s := opts.WithTypeMode(elideType).FormatValue(vi, m)
160 list = append(list, textRecord{Value: s})
161 }
162 return textWrap{ptr + "{", list, "}"}
163 case reflect.Map:
164 if v.IsNil() {
165 return textNil
166 }
167 if m.Visit(v) {
168 return textLine(formatPointer(v))
169 }
170
171 var list textList
172 for _, k := range value.SortKeys(v.MapKeys()) {
173 sk := formatMapKey(k)
174 sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m)
175 list = append(list, textRecord{Key: sk, Value: sv})
176 }
177 if opts.PrintAddresses {
178 ptr = formatPointer(v)
179 }
180 return textWrap{ptr + "{", list, "}"}
181 case reflect.Ptr:
182 if v.IsNil() {
183 return textNil
184 }
185 if m.Visit(v) || opts.ShallowPointers {
186 return textLine(formatPointer(v))
187 }
188 if opts.PrintAddresses {
189 ptr = formatPointer(v)
190 }
191 skipType = true // Let the underlying value print the type instead
192 return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""}
193 case reflect.Interface:
194 if v.IsNil() {
195 return textNil
196 }
197 // Interfaces accept different concrete types,
198 // so configure the underlying value to explicitly print the type.
199 skipType = true // Print the concrete type instead
200 return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m)
201 default:
202 panic(fmt.Sprintf("%v kind not handled", v.Kind()))
203 }
204}
205
206// formatMapKey formats v as if it were a map key.
207// The result is guaranteed to be a single line.
208func formatMapKey(v reflect.Value) string {
209 var opts formatOptions
210 opts.TypeMode = elideType
211 opts.AvoidStringer = true
212 opts.ShallowPointers = true
213 s := opts.FormatValue(v, visitedPointers{}).String()
214 return strings.TrimSpace(s)
215}
216
217// formatString prints s as a double-quoted or backtick-quoted string.
218func formatString(s string) string {
 219 // Use the quoted string if it is the same length as a raw string literal.
220 // Otherwise, attempt to use the raw string form.
221 qs := strconv.Quote(s)
222 if len(qs) == 1+len(s)+1 {
223 return qs
224 }
225
226 // Disallow newlines to ensure output is a single line.
227 // Only allow printable runes for readability purposes.
228 rawInvalid := func(r rune) bool {
229 return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
230 }
231 if strings.IndexFunc(s, rawInvalid) < 0 {
232 return "`" + s + "`"
233 }
234 return qs
235}
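A standalone sketch of the same length check (formatString itself is unexported; the real code additionally verifies that the raw form stays printable and single-line):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		s := "contains\ttabs"
		qs := strconv.Quote(s) // escaping makes the quoted form longer
		if len(qs) == 1+len(s)+1 {
			fmt.Println(qs) // quoting added only the two quote characters
		} else {
			fmt.Println("`" + s + "`") // the raw form avoids the escape sequences
		}
	}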
236
237// formatHex prints u as a hexadecimal integer in Go notation.
238func formatHex(u uint64) string {
239 var f string
240 switch {
241 case u <= 0xff:
242 f = "0x%02x"
243 case u <= 0xffff:
244 f = "0x%04x"
245 case u <= 0xffffff:
246 f = "0x%06x"
247 case u <= 0xffffffff:
248 f = "0x%08x"
249 case u <= 0xffffffffff:
250 f = "0x%010x"
251 case u <= 0xffffffffffff:
252 f = "0x%012x"
253 case u <= 0xffffffffffffff:
254 f = "0x%014x"
255 case u <= 0xffffffffffffffff:
256 f = "0x%016x"
257 }
258 return fmt.Sprintf(f, u)
259}
260
261// formatPointer prints the address of the pointer.
262func formatPointer(v reflect.Value) string {
263 p := v.Pointer()
264 if flags.Deterministic {
265 p = 0xdeadf00f // Only used for stable testing purposes
266 }
267 return fmt.Sprintf("⟪0x%x⟫", p)
268}
269
270type visitedPointers map[value.Pointer]struct{}
271
272// Visit inserts pointer v into the visited map and reports whether it had
273// already been visited before.
274func (m visitedPointers) Visit(v reflect.Value) bool {
275 p := value.PointerOf(v)
276 _, visited := m[p]
277 m[p] = struct{}{}
278 return visited
279}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 0000000..8cb3265
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,333 @@
1// Copyright 2019, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7import (
8 "bytes"
9 "fmt"
10 "reflect"
11 "strings"
12 "unicode"
13 "unicode/utf8"
14
15 "github.com/google/go-cmp/cmp/internal/diff"
16)
17
18// CanFormatDiffSlice reports whether we support custom formatting for nodes
19// that are slices of primitive kinds or strings.
20func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
21 switch {
22 case opts.DiffMode != diffUnknown:
23 return false // Must be formatting in diff mode
24 case v.NumDiff == 0:
25 return false // No differences detected
26 case v.NumIgnored+v.NumCompared+v.NumTransformed > 0:
27 // TODO: Handle the case where someone uses bytes.Equal on a large slice.
 28 return false // Some custom option was used to determine equality
29 case !v.ValueX.IsValid() || !v.ValueY.IsValid():
30 return false // Both values must be valid
31 }
32
33 switch t := v.Type; t.Kind() {
34 case reflect.String:
35 case reflect.Array, reflect.Slice:
36 // Only slices of primitive types have specialized handling.
37 switch t.Elem().Kind() {
38 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
39 reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
40 reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
41 default:
42 return false
43 }
44
45 // If a sufficient number of elements already differ,
46 // use specialized formatting even if length requirement is not met.
47 if v.NumDiff > v.NumSame {
48 return true
49 }
50 default:
51 return false
52 }
53
54 // Use specialized string diffing for longer slices or strings.
55 const minLength = 64
56 return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
57}
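For instance, two long multi-line strings satisfy these checks and get the specialized line-based diff (sizes chosen to clear the 64-byte minimum):

	package main

	import (
		"fmt"
		"strings"

		"github.com/google/go-cmp/cmp"
	)

	func main() {
		x := strings.Repeat("same line\n", 70)
		y := strings.Repeat("same line\n", 69) + "changed line\n"
		fmt.Println(cmp.Diff(x, y)) // rendered as a line-oriented diff
	}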
58
59// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
60// This provides custom-tailored logic to make printing of differences in
61// textual strings and slices of primitive kinds more readable.
62func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
63 assert(opts.DiffMode == diffUnknown)
64 t, vx, vy := v.Type, v.ValueX, v.ValueY
65
66 // Auto-detect the type of the data.
67 var isLinedText, isText, isBinary bool
68 var sx, sy string
69 switch {
70 case t.Kind() == reflect.String:
71 sx, sy = vx.String(), vy.String()
72 isText = true // Initial estimate, verify later
73 case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
74 sx, sy = string(vx.Bytes()), string(vy.Bytes())
75 isBinary = true // Initial estimate, verify later
76 case t.Kind() == reflect.Array:
77 // Arrays need to be addressable for slice operations to work.
78 vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
79 vx2.Set(vx)
80 vy2.Set(vy)
81 vx, vy = vx2, vy2
82 }
83 if isText || isBinary {
84 var numLines, lastLineIdx, maxLineLen int
85 isBinary = false
86 for i, r := range sx + sy {
87 if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
88 isBinary = true
89 break
90 }
91 if r == '\n' {
92 if maxLineLen < i-lastLineIdx {
 93 maxLineLen = i - lastLineIdx
94 }
95 lastLineIdx = i + 1
96 numLines++
97 }
98 }
99 isText = !isBinary
100 isLinedText = isText && numLines >= 4 && maxLineLen <= 256
101 }
102
103 // Format the string into printable records.
104 var list textList
105 var delim string
106 switch {
107 // If the text appears to be multi-lined text,
108 // then perform differencing across individual lines.
109 case isLinedText:
110 ssx := strings.Split(sx, "\n")
111 ssy := strings.Split(sy, "\n")
112 list = opts.formatDiffSlice(
113 reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
114 func(v reflect.Value, d diffMode) textRecord {
115 s := formatString(v.Index(0).String())
116 return textRecord{Diff: d, Value: textLine(s)}
117 },
118 )
119 delim = "\n"
120 // If the text appears to be single-lined text,
121 // then perform differencing in approximately fixed-sized chunks.
122 // The output is printed as quoted strings.
123 case isText:
124 list = opts.formatDiffSlice(
125 reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
126 func(v reflect.Value, d diffMode) textRecord {
127 s := formatString(v.String())
128 return textRecord{Diff: d, Value: textLine(s)}
129 },
130 )
131 delim = ""
132 // If the text appears to be binary data,
133 // then perform differencing in approximately fixed-sized chunks.
134 // The output is inspired by hexdump.
135 case isBinary:
136 list = opts.formatDiffSlice(
137 reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
138 func(v reflect.Value, d diffMode) textRecord {
139 var ss []string
140 for i := 0; i < v.Len(); i++ {
141 ss = append(ss, formatHex(v.Index(i).Uint()))
142 }
143 s := strings.Join(ss, ", ")
144 comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
145 return textRecord{Diff: d, Value: textLine(s), Comment: comment}
146 },
147 )
148 // For all other slices of primitive types,
 149 // perform differencing in approximately fixed-sized chunks.
150 // The size of each chunk depends on the width of the element kind.
151 default:
152 var chunkSize int
153 if t.Elem().Kind() == reflect.Bool {
154 chunkSize = 16
155 } else {
156 switch t.Elem().Bits() {
157 case 8:
158 chunkSize = 16
159 case 16:
160 chunkSize = 12
161 case 32:
162 chunkSize = 8
163 default:
164 chunkSize = 8
165 }
166 }
167 list = opts.formatDiffSlice(
168 vx, vy, chunkSize, t.Elem().Kind().String(),
169 func(v reflect.Value, d diffMode) textRecord {
170 var ss []string
171 for i := 0; i < v.Len(); i++ {
172 switch t.Elem().Kind() {
173 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
174 ss = append(ss, fmt.Sprint(v.Index(i).Int()))
175 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
176 ss = append(ss, formatHex(v.Index(i).Uint()))
177 case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
178 ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
179 }
180 }
181 s := strings.Join(ss, ", ")
182 return textRecord{Diff: d, Value: textLine(s)}
183 },
184 )
185 }
186
187 // Wrap the output with appropriate type information.
188 var out textNode = textWrap{"{", list, "}"}
189 if !isText {
190 // The "{...}" byte-sequence literal is not valid Go syntax for strings.
191 // Emit the type for extra clarity (e.g. "string{...}").
192 if t.Kind() == reflect.String {
193 opts = opts.WithTypeMode(emitType)
194 }
195 return opts.FormatType(t, out)
196 }
197 switch t.Kind() {
198 case reflect.String:
199 out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)}
200 if t != reflect.TypeOf(string("")) {
201 out = opts.FormatType(t, out)
202 }
203 case reflect.Slice:
204 out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)}
205 if t != reflect.TypeOf([]byte(nil)) {
206 out = opts.FormatType(t, out)
207 }
208 }
209 return out
210}
211
212// formatASCII formats s as an ASCII string.
213// This is useful for printing binary strings in a semi-legible way.
214func formatASCII(s string) string {
215 b := bytes.Repeat([]byte{'.'}, len(s))
216 for i := 0; i < len(s); i++ {
217 if ' ' <= s[i] && s[i] <= '~' {
218 b[i] = s[i]
219 }
220 }
221 return string(b)
222}
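The transformation is equivalent to this standalone sketch:

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		s := "ab\x00\ncd"
		b := bytes.Repeat([]byte{'.'}, len(s))
		for i := 0; i < len(s); i++ {
			if ' ' <= s[i] && s[i] <= '~' { // keep printable ASCII only
				b[i] = s[i]
			}
		}
		fmt.Println(string(b)) // "ab..cd"
	}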
223
224func (opts formatOptions) formatDiffSlice(
225 vx, vy reflect.Value, chunkSize int, name string,
226 makeRec func(reflect.Value, diffMode) textRecord,
227) (list textList) {
228 es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
229 return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
230 })
231
232 appendChunks := func(v reflect.Value, d diffMode) int {
233 n0 := v.Len()
234 for v.Len() > 0 {
235 n := chunkSize
236 if n > v.Len() {
237 n = v.Len()
238 }
239 list = append(list, makeRec(v.Slice(0, n), d))
240 v = v.Slice(n, v.Len())
241 }
242 return n0 - v.Len()
243 }
244
245 groups := coalesceAdjacentEdits(name, es)
246 groups = coalesceInterveningIdentical(groups, chunkSize/4)
247 for i, ds := range groups {
248 // Print equal.
249 if ds.NumDiff() == 0 {
250 // Compute the number of leading and trailing equal bytes to print.
251 var numLo, numHi int
252 numEqual := ds.NumIgnored + ds.NumIdentical
253 for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
254 numLo++
255 }
256 for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
257 numHi++
258 }
259 if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
260 numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
261 }
262
263 // Print the equal bytes.
264 appendChunks(vx.Slice(0, numLo), diffIdentical)
265 if numEqual > numLo+numHi {
266 ds.NumIdentical -= numLo + numHi
267 list.AppendEllipsis(ds)
268 }
269 appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
270 vx = vx.Slice(numEqual, vx.Len())
271 vy = vy.Slice(numEqual, vy.Len())
272 continue
273 }
274
275 // Print unequal.
276 nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
277 vx = vx.Slice(nx, vx.Len())
278 ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
279 vy = vy.Slice(ny, vy.Len())
280 }
281 assert(vx.Len() == 0 && vy.Len() == 0)
282 return list
283}
284
285// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
286// equal or unequal counts.
287func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
288 var prevCase int // Arbitrary index into which case last occurred
289 lastStats := func(i int) *diffStats {
290 if prevCase != i {
291 groups = append(groups, diffStats{Name: name})
292 prevCase = i
293 }
294 return &groups[len(groups)-1]
295 }
296 for _, e := range es {
297 switch e {
298 case diff.Identity:
299 lastStats(1).NumIdentical++
300 case diff.UniqueX:
301 lastStats(2).NumRemoved++
302 case diff.UniqueY:
303 lastStats(2).NumInserted++
304 case diff.Modified:
305 lastStats(2).NumModified++
306 }
307 }
308 return groups
309}
310
311// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
312// equal groups into adjacent unequal groups that currently result in a
313// dual inserted/removed printout. This acts as a high-pass filter to smooth
314// out high-frequency changes within the windowSize.
315func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
316 groups, groupsOrig := groups[:0], groups
317 for i, ds := range groupsOrig {
318 if len(groups) >= 2 && ds.NumDiff() > 0 {
319 prev := &groups[len(groups)-2] // Unequal group
320 curr := &groups[len(groups)-1] // Equal group
321 next := &groupsOrig[i] // Unequal group
322 hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
323 hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
324 if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
325 *prev = (*prev).Append(*curr).Append(*next)
326 groups = groups[:len(groups)-1] // Truncate off equal group
327 continue
328 }
329 }
330 groups = append(groups, ds)
331 }
332 return groups
333}
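Aside: the two coalescing passes above are easier to see on a toy input. The following is a minimal, self-contained sketch of the intervening-identical merge; the stats type and every name here are invented for illustration (the real code operates on the unexported diffStats), and it assumes groups alternate equal/unequal, as coalesceAdjacentEdits guarantees.

package main

import "fmt"

// stats is a simplified stand-in for cmp's unexported diffStats.
type stats struct{ identical, removed, inserted int }

func (s stats) numDiff() int { return s.removed + s.inserted }

// coalesce folds a short equal run (<= windowSize) and the unequal group
// after it into the unequal group before it, mirroring
// coalesceInterveningIdentical.
func coalesce(groups []stats, windowSize int) []stats {
	var out []stats
	for _, g := range groups {
		if n := len(out); n >= 2 && g.numDiff() > 0 {
			prev, curr := &out[n-2], &out[n-1]
			hadX, hadY := prev.removed > 0, prev.inserted > 0
			hasX, hasY := g.removed > 0, g.inserted > 0
			if (hadX || hasX) && (hadY || hasY) && curr.identical <= windowSize {
				prev.identical += curr.identical
				prev.removed += g.removed
				prev.inserted += g.inserted
				out = out[:n-1] // drop the equal group; prev absorbed everything
				continue
			}
		}
		out = append(out, g)
	}
	return out
}

func main() {
	in := []stats{{removed: 3}, {identical: 1}, {inserted: 2}}
	fmt.Println(coalesce(in, 4)) // [{1 3 2}]: one merged removed+inserted group
}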
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 0000000..80605d0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,382 @@
1// Copyright 2019, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7import (
8 "bytes"
9 "fmt"
10 "math/rand"
11 "strings"
12 "time"
13
14 "github.com/google/go-cmp/cmp/internal/flags"
15)
16
17var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
18
19type indentMode int
20
21func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
22 if flags.Deterministic || randBool {
23 // Use regular spaces (U+0020).
24 switch d {
25 case diffUnknown, diffIdentical:
26 b = append(b, " "...)
27 case diffRemoved:
28 b = append(b, "- "...)
29 case diffInserted:
30 b = append(b, "+ "...)
31 }
32 } else {
33 // Use non-breaking spaces (U+00a0).
34 switch d {
35 case diffUnknown, diffIdentical:
36 b = append(b, "  "...)
37 case diffRemoved:
38 b = append(b, "- "...)
39 case diffInserted:
40 b = append(b, "+ "...)
41 }
42 }
43 return repeatCount(n).appendChar(b, '\t')
44}
45
46type repeatCount int
47
48func (n repeatCount) appendChar(b []byte, c byte) []byte {
49 for ; n > 0; n-- {
50 b = append(b, c)
51 }
52 return b
53}
54
55// textNode is a simplified tree-based representation of structured text.
56// Possible node types are textWrap, textList, or textLine.
57type textNode interface {
58 // Len reports the length in bytes of a single-line version of the tree.
59 // Nested textRecord.Diff and textRecord.Comment fields are ignored.
60 Len() int
61 // Equal reports whether the two trees are structurally identical.
62 // Nested textRecord.Diff and textRecord.Comment fields are compared.
63 Equal(textNode) bool
64 // String returns the string representation of the text tree.
65 // It is not guaranteed that len(x.String()) == x.Len(),
66 // nor that x.String() == y.String() implies that x.Equal(y).
67 String() string
68
69 // formatCompactTo formats the contents of the tree as a single-line string
70 // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
71 // fields are ignored.
72 //
73 // However, not all nodes in the tree should be collapsed as a single-line.
74 // If a node can be collapsed as a single-line, it is replaced by a textLine
75 // node. Since the top-level node cannot replace itself, this also returns
76 // the current node itself.
77 //
78 // This does not mutate the receiver.
79 formatCompactTo([]byte, diffMode) ([]byte, textNode)
80 // formatExpandedTo formats the contents of the tree as a multi-line string
81 // to the provided buffer. In order for column alignment to operate well,
82 // formatCompactTo must be called before calling formatExpandedTo.
83 formatExpandedTo([]byte, diffMode, indentMode) []byte
84}
85
86// textWrap is a wrapper that concatenates a prefix and/or a suffix
87// to the underlying node.
88type textWrap struct {
89 Prefix string // e.g., "bytes.Buffer{"
90 Value textNode // textWrap | textList | textLine
91 Suffix string // e.g., "}"
92}
93
94func (s textWrap) Len() int {
95 return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
96}
97func (s1 textWrap) Equal(s2 textNode) bool {
98 if s2, ok := s2.(textWrap); ok {
99 return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
100 }
101 return false
102}
103func (s textWrap) String() string {
104 var d diffMode
105 var n indentMode
106 _, s2 := s.formatCompactTo(nil, d)
107 b := n.appendIndent(nil, d) // Leading indent
108 b = s2.formatExpandedTo(b, d, n) // Main body
109 b = append(b, '\n') // Trailing newline
110 return string(b)
111}
112func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
113 n0 := len(b) // Original buffer length
114 b = append(b, s.Prefix...)
115 b, s.Value = s.Value.formatCompactTo(b, d)
116 b = append(b, s.Suffix...)
117 if _, ok := s.Value.(textLine); ok {
118 return b, textLine(b[n0:])
119 }
120 return b, s
121}
122func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
123 b = append(b, s.Prefix...)
124 b = s.Value.formatExpandedTo(b, d, n)
125 b = append(b, s.Suffix...)
126 return b
127}
128
129// textList is a comma-separated list of textWrap or textLine nodes.
 130// The list may be formatted as multiple lines or a single line at the discretion
131// of the textList.formatCompactTo method.
132type textList []textRecord
133type textRecord struct {
134 Diff diffMode // e.g., 0 or '-' or '+'
135 Key string // e.g., "MyField"
136 Value textNode // textWrap | textLine
137 Comment fmt.Stringer // e.g., "6 identical fields"
138}
139
140// AppendEllipsis appends a new ellipsis node to the list if none already
 141// exists at the end. If ds is non-zero, it coalesces the statistics with the
142// previous diffStats.
143func (s *textList) AppendEllipsis(ds diffStats) {
144 hasStats := ds != diffStats{}
145 if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
146 if hasStats {
147 *s = append(*s, textRecord{Value: textEllipsis, Comment: ds})
148 } else {
149 *s = append(*s, textRecord{Value: textEllipsis})
150 }
151 return
152 }
153 if hasStats {
154 (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
155 }
156}
157
158func (s textList) Len() (n int) {
159 for i, r := range s {
160 n += len(r.Key)
161 if r.Key != "" {
162 n += len(": ")
163 }
164 n += r.Value.Len()
165 if i < len(s)-1 {
166 n += len(", ")
167 }
168 }
169 return n
170}
171
172func (s1 textList) Equal(s2 textNode) bool {
173 if s2, ok := s2.(textList); ok {
174 if len(s1) != len(s2) {
175 return false
176 }
177 for i := range s1 {
178 r1, r2 := s1[i], s2[i]
179 if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
180 return false
181 }
182 }
183 return true
184 }
185 return false
186}
187
188func (s textList) String() string {
189 return textWrap{"{", s, "}"}.String()
190}
191
192func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
193 s = append(textList(nil), s...) // Avoid mutating original
194
195 // Determine whether we can collapse this list as a single line.
196 n0 := len(b) // Original buffer length
197 var multiLine bool
198 for i, r := range s {
199 if r.Diff == diffInserted || r.Diff == diffRemoved {
200 multiLine = true
201 }
202 b = append(b, r.Key...)
203 if r.Key != "" {
204 b = append(b, ": "...)
205 }
206 b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
207 if _, ok := s[i].Value.(textLine); !ok {
208 multiLine = true
209 }
210 if r.Comment != nil {
211 multiLine = true
212 }
213 if i < len(s)-1 {
214 b = append(b, ", "...)
215 }
216 }
217 // Force multi-lined output when printing a removed/inserted node that
218 // is sufficiently long.
219 if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 {
220 multiLine = true
221 }
222 if !multiLine {
223 return b, textLine(b[n0:])
224 }
225 return b, s
226}
227
228func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
229 alignKeyLens := s.alignLens(
230 func(r textRecord) bool {
231 _, isLine := r.Value.(textLine)
232 return r.Key == "" || !isLine
233 },
234 func(r textRecord) int { return len(r.Key) },
235 )
236 alignValueLens := s.alignLens(
237 func(r textRecord) bool {
238 _, isLine := r.Value.(textLine)
239 return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
240 },
241 func(r textRecord) int { return len(r.Value.(textLine)) },
242 )
243
244 // Format the list as a multi-lined output.
245 n++
246 for i, r := range s {
247 b = n.appendIndent(append(b, '\n'), d|r.Diff)
248 if r.Key != "" {
249 b = append(b, r.Key+": "...)
250 }
251 b = alignKeyLens[i].appendChar(b, ' ')
252
253 b = r.Value.formatExpandedTo(b, d|r.Diff, n)
254 if !r.Value.Equal(textEllipsis) {
255 b = append(b, ',')
256 }
257 b = alignValueLens[i].appendChar(b, ' ')
258
259 if r.Comment != nil {
260 b = append(b, " // "+r.Comment.String()...)
261 }
262 }
263 n--
264
265 return n.appendIndent(append(b, '\n'), d)
266}
267
268func (s textList) alignLens(
269 skipFunc func(textRecord) bool,
270 lenFunc func(textRecord) int,
271) []repeatCount {
272 var startIdx, endIdx, maxLen int
273 lens := make([]repeatCount, len(s))
274 for i, r := range s {
275 if skipFunc(r) {
276 for j := startIdx; j < endIdx && j < len(s); j++ {
277 lens[j] = repeatCount(maxLen - lenFunc(s[j]))
278 }
279 startIdx, endIdx, maxLen = i+1, i+1, 0
280 } else {
281 if maxLen < lenFunc(r) {
282 maxLen = lenFunc(r)
283 }
284 endIdx = i + 1
285 }
286 }
287 for j := startIdx; j < endIdx && j < len(s); j++ {
288 lens[j] = repeatCount(maxLen - lenFunc(s[j]))
289 }
290 return lens
291}
292
293// textLine is a single-line segment of text and is always a leaf node
294// in the textNode tree.
295type textLine []byte
296
297var (
298 textNil = textLine("nil")
299 textEllipsis = textLine("...")
300)
301
302func (s textLine) Len() int {
303 return len(s)
304}
305func (s1 textLine) Equal(s2 textNode) bool {
306 if s2, ok := s2.(textLine); ok {
307 return bytes.Equal([]byte(s1), []byte(s2))
308 }
309 return false
310}
311func (s textLine) String() string {
312 return string(s)
313}
314func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
315 return append(b, s...), s
316}
317func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
318 return append(b, s...)
319}
320
321type diffStats struct {
322 Name string
323 NumIgnored int
324 NumIdentical int
325 NumRemoved int
326 NumInserted int
327 NumModified int
328}
329
330func (s diffStats) NumDiff() int {
331 return s.NumRemoved + s.NumInserted + s.NumModified
332}
333
334func (s diffStats) Append(ds diffStats) diffStats {
335 assert(s.Name == ds.Name)
336 s.NumIgnored += ds.NumIgnored
337 s.NumIdentical += ds.NumIdentical
338 s.NumRemoved += ds.NumRemoved
339 s.NumInserted += ds.NumInserted
340 s.NumModified += ds.NumModified
341 return s
342}
343
 344// String prints a human-readable summary of coalesced records.
345//
346// Example:
347// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
348func (s diffStats) String() string {
349 var ss []string
350 var sum int
351 labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
352 counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
353 for i, n := range counts {
354 if n > 0 {
355 ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
356 }
357 sum += n
358 }
359
360 // Pluralize the name (adjusting for some obscure English grammar rules).
361 name := s.Name
362 if sum > 1 {
363 name = name + "s"
364 if strings.HasSuffix(name, "ys") {
365 name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
366 }
367 }
368
369 // Format the list according to English grammar (with Oxford comma).
370 switch n := len(ss); n {
371 case 0:
372 return ""
373 case 1, 2:
374 return strings.Join(ss, " and ") + " " + name
375 default:
376 return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
377 }
378}
379
380type commentString string
381
382func (s commentString) String() string { return string(s) }
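For context, this text tree is what renders the output of the public cmp.Diff entry point. A small usage sketch (the User type and its values are invented for illustration):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type User struct {
	Name  string
	Age   int
	Email string
	City  string
}

func main() {
	x := User{"alice", 30, "a@example.com", "Berlin"}
	y := User{"alice", 31, "a@example.com", "Berlin"}
	// Unequal fields are printed with -/+ markers; with enough equal
	// fields, unchanged runs collapse to an ellipsis annotated by a
	// diffStats comment such as "... // 6 identical fields".
	fmt.Println(cmp.Diff(x, y))
}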
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 0000000..83031a7
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
1// Copyright 2019, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7import "reflect"
8
9// valueNode represents a single node within a report, which is a
10// structured representation of the value tree, containing information
11// regarding which nodes are equal or not.
12type valueNode struct {
13 parent *valueNode
14
15 Type reflect.Type
16 ValueX reflect.Value
17 ValueY reflect.Value
18
19 // NumSame is the number of leaf nodes that are equal.
20 // All descendants are equal only if NumDiff is 0.
21 NumSame int
22 // NumDiff is the number of leaf nodes that are not equal.
23 NumDiff int
24 // NumIgnored is the number of leaf nodes that are ignored.
25 NumIgnored int
26 // NumCompared is the number of leaf nodes that were compared
27 // using an Equal method or Comparer function.
28 NumCompared int
29 // NumTransformed is the number of non-leaf nodes that were transformed.
30 NumTransformed int
31 // NumChildren is the number of transitive descendants of this node.
32 // This counts from zero; thus, leaf nodes have no descendants.
33 NumChildren int
34 // MaxDepth is the maximum depth of the tree. This counts from zero;
35 // thus, leaf nodes have a depth of zero.
36 MaxDepth int
37
38 // Records is a list of struct fields, slice elements, or map entries.
39 Records []reportRecord // If populated, implies Value is not populated
40
 41 // Value is the result of a transformation, pointer indirection, or
42 // type assertion.
43 Value *valueNode // If populated, implies Records is not populated
44
45 // TransformerName is the name of the transformer.
46 TransformerName string // If non-empty, implies Value is populated
47}
48type reportRecord struct {
49 Key reflect.Value // Invalid for slice element
50 Value *valueNode
51}
52
53func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
54 vx, vy := ps.Values()
55 child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
56 switch s := ps.(type) {
57 case StructField:
58 assert(parent.Value == nil)
59 parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
60 case SliceIndex:
61 assert(parent.Value == nil)
62 parent.Records = append(parent.Records, reportRecord{Value: child})
63 case MapIndex:
64 assert(parent.Value == nil)
65 parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
66 case Indirect:
67 assert(parent.Value == nil && parent.Records == nil)
68 parent.Value = child
69 case TypeAssertion:
70 assert(parent.Value == nil && parent.Records == nil)
71 parent.Value = child
72 case Transform:
73 assert(parent.Value == nil && parent.Records == nil)
74 parent.Value = child
75 parent.TransformerName = s.Name()
76 parent.NumTransformed++
77 default:
78 assert(parent == nil) // Must be the root step
79 }
80 return child
81}
82
83func (r *valueNode) Report(rs Result) {
84 assert(r.MaxDepth == 0) // May only be called on leaf nodes
85
86 if rs.ByIgnore() {
87 r.NumIgnored++
88 } else {
89 if rs.Equal() {
90 r.NumSame++
91 } else {
92 r.NumDiff++
93 }
94 }
95 assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
96
97 if rs.ByMethod() {
98 r.NumCompared++
99 }
100 if rs.ByFunc() {
101 r.NumCompared++
102 }
103 assert(r.NumCompared <= 1)
104}
105
106func (child *valueNode) PopStep() (parent *valueNode) {
107 if child.parent == nil {
108 return nil
109 }
110 parent = child.parent
111 parent.NumSame += child.NumSame
112 parent.NumDiff += child.NumDiff
113 parent.NumIgnored += child.NumIgnored
114 parent.NumCompared += child.NumCompared
115 parent.NumTransformed += child.NumTransformed
116 parent.NumChildren += child.NumChildren + 1
117 if parent.MaxDepth < child.MaxDepth+1 {
118 parent.MaxDepth = child.MaxDepth + 1
119 }
120 return parent
121}
diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go
deleted file mode 100644
index 20e9f18..0000000
--- a/vendor/github.com/google/go-cmp/cmp/reporter.go
+++ /dev/null
@@ -1,53 +0,0 @@
1// Copyright 2017, The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE.md file.
4
5package cmp
6
7import (
8 "fmt"
9 "reflect"
10 "strings"
11
12 "github.com/google/go-cmp/cmp/internal/value"
13)
14
15type defaultReporter struct {
16 Option
17 diffs []string // List of differences, possibly truncated
18 ndiffs int // Total number of differences
19 nbytes int // Number of bytes in diffs
20 nlines int // Number of lines in diffs
21}
22
23var _ reporter = (*defaultReporter)(nil)
24
25func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
26 if eq {
27 return // Ignore equal results
28 }
29 const maxBytes = 4096
30 const maxLines = 256
31 r.ndiffs++
32 if r.nbytes < maxBytes && r.nlines < maxLines {
33 sx := value.Format(x, value.FormatConfig{UseStringer: true})
34 sy := value.Format(y, value.FormatConfig{UseStringer: true})
35 if sx == sy {
36 // Unhelpful output, so use more exact formatting.
37 sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
38 sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
39 }
40 s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
41 r.diffs = append(r.diffs, s)
42 r.nbytes += len(s)
43 r.nlines += strings.Count(s, "\n")
44 }
45}
46
47func (r *defaultReporter) String() string {
48 s := strings.Join(r.diffs, "")
49 if r.ndiffs == len(r.diffs) {
50 return s
51 }
52 return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
53}
diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE
new file mode 100644
index 0000000..ae121a1
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/LICENSE
@@ -0,0 +1,27 @@
1Copyright (c) 2013 Google. All rights reserved.
2
3Redistribution and use in source and binary forms, with or without
4modification, are permitted provided that the following conditions are
5met:
6
7 * Redistributions of source code must retain the above copyright
8notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10copyright notice, this list of conditions and the following disclaimer
11in the documentation and/or other materials provided with the
12distribution.
13 * Neither the name of Google Inc. nor the names of its
14contributors may be used to endorse or promote products derived from
15this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go
new file mode 100644
index 0000000..37080b1
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/query/encode.go
@@ -0,0 +1,320 @@
1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package query implements encoding of structs into URL query parameters.
6//
7// As a simple example:
8//
9// type Options struct {
10// Query string `url:"q"`
11// ShowAll bool `url:"all"`
12// Page int `url:"page"`
13// }
14//
15// opt := Options{ "foo", true, 2 }
16// v, _ := query.Values(opt)
 17// fmt.Print(v.Encode()) // will output: "all=true&page=2&q=foo"
18//
19// The exact mapping between Go values and url.Values is described in the
20// documentation for the Values() function.
21package query
22
23import (
24 "bytes"
25 "fmt"
26 "net/url"
27 "reflect"
28 "strconv"
29 "strings"
30 "time"
31)
32
33var timeType = reflect.TypeOf(time.Time{})
34
35var encoderType = reflect.TypeOf(new(Encoder)).Elem()
36
37// Encoder is an interface implemented by any type that wishes to encode
38// itself into URL values in a non-standard way.
39type Encoder interface {
40 EncodeValues(key string, v *url.Values) error
41}
42
43// Values returns the url.Values encoding of v.
44//
45// Values expects to be passed a struct, and traverses it recursively using the
46// following encoding rules.
47//
48// Each exported struct field is encoded as a URL parameter unless
49//
50// - the field's tag is "-", or
51// - the field is empty and its tag specifies the "omitempty" option
52//
 53// The empty values are false, 0, any nil pointer or interface value, any array,
54// slice, map, or string of length zero, and any time.Time that returns true
55// for IsZero().
56//
57// The URL parameter name defaults to the struct field name but can be
58// specified in the struct field's tag value. The "url" key in the struct
59// field's tag value is the key name, followed by an optional comma and
60// options. For example:
61//
62// // Field is ignored by this package.
63// Field int `url:"-"`
64//
65// // Field appears as URL parameter "myName".
66// Field int `url:"myName"`
67//
68// // Field appears as URL parameter "myName" and the field is omitted if
69// // its value is empty
70// Field int `url:"myName,omitempty"`
71//
72// // Field appears as URL parameter "Field" (the default), but the field
73// // is skipped if empty. Note the leading comma.
74// Field int `url:",omitempty"`
75//
76// For encoding individual field values, the following type-dependent rules
77// apply:
78//
79// Boolean values default to encoding as the strings "true" or "false".
80// Including the "int" option signals that the field should be encoded as the
81// strings "1" or "0".
82//
83// time.Time values default to encoding as RFC3339 timestamps. Including the
84// "unix" option signals that the field should be encoded as a Unix time (see
85// time.Unix())
86//
87// Slice and Array values default to encoding as multiple URL values of the
88// same name. Including the "comma" option signals that the field should be
89// encoded as a single comma-delimited value. Including the "space" option
90// similarly encodes the value as a single space-delimited string. Including
91// the "semicolon" option will encode the value as a semicolon-delimited string.
92// Including the "brackets" option signals that the multiple URL values should
93// have "[]" appended to the value name. "numbered" will append a number to
 94// the end of each occurrence of the value name, for example:
95// name0=value0&name1=value1, etc.
96//
97// Anonymous struct fields are usually encoded as if their inner exported
98// fields were fields in the outer struct, subject to the standard Go
99// visibility rules. An anonymous struct field with a name given in its URL
100// tag is treated as having that name, rather than being anonymous.
101//
102// Non-nil pointer values are encoded as the value pointed to.
103//
104// Nested structs are encoded including parent fields in value names for
 105// scoping, e.g.:
106//
107// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
108//
109// All other values are encoded using their default string representation.
110//
111// Multiple fields that encode to the same URL parameter name will be included
112// as multiple URL values of the same name.
113func Values(v interface{}) (url.Values, error) {
114 values := make(url.Values)
115 val := reflect.ValueOf(v)
116 for val.Kind() == reflect.Ptr {
117 if val.IsNil() {
118 return values, nil
119 }
120 val = val.Elem()
121 }
122
123 if v == nil {
124 return values, nil
125 }
126
127 if val.Kind() != reflect.Struct {
128 return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
129 }
130
131 err := reflectValue(values, val, "")
132 return values, err
133}
134
135// reflectValue populates the values parameter from the struct fields in val.
136// Embedded structs are followed recursively (using the rules defined in the
137// Values function documentation) breadth-first.
138func reflectValue(values url.Values, val reflect.Value, scope string) error {
139 var embedded []reflect.Value
140
141 typ := val.Type()
142 for i := 0; i < typ.NumField(); i++ {
143 sf := typ.Field(i)
144 if sf.PkgPath != "" && !sf.Anonymous { // unexported
145 continue
146 }
147
148 sv := val.Field(i)
149 tag := sf.Tag.Get("url")
150 if tag == "-" {
151 continue
152 }
153 name, opts := parseTag(tag)
154 if name == "" {
155 if sf.Anonymous && sv.Kind() == reflect.Struct {
156 // save embedded struct for later processing
157 embedded = append(embedded, sv)
158 continue
159 }
160
161 name = sf.Name
162 }
163
164 if scope != "" {
165 name = scope + "[" + name + "]"
166 }
167
168 if opts.Contains("omitempty") && isEmptyValue(sv) {
169 continue
170 }
171
172 if sv.Type().Implements(encoderType) {
173 if !reflect.Indirect(sv).IsValid() {
174 sv = reflect.New(sv.Type().Elem())
175 }
176
177 m := sv.Interface().(Encoder)
178 if err := m.EncodeValues(name, &values); err != nil {
179 return err
180 }
181 continue
182 }
183
184 if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
185 var del byte
186 if opts.Contains("comma") {
187 del = ','
188 } else if opts.Contains("space") {
189 del = ' '
190 } else if opts.Contains("semicolon") {
191 del = ';'
192 } else if opts.Contains("brackets") {
193 name = name + "[]"
194 }
195
196 if del != 0 {
197 s := new(bytes.Buffer)
198 first := true
199 for i := 0; i < sv.Len(); i++ {
200 if first {
201 first = false
202 } else {
203 s.WriteByte(del)
204 }
205 s.WriteString(valueString(sv.Index(i), opts))
206 }
207 values.Add(name, s.String())
208 } else {
209 for i := 0; i < sv.Len(); i++ {
210 k := name
211 if opts.Contains("numbered") {
212 k = fmt.Sprintf("%s%d", name, i)
213 }
214 values.Add(k, valueString(sv.Index(i), opts))
215 }
216 }
217 continue
218 }
219
220 for sv.Kind() == reflect.Ptr {
221 if sv.IsNil() {
222 break
223 }
224 sv = sv.Elem()
225 }
226
227 if sv.Type() == timeType {
228 values.Add(name, valueString(sv, opts))
229 continue
230 }
231
232 if sv.Kind() == reflect.Struct {
233 reflectValue(values, sv, name)
234 continue
235 }
236
237 values.Add(name, valueString(sv, opts))
238 }
239
240 for _, f := range embedded {
241 if err := reflectValue(values, f, scope); err != nil {
242 return err
243 }
244 }
245
246 return nil
247}
248
249// valueString returns the string representation of a value.
250func valueString(v reflect.Value, opts tagOptions) string {
251 for v.Kind() == reflect.Ptr {
252 if v.IsNil() {
253 return ""
254 }
255 v = v.Elem()
256 }
257
258 if v.Kind() == reflect.Bool && opts.Contains("int") {
259 if v.Bool() {
260 return "1"
261 }
262 return "0"
263 }
264
265 if v.Type() == timeType {
266 t := v.Interface().(time.Time)
267 if opts.Contains("unix") {
268 return strconv.FormatInt(t.Unix(), 10)
269 }
270 return t.Format(time.RFC3339)
271 }
272
273 return fmt.Sprint(v.Interface())
274}
275
276// isEmptyValue checks if a value should be considered empty for the purposes
277// of omitting fields with the "omitempty" option.
278func isEmptyValue(v reflect.Value) bool {
279 switch v.Kind() {
280 case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
281 return v.Len() == 0
282 case reflect.Bool:
283 return !v.Bool()
284 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
285 return v.Int() == 0
286 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
287 return v.Uint() == 0
288 case reflect.Float32, reflect.Float64:
289 return v.Float() == 0
290 case reflect.Interface, reflect.Ptr:
291 return v.IsNil()
292 }
293
294 if v.Type() == timeType {
295 return v.Interface().(time.Time).IsZero()
296 }
297
298 return false
299}
300
301// tagOptions is the string following a comma in a struct field's "url" tag, or
302// the empty string. It does not include the leading comma.
303type tagOptions []string
304
305// parseTag splits a struct field's url tag into its name and comma-separated
306// options.
307func parseTag(tag string) (string, tagOptions) {
308 s := strings.Split(tag, ",")
309 return s[0], s[1:]
310}
311
312// Contains checks whether the tagOptions contains the specified option.
313func (o tagOptions) Contains(option string) bool {
314 for _, s := range o {
315 if s == option {
316 return true
317 }
318 }
319 return false
320}
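The tag options documented above compose as in this usage sketch (the struct and field names are invented; note that url.Values.Encode emits keys in sorted order):

package main

import (
	"fmt"

	"github.com/google/go-querystring/query"
)

type ListOptions struct {
	Query string   `url:"q,omitempty"`
	Tags  []string `url:"tag,comma"`     // one comma-delimited value
	Pages []int    `url:"page,numbered"` // page0=..., page1=...
	Exact bool     `url:"exact,int"`     // encoded as "1" or "0"
}

func main() {
	v, err := query.Values(ListOptions{
		Query: "golang",
		Tags:  []string{"a", "b"},
		Pages: []int{1, 2},
		Exact: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Encode())
	// exact=1&page0=1&page1=2&q=golang&tag=a%2Cb
}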
diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/checksum.go
index bea7ed1..eeccfea 100644
--- a/vendor/github.com/hashicorp/go-getter/checksum.go
+++ b/vendor/github.com/hashicorp/go-getter/checksum.go
@@ -19,8 +19,8 @@ import (
19 urlhelper "github.com/hashicorp/go-getter/helper/url" 19 urlhelper "github.com/hashicorp/go-getter/helper/url"
20) 20)
21 21
22// fileChecksum helps verifying the checksum for a file. 22// FileChecksum helps verifying the checksum for a file.
23type fileChecksum struct { 23type FileChecksum struct {
24 Type string 24 Type string
25 Hash hash.Hash 25 Hash hash.Hash
26 Value []byte 26 Value []byte
@@ -50,7 +50,7 @@ func (cerr *ChecksumError) Error() string {
50 50
51// checksum is a simple method to compute the checksum of a source file 51// checksum is a simple method to compute the checksum of a source file
52// and compare it to the given expected value. 52// and compare it to the given expected value.
53func (c *fileChecksum) checksum(source string) error { 53func (c *FileChecksum) checksum(source string) error {
54 f, err := os.Open(source) 54 f, err := os.Open(source)
55 if err != nil { 55 if err != nil {
56 return fmt.Errorf("Failed to open file for checksum: %s", err) 56 return fmt.Errorf("Failed to open file for checksum: %s", err)
@@ -74,7 +74,7 @@ func (c *fileChecksum) checksum(source string) error {
74 return nil 74 return nil
75} 75}
76 76
77// extractChecksum will return a fileChecksum based on the 'checksum' 77// extractChecksum will return a FileChecksum based on the 'checksum'
78// parameter of u. 78// parameter of u.
79// ex: 79// ex:
80// http://hashicorp.com/terraform?checksum=<checksumValue> 80// http://hashicorp.com/terraform?checksum=<checksumValue>
@@ -93,7 +93,7 @@ func (c *fileChecksum) checksum(source string) error {
93// <checksum> *file2 93// <checksum> *file2
94// 94//
95// see parseChecksumLine for more detail on checksum file parsing 95// see parseChecksumLine for more detail on checksum file parsing
96func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) { 96func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) {
97 q := u.Query() 97 q := u.Query()
98 v := q.Get("checksum") 98 v := q.Get("checksum")
99 99
@@ -115,14 +115,14 @@ func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) {
115 115
116 switch checksumType { 116 switch checksumType {
117 case "file": 117 case "file":
118 return c.checksumFromFile(checksumValue, u) 118 return c.ChecksumFromFile(checksumValue, u)
119 default: 119 default:
120 return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) 120 return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath()))
121 } 121 }
122} 122}
123 123
124func newChecksum(checksumValue, filename string) (*fileChecksum, error) { 124func newChecksum(checksumValue, filename string) (*FileChecksum, error) {
125 c := &fileChecksum{ 125 c := &FileChecksum{
126 Filename: filename, 126 Filename: filename,
127 } 127 }
128 var err error 128 var err error
@@ -133,7 +133,7 @@ func newChecksum(checksumValue, filename string) (*fileChecksum, error) {
133 return c, nil 133 return c, nil
134} 134}
135 135
136func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChecksum, error) { 136func newChecksumFromType(checksumType, checksumValue, filename string) (*FileChecksum, error) {
137 c, err := newChecksum(checksumValue, filename) 137 c, err := newChecksum(checksumValue, filename)
138 if err != nil { 138 if err != nil {
139 return nil, err 139 return nil, err
@@ -157,7 +157,7 @@ func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChe
157 return c, nil 157 return c, nil
158} 158}
159 159
160func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error) { 160func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) {
161 c, err := newChecksum(checksumValue, filename) 161 c, err := newChecksum(checksumValue, filename)
162 if err != nil { 162 if err != nil {
163 return nil, err 163 return nil, err
@@ -183,14 +183,14 @@ func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error)
183 return c, nil 183 return c, nil
184} 184}
185 185
186// checksumsFromFile will return all the fileChecksums found in file 186// ChecksumFromFile will return all the FileChecksums found in file
187// 187//
188// checksumsFromFile will try to guess the hashing algorithm based on content 188// ChecksumFromFile will try to guess the hashing algorithm based on content
189// of checksum file 189// of checksum file
190// 190//
191// checksumsFromFile will only return checksums for files that match file 191// ChecksumFromFile will only return checksums for files that match file
192// behind src 192// behind src
193func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileChecksum, error) { 193func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) {
194 checksumFileURL, err := urlhelper.Parse(checksumFile) 194 checksumFileURL, err := urlhelper.Parse(checksumFile)
195 if err != nil { 195 if err != nil {
196 return nil, err 196 return nil, err
@@ -286,7 +286,7 @@ func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileCheck
286// of a line. 286// of a line.
287// for BSD type sums parseChecksumLine guesses the hashing algorithm 287// for BSD type sums parseChecksumLine guesses the hashing algorithm
288// by checking the length of the checksum. 288// by checking the length of the checksum.
289func parseChecksumLine(line string) (*fileChecksum, error) { 289func parseChecksumLine(line string) (*FileChecksum, error) {
290 parts := strings.Fields(line) 290 parts := strings.Fields(line)
291 291
292 switch len(parts) { 292 switch len(parts) {
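With fileChecksum exported as FileChecksum, callers outside the package can resolve a checksum file themselves. A speculative sketch, assuming a suitably configured Client; the URLs are hypothetical:

package main

import (
	"fmt"
	"net/url"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Hypothetical artifact and SHA256SUMS-style checksum file.
	src, err := url.Parse("https://releases.example.com/app_1.0_linux_amd64.zip")
	if err != nil {
		panic(err)
	}

	client := &getter.Client{}
	sum, err := client.ChecksumFromFile("https://releases.example.com/SHA256SUMS", src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s checksum for %s: %x\n", sum.Type, sum.Filename, sum.Value)
}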
diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
index a183a17..19047eb 100644
--- a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
+++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
@@ -35,7 +35,7 @@ func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
35 var info struct { 35 var info struct {
36 SCM string `json:"scm"` 36 SCM string `json:"scm"`
37 } 37 }
38 infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path 38 infoUrl := "https://api.bitbucket.org/2.0/repositories" + u.Path
39 resp, err := http.Get(infoUrl) 39 resp, err := http.Get(infoUrl)
40 if err != nil { 40 if err != nil {
41 return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) 41 return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
index 679e10a..bc56559 100644
--- a/vendor/github.com/hashicorp/go-plugin/client.go
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -87,6 +87,10 @@ type Client struct {
87 // goroutines. 87 // goroutines.
88 clientWaitGroup sync.WaitGroup 88 clientWaitGroup sync.WaitGroup
89 89
90 // stderrWaitGroup is used to prevent the command's Wait() function from
91 // being called before we've finished reading from the stderr pipe.
92 stderrWaitGroup sync.WaitGroup
93
90 // processKilled is used for testing only, to flag when the process was 94 // processKilled is used for testing only, to flag when the process was
91 // forcefully killed. 95 // forcefully killed.
92 processKilled bool 96 processKilled bool
@@ -590,6 +594,12 @@ func (c *Client) Start() (addr net.Addr, err error) {
590 // Create a context for when we kill 594 // Create a context for when we kill
591 c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) 595 c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
592 596
597 // Start goroutine that logs the stderr
598 c.clientWaitGroup.Add(1)
599 c.stderrWaitGroup.Add(1)
600 // logStderr calls Done()
601 go c.logStderr(cmdStderr)
602
593 c.clientWaitGroup.Add(1) 603 c.clientWaitGroup.Add(1)
594 go func() { 604 go func() {
595 // ensure the context is cancelled when we're done 605 // ensure the context is cancelled when we're done
@@ -602,6 +612,10 @@ func (c *Client) Start() (addr net.Addr, err error) {
602 pid := c.process.Pid 612 pid := c.process.Pid
603 path := cmd.Path 613 path := cmd.Path
604 614
615 // wait to finish reading from stderr since the stderr pipe reader
616 // will be closed by the subsequent call to cmd.Wait().
617 c.stderrWaitGroup.Wait()
618
605 // Wait for the command to end. 619 // Wait for the command to end.
606 err := cmd.Wait() 620 err := cmd.Wait()
607 621
@@ -624,11 +638,6 @@ func (c *Client) Start() (addr net.Addr, err error) {
624 c.exited = true 638 c.exited = true
625 }() 639 }()
626 640
627 // Start goroutine that logs the stderr
628 c.clientWaitGroup.Add(1)
629 // logStderr calls Done()
630 go c.logStderr(cmdStderr)
631
632 // Start a goroutine that is going to be reading the lines 641 // Start a goroutine that is going to be reading the lines
633 // out of stdout 642 // out of stdout
634 linesCh := make(chan string) 643 linesCh := make(chan string)
@@ -936,6 +945,7 @@ var stdErrBufferSize = 64 * 1024
936 945
937func (c *Client) logStderr(r io.Reader) { 946func (c *Client) logStderr(r io.Reader) {
938 defer c.clientWaitGroup.Done() 947 defer c.clientWaitGroup.Done()
948 defer c.stderrWaitGroup.Done()
939 l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) 949 l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
940 950
941 reader := bufio.NewReaderSize(r, stdErrBufferSize) 951 reader := bufio.NewReaderSize(r, stdErrBufferSize)
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
index fc9f05a..4c230e3 100644
--- a/vendor/github.com/hashicorp/go-plugin/server.go
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -363,14 +363,34 @@ func serverListener() (net.Listener, error) {
363} 363}
364 364
365func serverListener_tcp() (net.Listener, error) { 365func serverListener_tcp() (net.Listener, error) {
366 minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) 366 envMinPort := os.Getenv("PLUGIN_MIN_PORT")
367 if err != nil { 367 envMaxPort := os.Getenv("PLUGIN_MAX_PORT")
368 return nil, err 368
369 var minPort, maxPort int64
370 var err error
371
372 switch {
373 case len(envMinPort) == 0:
374 minPort = 0
375 default:
376 minPort, err = strconv.ParseInt(envMinPort, 10, 32)
377 if err != nil {
378 return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err)
379 }
369 } 380 }
370 381
371 maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) 382 switch {
372 if err != nil { 383 case len(envMaxPort) == 0:
373 return nil, err 384 maxPort = 0
385 default:
386 maxPort, err = strconv.ParseInt(envMaxPort, 10, 32)
387 if err != nil {
388 return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err)
389 }
390 }
391
392 if minPort > maxPort {
393 return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
374 } 394 }
375 395
376 for port := minPort; port <= maxPort; port++ { 396 for port := minPort; port <= maxPort; port++ {
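The pattern above (an unset variable defaults to 0, a malformed one yields a named error, and an inverted range is rejected) can be distilled into a standalone sketch; the function and variable names here are illustrative, not part of go-plugin:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// portRange reads PLUGIN_MIN_PORT and PLUGIN_MAX_PORT, treating an empty
// value as 0 and rejecting a minimum greater than the maximum.
func portRange() (min, max int64, err error) {
	parse := func(key string) (int64, error) {
		v := os.Getenv(key)
		if v == "" {
			return 0, nil
		}
		n, err := strconv.ParseInt(v, 10, 32)
		if err != nil {
			return 0, fmt.Errorf("couldn't get value from %s: %v", key, err)
		}
		return n, nil
	}
	if min, err = parse("PLUGIN_MIN_PORT"); err != nil {
		return 0, 0, err
	}
	if max, err = parse("PLUGIN_MAX_PORT"); err != nil {
		return 0, 0, err
	}
	if min > max {
		return 0, 0, fmt.Errorf("PLUGIN_MIN_PORT %d is greater than PLUGIN_MAX_PORT %d", min, max)
	}
	return min, max, nil
}

func main() {
	os.Setenv("PLUGIN_MIN_PORT", "10000")
	os.Setenv("PLUGIN_MAX_PORT", "25000")
	min, max, err := portRange()
	fmt.Println(min, max, err) // 10000 25000 <nil>
}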
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
index 2b24fdb..f59ce92 100644
--- a/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
@@ -95,7 +95,7 @@ schema model provides a description of only one level of nested blocks at
95a time, and thus a new schema must be provided for each additional level of 95a time, and thus a new schema must be provided for each additional level of
96nesting. 96nesting.
97 97
98To make this arduous process as convenient as possbile, this package provides 98To make this arduous process as convenient as possible, this package provides
99a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode` 99a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode`
100instance that can be used to find variables directly in a given body and also 100instance that can be used to find variables directly in a given body and also
101determine which nested blocks require recursive calls. Using this mechanism 101determine which nested blocks require recursive calls. Using this mechanism
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
index 26819a2..d3f7a74 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
@@ -473,8 +473,35 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
473 falseResult, falseDiags := e.FalseResult.Value(ctx) 473 falseResult, falseDiags := e.FalseResult.Value(ctx)
474 var diags hcl.Diagnostics 474 var diags hcl.Diagnostics
475 475
476 // Try to find a type that both results can be converted to. 476 resultType := cty.DynamicPseudoType
477 resultType, convs := convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) 477 convs := make([]convert.Conversion, 2)
478
479 switch {
480 // If either case is a dynamic null value (which would result from a
481 // literal null in the config), we know that it can convert to the expected
482 // type of the opposite case, and we don't need to speculatively reduce the
483 // final result type to DynamicPseudoType.
484
485 // If we know that either Type is a DynamicPseudoType, we can be certain
486 // that the other value can convert since it's a pass-through, and we don't
487 // need to unify the types. If the final evaluation results in the dynamic
488 // value being returned, there's no conversion we can do, so we return the
489 // value directly.
490 case trueResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)):
491 resultType = falseResult.Type()
492 convs[0] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType)
493 case falseResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)):
494 resultType = trueResult.Type()
495 convs[1] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType)
496 case trueResult.Type() == cty.DynamicPseudoType, falseResult.Type() == cty.DynamicPseudoType:
497 // the final resultType type is still unknown
498 // we don't need to get the conversion, because both are a noop.
499
500 default:
501 // Try to find a type that both results can be converted to.
502 resultType, convs = convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()})
503 }
504
478 if resultType == cty.NilType { 505 if resultType == cty.NilType {
479 return cty.DynamicVal, hcl.Diagnostics{ 506 return cty.DynamicVal, hcl.Diagnostics{
480 { 507 {
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
index fa79e3d..ca3dae1 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
@@ -89,6 +89,26 @@ func (e *TemplateExpr) StartRange() hcl.Range {
89 return e.Parts[0].StartRange() 89 return e.Parts[0].StartRange()
90} 90}
91 91
 92// IsStringLiteral returns true if and only if the template consists only of a
93// single string literal, as would be created for a simple quoted string like
94// "foo".
95//
96// If this function returns true, then calling Value on the same expression
97// with a nil EvalContext will return the literal value.
98//
99// Note that "${"foo"}", "${1}", etc aren't considered literal values for the
100// purposes of this method, because the intent of this method is to identify
101// situations where the user seems to be explicitly intending literal string
102// interpretation, not situations that result in literals as a technicality
103// of the template expression unwrapping behavior.
104func (e *TemplateExpr) IsStringLiteral() bool {
105 if len(e.Parts) != 1 {
106 return false
107 }
108 _, ok := e.Parts[0].(*LiteralValueExpr)
109 return ok
110}
111
92// TemplateJoinExpr is used to convert tuples of strings produced by template 112// TemplateJoinExpr is used to convert tuples of strings produced by template
93// constructs (i.e. for loops) into flat strings, by converting the values 113// constructs (i.e. for loops) into flat strings, by converting the values
 94// to strings and joining them. This AST node is not used directly; it's 114// to strings and joining them. This AST node is not used directly; it's
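A speculative usage sketch of the new IsStringLiteral method, going through the package's public ParseExpression entry point:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// isLiteral reports whether src parses to a template that is a plain
// string literal.
func isLiteral(src string) bool {
	expr, diags := hclsyntax.ParseExpression([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		return false
	}
	tmpl, ok := expr.(*hclsyntax.TemplateExpr)
	return ok && tmpl.IsStringLiteral()
}

func main() {
	fmt.Println(isLiteral(`"foo"`))      // true: a plain quoted string
	fmt.Println(isLiteral(`"${var.x}"`)) // false: a lone interpolation is not a literal
}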
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
index 253ad50..772ebae 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
@@ -853,6 +853,14 @@ Traversal:
853 SrcRange: rng, 853 SrcRange: rng,
854 } 854 }
855 ret = makeRelativeTraversal(ret, step, rng) 855 ret = makeRelativeTraversal(ret, step, rng)
856 } else if tmpl, isTmpl := keyExpr.(*TemplateExpr); isTmpl && tmpl.IsStringLiteral() {
857 litKey, _ := tmpl.Value(nil)
858 rng := hcl.RangeBetween(open.Range, close.Range)
859 step := hcl.TraverseIndex{
860 Key: litKey,
861 SrcRange: rng,
862 }
863 ret = makeRelativeTraversal(ret, step, rng)
856 } else { 864 } else {
857 rng := hcl.RangeBetween(open.Range, close.Range) 865 rng := hcl.RangeBetween(open.Range, close.Range)
858 ret = &IndexExpr{ 866 ret = &IndexExpr{
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
index 091c1c2..d7faeed 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
@@ -187,7 +187,7 @@ for later evaluation by the calling application.
187### Blocks 187### Blocks
188 188
189A _block_ creates a child body that is annotated with a block _type_ and 189A _block_ creates a child body that is annotated with a block _type_ and
190zero or more block _labels_. Blocks create a structural hierachy which can be 190zero or more block _labels_. Blocks create a structural hierarchy which can be
191interpreted by the calling application. 191interpreted by the calling application.
192 192
193Block labels can either be quoted literal strings or naked identifiers. 193Block labels can either be quoted literal strings or naked identifiers.
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
index bdc0e98..74847c7 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
@@ -416,12 +416,14 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
416 case *booleanVal: 416 case *booleanVal:
417 return cty.BoolVal(v.Value), nil 417 return cty.BoolVal(v.Value), nil
418 case *arrayVal: 418 case *arrayVal:
419 var diags hcl.Diagnostics
419 vals := []cty.Value{} 420 vals := []cty.Value{}
420 for _, jsonVal := range v.Values { 421 for _, jsonVal := range v.Values {
421 val, _ := (&expression{src: jsonVal}).Value(ctx) 422 val, valDiags := (&expression{src: jsonVal}).Value(ctx)
422 vals = append(vals, val) 423 vals = append(vals, val)
424 diags = append(diags, valDiags...)
423 } 425 }
424 return cty.TupleVal(vals), nil 426 return cty.TupleVal(vals), diags
425 case *objectVal: 427 case *objectVal:
426 var diags hcl.Diagnostics 428 var diags hcl.Diagnostics
427 attrs := map[string]cty.Value{} 429 attrs := map[string]cty.Value{}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
index 8bbaff8..97ef613 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/spec.md
+++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
@@ -66,7 +66,7 @@ _block header schemata_:
66Within a schema, it is an error to request the same attribute name twice or 66Within a schema, it is an error to request the same attribute name twice or
67to request a block type whose name is also an attribute name. While this can 67to request a block type whose name is also an attribute name. While this can
68in principle be supported in some syntaxes, in other syntaxes the attribute 68in principle be supported in some syntaxes, in other syntaxes the attribute
69and block namespaces are combined and so an an attribute cannot coexist with 69and block namespaces are combined and so an attribute cannot coexist with
70a block whose type name is identical to the attribute name. 70a block whose type name is identical to the attribute name.
71 71
72The result of applying a body schema to a body is _body content_, which 72The result of applying a body schema to a body is _body content_, which
@@ -497,7 +497,7 @@ producing an unknown value of the target type.
497 497
498Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result 498Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
499is the input value, verbatim. This is the only situation where the conversion 499is the input value, verbatim. This is the only situation where the conversion
500result value is not of the the given target type. 500result value is not of the given target type.
501 501
502### Primitive Type Conversions 502### Primitive Type Conversions
503 503
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
index b336f30..aab0945 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/structure.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
@@ -33,9 +33,9 @@ type Blocks []*Block
33type Attributes map[string]*Attribute 33type Attributes map[string]*Attribute
34 34
35// Body is a container for attributes and blocks. It serves as the primary 35// Body is a container for attributes and blocks. It serves as the primary
36// unit of heirarchical structure within configuration. 36// unit of hierarchical structure within configuration.
37// 37//
38// The content of a body cannot be meaningfully intepreted without a schema, 38// The content of a body cannot be meaningfully interpreted without a schema,
39// so Body represents the raw body content and has methods that allow the 39// so Body represents the raw body content and has methods that allow the
40// content to be extracted in terms of a given schema. 40// content to be extracted in terms of a given schema.
41type Body interface { 41type Body interface {
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
index d4a565a..f69d5fe 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -36,7 +36,7 @@ func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
36 &Diagnostic{ 36 &Diagnostic{
37 Severity: DiagError, 37 Severity: DiagError,
38 Summary: "Invalid expression", 38 Summary: "Invalid expression",
39 Detail: "A static variable reference is required.", 39 Detail: "A single static variable reference is required: only attribute access and indexing with constant keys. No calculations, function calls, template expressions, etc are allowed here.",
40 Subject: expr.Range().Ptr(), 40 Subject: expr.Range().Ptr(),
41 }, 41 },
42 } 42 }
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
index f20ae23..ded7fb4 100644
--- a/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
@@ -54,22 +54,12 @@ func formatIndent(lines []formatLine) {
54 // which should be more than enough for reasonable HCL uses. 54 // which should be more than enough for reasonable HCL uses.
55 indents := make([]int, 0, 10) 55 indents := make([]int, 0, 10)
56 56
57 inHeredoc := false
58 for i := range lines { 57 for i := range lines {
59 line := &lines[i] 58 line := &lines[i]
60 if len(line.lead) == 0 { 59 if len(line.lead) == 0 {
61 continue 60 continue
62 } 61 }
63 62
64 if inHeredoc {
65 for _, token := range line.lead {
66 if token.Type == hclsyntax.TokenCHeredoc {
67 inHeredoc = false
68 }
69 }
70 continue // don't touch indentation inside heredocs
71 }
72
73 if line.lead[0].Type == hclsyntax.TokenNewline { 63 if line.lead[0].Type == hclsyntax.TokenNewline {
74 // Never place spaces before a newline 64 // Never place spaces before a newline
75 line.lead[0].SpacesBefore = 0 65 line.lead[0].SpacesBefore = 0
@@ -80,9 +70,10 @@ func formatIndent(lines []formatLine) {
80 for _, token := range line.lead { 70 for _, token := range line.lead {
81 netBrackets += tokenBracketChange(token) 71 netBrackets += tokenBracketChange(token)
82 if token.Type == hclsyntax.TokenOHeredoc { 72 if token.Type == hclsyntax.TokenOHeredoc {
83 inHeredoc = true 73 break
84 } 74 }
85 } 75 }
76
86 for _, token := range line.assign { 77 for _, token := range line.assign {
87 netBrackets += tokenBracketChange(token) 78 netBrackets += tokenBracketChange(token)
88 } 79 }
@@ -391,9 +382,9 @@ func linesForFormat(tokens Tokens) []formatLine {
391 382
392 // Now we'll pick off any trailing comments and attribute assignments 383 // Now we'll pick off any trailing comments and attribute assignments
393 // to shuffle off into the "comment" and "assign" cells. 384 // to shuffle off into the "comment" and "assign" cells.
394 inHeredoc := false
395 for i := range lines { 385 for i := range lines {
396 line := &lines[i] 386 line := &lines[i]
387
397 if len(line.lead) == 0 { 388 if len(line.lead) == 0 {
398 // if the line is empty then there's nothing for us to do 389 // if the line is empty then there's nothing for us to do
399 // (this should happen only for the final line, because all other 390 // (this should happen only for the final line, because all other
@@ -401,26 +392,6 @@ func linesForFormat(tokens Tokens) []formatLine {
401 continue 392 continue
402 } 393 }
403 394
404 if inHeredoc {
405 for _, tok := range line.lead {
406 if tok.Type == hclsyntax.TokenCHeredoc {
407 inHeredoc = false
408 break
409 }
410 }
411 // Inside a heredoc everything is "lead", even if there's a
412 // template interpolation embedded in there that might otherwise
413 // confuse our logic below.
414 continue
415 }
416
417 for _, tok := range line.lead {
418 if tok.Type == hclsyntax.TokenOHeredoc {
419 inHeredoc = true
420 break
421 }
422 }
423
424 if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { 395 if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
425 line.comment = line.lead[len(line.lead)-1:] 396 line.comment = line.lead[len(line.lead)-1:]
426 line.lead = line.lead[:len(line.lead)-1] 397 line.lead = line.lead[:len(line.lead)-1]
diff --git a/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go
new file mode 100644
index 0000000..7a63850
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
3// ForEachAttr is the address of an attribute referencing the current "for_each" object in
 4// the interpolation scope, addressed using the "each" keyword, e.g. "each.key" and "each.value".
5type ForEachAttr struct {
6 referenceable
7 Name string
8}
9
10func (f ForEachAttr) String() string {
11 return "each." + f.Name
12}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
index 84fe8a0..a230d0c 100644
--- a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
@@ -85,6 +85,14 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
85 Remaining: remain, 85 Remaining: remain,
86 }, diags 86 }, diags
87 87
88 case "each":
89 name, rng, remain, diags := parseSingleAttrRef(traversal)
90 return &Reference{
91 Subject: ForEachAttr{Name: name},
92 SourceRange: tfdiags.SourceRangeFromHCL(rng),
93 Remaining: remain,
94 }, diags
95
88 case "data": 96 case "data":
89 if len(traversal) < 3 { 97 if len(traversal) < 3 {
90 diags = diags.Append(&hcl.Diagnostic{ 98 diags = diags.Append(&hcl.Diagnostic{
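The new "each" case above turns an each.key or each.value traversal into a ForEachAttr reference. A small sketch of how that surfaces through the exported addrs.ParseRef, assuming the vendored hcl2 traversal parser; the filename is illustrative:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl2/hcl"
    "github.com/hashicorp/hcl2/hcl/hclsyntax"
    "github.com/hashicorp/terraform/addrs"
)

func main() {
    // Parse the traversal "each.key" as it would appear in an expression.
    traversal, diags := hclsyntax.ParseTraversalAbs(
        []byte("each.key"), "example.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    ref, _ := addrs.ParseRef(traversal)
    fmt.Println(ref.Subject) // each.key, an addrs.ForEachAttr
}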
diff --git a/vendor/github.com/hashicorp/terraform/command/format/plan.go b/vendor/github.com/hashicorp/terraform/command/format/plan.go
index 098653f..ef129a9 100644
--- a/vendor/github.com/hashicorp/terraform/command/format/plan.go
+++ b/vendor/github.com/hashicorp/terraform/command/format/plan.go
@@ -83,6 +83,10 @@ func NewPlan(changes *plans.Changes) *Plan {
83 continue 83 continue
84 } 84 }
85 85
86 if rc.Action == plans.NoOp {
87 continue
88 }
89
86 // For now we'll shim this to work with our old types. 90 // For now we'll shim this to work with our old types.
87 // TODO: Update for the new plan types, ideally also switching over to 91 // TODO: Update for the new plan types, ideally also switching over to
88 // a structural diff renderer instead of a flat renderer. 92 // a structural diff renderer instead of a flat renderer.
diff --git a/vendor/github.com/hashicorp/terraform/command/format/state.go b/vendor/github.com/hashicorp/terraform/command/format/state.go
index f411ef9..be1ea24 100644
--- a/vendor/github.com/hashicorp/terraform/command/format/state.go
+++ b/vendor/github.com/hashicorp/terraform/command/format/state.go
@@ -75,11 +75,14 @@ func State(opts *StateOpts) string {
75 v := m.OutputValues[k] 75 v := m.OutputValues[k]
76 p.buf.WriteString(fmt.Sprintf("%s = ", k)) 76 p.buf.WriteString(fmt.Sprintf("%s = ", k))
77 p.writeValue(v.Value, plans.NoOp, 0) 77 p.writeValue(v.Value, plans.NoOp, 0)
78 p.buf.WriteString("\n\n") 78 p.buf.WriteString("\n")
79 } 79 }
80 } 80 }
81 81
82 return opts.Color.Color(strings.TrimSpace(p.buf.String())) 82 trimmedOutput := strings.TrimSpace(p.buf.String())
83 trimmedOutput += "[reset]"
84
85 return opts.Color.Color(trimmedOutput)
83 86
84} 87}
85 88
@@ -95,81 +98,114 @@ func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraf
95 // Go through each resource and begin building up the output. 98 // Go through each resource and begin building up the output.
96 for _, key := range names { 99 for _, key := range names {
97 for k, v := range m.Resources[key].Instances { 100 for k, v := range m.Resources[key].Instances {
 101 // keep these in order so the current object stays first and the
 102 // deposed objects get deterministic output
103 type obj struct {
104 header string
105 instance *states.ResourceInstanceObjectSrc
106 }
107 instances := []obj{}
108
98 addr := m.Resources[key].Addr 109 addr := m.Resources[key].Addr
99 110
100 taintStr := "" 111 taintStr := ""
101 if v.Current.Status == 'T' { 112 if v.Current != nil && v.Current.Status == 'T' {
102 taintStr = "(tainted)" 113 taintStr = " (tainted)"
103 } 114 }
104 p.buf.WriteString(fmt.Sprintf("# %s: %s\n", addr.Absolute(m.Addr).Instance(k), taintStr)) 115
105 116 instances = append(instances,
106 var schema *configschema.Block 117 obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current})
107 provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() 118
108 if _, exists := schemas.Providers[provider]; !exists { 119 for dk, v := range v.Deposed {
109 // This should never happen in normal use because we should've 120 instances = append(instances,
110 // loaded all of the schemas and checked things prior to this 121 obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v})
111 // point. We can't return errors here, but since this is UI code
112 // we will try to do _something_ reasonable.
113 p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider))
114 continue
115 } 122 }
116 123
117 switch addr.Mode { 124 // Sort the instances for consistent output.
 118 case addrs.ManagedResourceMode: 125 // The sort starts at the second index, so the current instance
119 schema, _ = schemas.ResourceTypeConfig( 126 // is always first.
120 provider, 127 sort.Slice(instances[1:], func(i, j int) bool {
121 addr.Mode, 128 return instances[i+1].header < instances[j+1].header
122 addr.Type, 129 })
123 ) 130
124 if schema == nil { 131 for _, obj := range instances {
125 p.buf.WriteString(fmt.Sprintf( 132 header := obj.header
126 "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) 133 instance := obj.instance
134 p.buf.WriteString(header)
135 if instance == nil {
136 // this shouldn't happen, but there's nothing to do here so
137 // don't panic below.
127 continue 138 continue
128 } 139 }
129 140
130 p.buf.WriteString(fmt.Sprintf( 141 var schema *configschema.Block
131 "resource %q %q {", 142 provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact()
132 addr.Type, 143 if _, exists := schemas.Providers[provider]; !exists {
133 addr.Name, 144 // This should never happen in normal use because we should've
134 )) 145 // loaded all of the schemas and checked things prior to this
135 case addrs.DataResourceMode: 146 // point. We can't return errors here, but since this is UI code
136 schema, _ = schemas.ResourceTypeConfig( 147 // we will try to do _something_ reasonable.
137 provider, 148 p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider))
138 addr.Mode,
139 addr.Type,
140 )
141 if schema == nil {
142 p.buf.WriteString(fmt.Sprintf(
143 "# missing schema for provider %q data source %s\n\n", provider, addr.Type))
144 continue 149 continue
145 } 150 }
146 151
147 p.buf.WriteString(fmt.Sprintf( 152 switch addr.Mode {
148 "data %q %q {", 153 case addrs.ManagedResourceMode:
149 addr.Type, 154 schema, _ = schemas.ResourceTypeConfig(
150 addr.Name, 155 provider,
151 )) 156 addr.Mode,
152 default: 157 addr.Type,
153 // should never happen, since the above is exhaustive 158 )
154 p.buf.WriteString(addr.String()) 159 if schema == nil {
155 } 160 p.buf.WriteString(fmt.Sprintf(
161 "# missing schema for provider %q resource type %s\n\n", provider, addr.Type))
162 continue
163 }
156 164
157 val, err := v.Current.Decode(schema.ImpliedType()) 165 p.buf.WriteString(fmt.Sprintf(
158 if err != nil { 166 "resource %q %q {",
159 fmt.Println(err.Error()) 167 addr.Type,
160 break 168 addr.Name,
161 } 169 ))
170 case addrs.DataResourceMode:
171 schema, _ = schemas.ResourceTypeConfig(
172 provider,
173 addr.Mode,
174 addr.Type,
175 )
176 if schema == nil {
177 p.buf.WriteString(fmt.Sprintf(
178 "# missing schema for provider %q data source %s\n\n", provider, addr.Type))
179 continue
180 }
162 181
163 path := make(cty.Path, 0, 3) 182 p.buf.WriteString(fmt.Sprintf(
164 bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) 183 "data %q %q {",
165 if bodyWritten { 184 addr.Type,
166 p.buf.WriteString("\n") 185 addr.Name,
167 } 186 ))
187 default:
188 // should never happen, since the above is exhaustive
189 p.buf.WriteString(addr.String())
190 }
168 191
169 p.buf.WriteString("}\n\n") 192 val, err := instance.Decode(schema.ImpliedType())
193 if err != nil {
194 fmt.Println(err.Error())
195 break
196 }
197
198 path := make(cty.Path, 0, 3)
199 bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path)
200 if bodyWritten {
201 p.buf.WriteString("\n")
202 }
203
204 p.buf.WriteString("}\n\n")
205 }
170 } 206 }
171 } 207 }
172 p.buf.WriteString("[reset]\n") 208 p.buf.WriteString("\n")
173} 209}
174 210
175func formatNestedList(indent string, outputList []interface{}) string { 211func formatNestedList(indent string, outputList []interface{}) string {
@@ -231,7 +267,7 @@ func formatListOutput(indent, outputName string, outputList []interface{}) strin
231 267
232func formatNestedMap(indent string, outputMap map[string]interface{}) string { 268func formatNestedMap(indent string, outputMap map[string]interface{}) string {
233 ks := make([]string, 0, len(outputMap)) 269 ks := make([]string, 0, len(outputMap))
234 for k, _ := range outputMap { 270 for k := range outputMap {
235 ks = append(ks, k) 271 ks = append(ks, k)
236 } 272 }
237 sort.Strings(ks) 273 sort.Strings(ks)
@@ -256,7 +292,7 @@ func formatNestedMap(indent string, outputMap map[string]interface{}) string {
256 292
257func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { 293func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string {
258 ks := make([]string, 0, len(outputMap)) 294 ks := make([]string, 0, len(outputMap))
259 for k, _ := range outputMap { 295 for k := range outputMap {
260 ks = append(ks, k) 296 ks = append(ks, k)
261 } 297 }
262 sort.Strings(ks) 298 sort.Strings(ks)
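The instances[1:] trick in formatStateModule above sorts only the deposed objects while pinning the current object at index zero; the comparator receives indices relative to the sub-slice, hence the +1 when indexing the full slice. A self-contained sketch with invented header strings:

package main

import (
    "fmt"
    "sort"
)

func main() {
    headers := []string{
        "# aws_instance.web: (current)",
        "# aws_instance.web: (deposed object 00000002)",
        "# aws_instance.web: (deposed object 00000001)",
    }

    // Sort everything after the first element; i and j index the
    // sub-slice headers[1:], so add 1 to address the full slice.
    sort.Slice(headers[1:], func(i, j int) bool {
        return headers[i+1] < headers[j+1]
    })

    fmt.Println(headers[0]) // the current object is still first
    fmt.Println(headers[1]) // deposed object 00000001 now sorts ahead
}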
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
index 1772fd7..f13a046 100644
--- a/vendor/github.com/hashicorp/terraform/config/config.go
+++ b/vendor/github.com/hashicorp/terraform/config/config.go
@@ -252,35 +252,6 @@ func (r *Resource) Id() string {
252 } 252 }
253} 253}
254 254
255// ProviderFullName returns the full name of the provider for this resource,
256// which may either be specified explicitly using the "provider" meta-argument
257// or implied by the prefix on the resource type name.
258func (r *Resource) ProviderFullName() string {
259 return ResourceProviderFullName(r.Type, r.Provider)
260}
261
262// ResourceProviderFullName returns the full (dependable) name of the
263// provider for a hypothetical resource with the given resource type and
264// explicit provider string. If the explicit provider string is empty then
265// the provider name is inferred from the resource type name.
266func ResourceProviderFullName(resourceType, explicitProvider string) string {
267 if explicitProvider != "" {
268 // check for an explicit provider name, or return the original
269 parts := strings.SplitAfter(explicitProvider, "provider.")
270 return parts[len(parts)-1]
271 }
272
273 idx := strings.IndexRune(resourceType, '_')
274 if idx == -1 {
275 // If no underscores, the resource name is assumed to be
276 // also the provider name, e.g. if the provider exposes
277 // only a single resource of each type.
278 return resourceType
279 }
280
281 return resourceType[:idx]
282}
283
284// Validate does some basic semantic checking of the configuration. 255// Validate does some basic semantic checking of the configuration.
285func (c *Config) Validate() tfdiags.Diagnostics { 256func (c *Config) Validate() tfdiags.Diagnostics {
286 if c == nil { 257 if c == nil {
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
index 66a677d..ce33ab1 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
@@ -7,6 +7,7 @@ import (
7 7
8 "github.com/hashicorp/hil" 8 "github.com/hashicorp/hil"
9 "github.com/hashicorp/hil/ast" 9 "github.com/hashicorp/hil/ast"
10 "github.com/hashicorp/terraform/config/hcl2shim"
10 "github.com/mitchellh/reflectwalk" 11 "github.com/mitchellh/reflectwalk"
11) 12)
12 13
@@ -160,7 +161,7 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error {
160 if w.loc == reflectwalk.SliceElem { 161 if w.loc == reflectwalk.SliceElem {
161 switch typedReplaceVal := replaceVal.(type) { 162 switch typedReplaceVal := replaceVal.(type) {
162 case string: 163 case string:
163 if typedReplaceVal == UnknownVariableValue { 164 if typedReplaceVal == hcl2shim.UnknownVariableValue {
164 remove = true 165 remove = true
165 } 166 }
166 case []interface{}: 167 case []interface{}:
@@ -168,7 +169,7 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error {
168 remove = true 169 remove = true
169 } 170 }
170 } 171 }
171 } else if replaceVal == UnknownVariableValue { 172 } else if replaceVal == hcl2shim.UnknownVariableValue {
172 remove = true 173 remove = true
173 } 174 }
174 175
@@ -224,7 +225,7 @@ func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
224func hasUnknownValue(variable []interface{}) bool { 225func hasUnknownValue(variable []interface{}) bool {
225 for _, value := range variable { 226 for _, value := range variable {
226 if strVal, ok := value.(string); ok { 227 if strVal, ok := value.(string); ok {
227 if strVal == UnknownVariableValue { 228 if strVal == hcl2shim.UnknownVariableValue {
228 return true 229 return true
229 } 230 }
230 } 231 }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
index 6e34781..612e25b 100644
--- a/vendor/github.com/hashicorp/terraform/config/loader.go
+++ b/vendor/github.com/hashicorp/terraform/config/loader.go
@@ -135,21 +135,6 @@ func LoadDir(root string) (*Config, error) {
135 return result, nil 135 return result, nil
136} 136}
137 137
138// IsEmptyDir returns true if the directory given has no Terraform
139// configuration files.
140func IsEmptyDir(root string) (bool, error) {
141 if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
142 return true, nil
143 }
144
145 fs, os, err := dirFiles(root)
146 if err != nil {
147 return false, err
148 }
149
150 return len(fs) == 0 && len(os) == 0, nil
151}
152
153// Ext returns the Terraform configuration extension of the given 138// Ext returns the Terraform configuration extension of the given
154// path, or a blank string if it is an invalid function. 139// path, or a blank string if it is an invalid function.
155func ext(path string) string { 140func ext(path string) string {
diff --git a/vendor/github.com/hashicorp/terraform/config/module/versions.go b/vendor/github.com/hashicorp/terraform/config/module/versions.go
index 8348d4b..29701b9 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/versions.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/versions.go
@@ -3,7 +3,9 @@ package module
3import ( 3import (
4 "errors" 4 "errors"
5 "fmt" 5 "fmt"
6 "regexp"
6 "sort" 7 "sort"
8 "strings"
7 9
8 version "github.com/hashicorp/go-version" 10 version "github.com/hashicorp/go-version"
9 "github.com/hashicorp/terraform/registry/response" 11 "github.com/hashicorp/terraform/registry/response"
@@ -11,6 +13,8 @@ import (
11 13
12const anyVersion = ">=0.0.0" 14const anyVersion = ">=0.0.0"
13 15
16var explicitEqualityConstraint = regexp.MustCompile("^=[0-9]")
17
14// return the newest version that satisfies the provided constraint 18// return the newest version that satisfies the provided constraint
15func newest(versions []string, constraint string) (string, error) { 19func newest(versions []string, constraint string) (string, error) {
16 if constraint == "" { 20 if constraint == "" {
@@ -21,6 +25,30 @@ func newest(versions []string, constraint string) (string, error) {
21 return "", err 25 return "", err
22 } 26 }
23 27
28 // Find any build metadata in the constraints, and
29 // store whether the constraint is an explicit equality that
 30 // contains a build metadata requirement, so that we can return the
 31 // specific build metadata version when one is requested
32 var constraintMetas []string
33 var equalsConstraint bool
34 for i := range cs {
35 constraintMeta := strings.SplitAfterN(cs[i].String(), "+", 2)
36 if len(constraintMeta) > 1 {
37 constraintMetas = append(constraintMetas, constraintMeta[1])
38 }
39 }
40
41 if len(cs) == 1 {
42 equalsConstraint = explicitEqualityConstraint.MatchString(cs.String())
43 }
44
 45 // If the version string includes metadata, this is valid in go-version.
 46 // However, the expected behavior in that case is ambiguous,
 47 // so give an error and let the user write something more explicit
48 if (len(cs) > 1 || !equalsConstraint) && len(constraintMetas) > 0 {
49 return "", fmt.Errorf("Constraints including build metadata must have explicit equality, or are otherwise too ambiguous: %s", cs.String())
50 }
51
24 switch len(versions) { 52 switch len(versions) {
25 case 0: 53 case 0:
26 return "", errors.New("no versions found") 54 return "", errors.New("no versions found")
@@ -58,6 +86,12 @@ func newest(versions []string, constraint string) (string, error) {
58 continue 86 continue
59 } 87 }
60 if cs.Check(v) { 88 if cs.Check(v) {
89 // Constraint has metadata and is explicit equality
90 if equalsConstraint && len(constraintMetas) > 0 {
91 if constraintMetas[0] != v.Metadata() {
92 continue
93 }
94 }
61 return versions[i], nil 95 return versions[i], nil
62 } 96 }
63 } 97 }
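go-version accepts build metadata (the +suffix) but, as far as I can tell, ignores it in constraint checks, which is why the hunks above only honor metadata for a single explicit-equality constraint and then compare Metadata() by hand. A hedged sketch with illustrative version strings:

package main

import (
    "fmt"
    "regexp"

    version "github.com/hashicorp/go-version"
)

var explicitEquality = regexp.MustCompile("^=[0-9]")

func main() {
    cs, err := version.NewConstraint("=0.9.0+def456")
    if err != nil {
        panic(err)
    }

    v := version.Must(version.NewVersion("0.9.0+abc123"))

    // The constraint is an explicit equality carrying metadata...
    fmt.Println(explicitEquality.MatchString(cs.String())) // true
    // ...but Check ignores metadata, so it matches the "wrong" build:
    fmt.Println(cs.Check(v)) // true
    // hence the extra comparison against v.Metadata() in the hunk above.
    fmt.Println(v.Metadata() == "def456") // false: skip this candidate
}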
diff --git a/vendor/github.com/hashicorp/terraform/config/providers.go b/vendor/github.com/hashicorp/terraform/config/providers.go
index 7a50782..eeddabc 100644
--- a/vendor/github.com/hashicorp/terraform/config/providers.go
+++ b/vendor/github.com/hashicorp/terraform/config/providers.go
@@ -13,48 +13,6 @@ type ProviderVersionConstraint struct {
13// ProviderVersionConstraint, as produced by Config.RequiredProviders. 13// ProviderVersionConstraint, as produced by Config.RequiredProviders.
14type ProviderVersionConstraints map[string]ProviderVersionConstraint 14type ProviderVersionConstraints map[string]ProviderVersionConstraint
15 15
16// RequiredProviders returns the ProviderVersionConstraints for this
17// module.
18//
19// This includes both providers that are explicitly requested by provider
20// blocks and those that are used implicitly by instantiating one of their
21// resource types. In the latter case, the returned semver Range will
22// accept any version of the provider.
23func (c *Config) RequiredProviders() ProviderVersionConstraints {
24 ret := make(ProviderVersionConstraints, len(c.ProviderConfigs))
25
26 configs := c.ProviderConfigsByFullName()
27
28 // In order to find the *implied* dependencies (those without explicit
29 // "provider" blocks) we need to walk over all of the resources and
30 // cross-reference with the provider configs.
31 for _, rc := range c.Resources {
32 providerName := rc.ProviderFullName()
33 var providerType string
34
35 // Default to (effectively) no constraint whatsoever, but we might
36 // override if there's an explicit constraint in config.
37 constraint := ">=0.0.0"
38
39 config, ok := configs[providerName]
40 if ok {
41 if config.Version != "" {
42 constraint = config.Version
43 }
44 providerType = config.Name
45 } else {
46 providerType = providerName
47 }
48
49 ret[providerName] = ProviderVersionConstraint{
50 ProviderType: providerType,
51 Constraint: constraint,
52 }
53 }
54
55 return ret
56}
57
58// RequiredRanges returns a semver.Range for each distinct provider type in 16// RequiredRanges returns a semver.Range for each distinct provider type in
59// the constraint map. If the same provider type appears more than once 17// the constraint map. If the same provider type appears more than once
60// (e.g. because aliases are in use) then their respective constraints are 18// (e.g. because aliases are in use) then their respective constraints are
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
index 1854a8b..c5ac86d 100644
--- a/vendor/github.com/hashicorp/terraform/config/raw_config.go
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -17,12 +17,6 @@ import (
17 "github.com/mitchellh/reflectwalk" 17 "github.com/mitchellh/reflectwalk"
18) 18)
19 19
20// UnknownVariableValue is a sentinel value that can be used
21// to denote that the value of a variable is unknown at this time.
22// RawConfig uses this information to build up data about
23// unknown keys.
24const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
25
26// RawConfig is a structure that holds a piece of configuration 20// RawConfig is a structure that holds a piece of configuration
27// where the overall structure is unknown since it will be used 21// where the overall structure is unknown since it will be used
28// to configure a plugin or some other similar external component. 22// to configure a plugin or some other similar external component.
diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go
index 948b2c8..1ca1d77 100644
--- a/vendor/github.com/hashicorp/terraform/configs/config_build.go
+++ b/vendor/github.com/hashicorp/terraform/configs/config_build.go
@@ -76,6 +76,7 @@ func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config,
76 } 76 }
77 77
78 child.Children, modDiags = buildChildModules(child, walker) 78 child.Children, modDiags = buildChildModules(child, walker)
79 diags = append(diags, modDiags...)
79 80
80 ret[call.Name] = child 81 ret[call.Name] = child
81 } 82 }
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
index 4a3dace..75c7ef1 100644
--- a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
@@ -20,6 +20,7 @@ import (
20var goGetterDetectors = []getter.Detector{ 20var goGetterDetectors = []getter.Detector{
21 new(getter.GitHubDetector), 21 new(getter.GitHubDetector),
22 new(getter.BitBucketDetector), 22 new(getter.BitBucketDetector),
23 new(getter.GCSDetector),
23 new(getter.S3Detector), 24 new(getter.S3Detector),
24 new(getter.FileDetector), 25 new(getter.FileDetector),
25} 26}
@@ -44,6 +45,7 @@ var goGetterDecompressors = map[string]getter.Decompressor{
44 45
45var goGetterGetters = map[string]getter.Getter{ 46var goGetterGetters = map[string]getter.Getter{
46 "file": new(getter.FileGetter), 47 "file": new(getter.FileGetter),
48 "gcs": new(getter.GCSGetter),
47 "git": new(getter.GitGetter), 49 "git": new(getter.GitGetter),
48 "hg": new(getter.HgGetter), 50 "hg": new(getter.HgGetter),
49 "s3": new(getter.S3Getter), 51 "s3": new(getter.S3Getter),
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
index 93a9420..0e6cba9 100644
--- a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
@@ -64,7 +64,15 @@ func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module,
64 Subject: &req.SourceAddrRange, 64 Subject: &req.SourceAddrRange,
65 }) 65 })
66 } 66 }
67 if !req.VersionConstraint.Required.Check(record.Version) { 67 if len(req.VersionConstraint.Required) > 0 && record.Version == nil {
68 diags = append(diags, &hcl.Diagnostic{
69 Severity: hcl.DiagError,
70 Summary: "Module version requirements have changed",
71 Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.",
72 Subject: &req.SourceAddrRange,
73 })
74 }
75 if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) {
68 diags = append(diags, &hcl.Diagnostic{ 76 diags = append(diags, &hcl.Diagnostic{
69 Severity: hcl.DiagError, 77 Severity: hcl.DiagError,
70 Summary: "Module version requirements have changed", 78 Summary: "Module version requirements have changed",
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
index e59f58d..7996c38 100644
--- a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
@@ -113,7 +113,10 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
113 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") 113 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list")
114 } 114 }
115 l := coll.LengthInt() 115 l := coll.LengthInt()
116 if l < blockS.MinItems { 116
117 // Assume that if there are unknowns this could have come from
118 // a dynamic block, and we can't validate MinItems yet.
119 if l < blockS.MinItems && coll.IsWhollyKnown() {
117 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) 120 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
118 } 121 }
119 if l > blockS.MaxItems && blockS.MaxItems > 0 { 122 if l > blockS.MaxItems && blockS.MaxItems > 0 {
@@ -161,7 +164,10 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
161 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") 164 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set")
162 } 165 }
163 l := coll.LengthInt() 166 l := coll.LengthInt()
164 if l < blockS.MinItems { 167
168 // Assume that if there are unknowns this could have come from
169 // a dynamic block, and we can't validate MinItems yet.
170 if l < blockS.MinItems && coll.IsWhollyKnown() {
165 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) 171 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
166 } 172 }
167 if l > blockS.MaxItems && blockS.MaxItems > 0 { 173 if l > blockS.MaxItems && blockS.MaxItems > 0 {
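The IsWhollyKnown guard above defers MinItems validation when the collection contains unknowns, since a dynamic block may still expand into more items at apply time. A small go-cty illustration: the list's length is known even though one element is not:

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
)

func main() {
    elemType := cty.Object(map[string]cty.Type{"name": cty.String})

    l := cty.ListVal([]cty.Value{
        cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("a")}),
        cty.UnknownVal(elemType), // e.g. produced by a dynamic block
    })

    // The length is known (2), but the value is not wholly known, so a
    // MinItems check would be postponed until everything is resolved.
    fmt.Println(l.LengthInt(), l.IsWhollyKnown()) // 2 false
}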
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
index d8f41ea..e748dd2 100644
--- a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
@@ -33,6 +33,14 @@ func (b *Block) DecoderSpec() hcldec.Spec {
33 33
34 childSpec := blockS.Block.DecoderSpec() 34 childSpec := blockS.Block.DecoderSpec()
35 35
36 // We can only validate 0 or 1 for MinItems, because a dynamic block
37 // may satisfy any number of min items while only having a single
38 // block in the config.
39 minItems := 0
40 if blockS.MinItems > 1 {
41 minItems = 1
42 }
43
36 switch blockS.Nesting { 44 switch blockS.Nesting {
37 case NestingSingle, NestingGroup: 45 case NestingSingle, NestingGroup:
38 ret[name] = &hcldec.BlockSpec{ 46 ret[name] = &hcldec.BlockSpec{
@@ -57,14 +65,14 @@ func (b *Block) DecoderSpec() hcldec.Spec {
57 ret[name] = &hcldec.BlockTupleSpec{ 65 ret[name] = &hcldec.BlockTupleSpec{
58 TypeName: name, 66 TypeName: name,
59 Nested: childSpec, 67 Nested: childSpec,
60 MinItems: blockS.MinItems, 68 MinItems: minItems,
61 MaxItems: blockS.MaxItems, 69 MaxItems: blockS.MaxItems,
62 } 70 }
63 } else { 71 } else {
64 ret[name] = &hcldec.BlockListSpec{ 72 ret[name] = &hcldec.BlockListSpec{
65 TypeName: name, 73 TypeName: name,
66 Nested: childSpec, 74 Nested: childSpec,
67 MinItems: blockS.MinItems, 75 MinItems: minItems,
68 MaxItems: blockS.MaxItems, 76 MaxItems: blockS.MaxItems,
69 } 77 }
70 } 78 }
@@ -77,7 +85,7 @@ func (b *Block) DecoderSpec() hcldec.Spec {
77 ret[name] = &hcldec.BlockSetSpec{ 85 ret[name] = &hcldec.BlockSetSpec{
78 TypeName: name, 86 TypeName: name,
79 Nested: childSpec, 87 Nested: childSpec,
80 MinItems: blockS.MinItems, 88 MinItems: minItems,
81 MaxItems: blockS.MaxItems, 89 MaxItems: blockS.MaxItems,
82 } 90 }
83 case NestingMap: 91 case NestingMap:
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
index 3014cb4..752d6d9 100644
--- a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
@@ -2,6 +2,7 @@ package configs
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "os"
5 "path/filepath" 6 "path/filepath"
6 "strings" 7 "strings"
7 8
@@ -140,3 +141,23 @@ func IsIgnoredFile(name string) bool {
140 strings.HasSuffix(name, "~") || // vim 141 strings.HasSuffix(name, "~") || // vim
141 strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs 142 strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
142} 143}
144
145// IsEmptyDir returns true if the given filesystem path contains no Terraform
146// configuration files.
147//
148// Unlike the methods of the Parser type, this function always consults the
149// real filesystem, and thus it isn't appropriate to use when working with
150// configuration loaded from a plan file.
151func IsEmptyDir(path string) (bool, error) {
152 if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
153 return true, nil
154 }
155
156 p := NewParser(nil)
157 fs, os, err := p.dirFiles(path)
158 if err != nil {
159 return false, err
160 }
161
162 return len(fs) == 0 && len(os) == 0, nil
163}
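Usage of the relocated helper is unchanged apart from the package; a minimal sketch, with a hypothetical module path:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/terraform/configs"
)

func main() {
    // IsEmptyDir always consults the real filesystem, so it is not
    // appropriate for configuration loaded from a plan file.
    empty, err := configs.IsEmptyDir("./modules/network")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("contains no Terraform configuration files:", empty)
}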
diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go
index de1a343..edf822c 100644
--- a/vendor/github.com/hashicorp/terraform/configs/resource.go
+++ b/vendor/github.com/hashicorp/terraform/configs/resource.go
@@ -111,13 +111,15 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
111 111
112 if attr, exists := content.Attributes["for_each"]; exists { 112 if attr, exists := content.Attributes["for_each"]; exists {
113 r.ForEach = attr.Expr 113 r.ForEach = attr.Expr
114 // We currently parse this, but don't yet do anything with it. 114 // Cannot have count and for_each on the same resource block
115 diags = append(diags, &hcl.Diagnostic{ 115 if r.Count != nil {
116 Severity: hcl.DiagError, 116 diags = append(diags, &hcl.Diagnostic{
117 Summary: "Reserved argument name in resource block", 117 Severity: hcl.DiagError,
118 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), 118 Summary: `Invalid combination of "count" and "for_each"`,
 119 Subject: &attr.NameRange, 119 Detail: `The "count" and "for_each" meta-arguments are mutually exclusive; only one should be used to be explicit about the number of resources to be created.`,
120 }) 120 Subject: &attr.NameRange,
121 })
122 }
121 } 123 }
122 124
123 if attr, exists := content.Attributes["provider"]; exists { 125 if attr, exists := content.Attributes["provider"]; exists {
@@ -300,13 +302,15 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
300 302
301 if attr, exists := content.Attributes["for_each"]; exists { 303 if attr, exists := content.Attributes["for_each"]; exists {
302 r.ForEach = attr.Expr 304 r.ForEach = attr.Expr
303 // We currently parse this, but don't yet do anything with it. 305 // Cannot have count and for_each on the same data block
304 diags = append(diags, &hcl.Diagnostic{ 306 if r.Count != nil {
305 Severity: hcl.DiagError, 307 diags = append(diags, &hcl.Diagnostic{
306 Summary: "Reserved argument name in module block", 308 Severity: hcl.DiagError,
307 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), 309 Summary: `Invalid combination of "count" and "for_each"`,
 308 Subject: &attr.NameRange, 310 Detail: `The "count" and "for_each" meta-arguments are mutually exclusive; only one should be used to be explicit about the number of resources to be created.`,
309 }) 311 Subject: &attr.NameRange,
312 })
313 }
310 } 314 }
311 315
312 if attr, exists := content.Attributes["provider"]; exists { 316 if attr, exists := content.Attributes["provider"]; exists {
diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
index 7aa19ef..e40ce16 100644
--- a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
+++ b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
@@ -45,6 +45,13 @@ func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagno
45 return ret, diags 45 return ret, diags
46 } 46 }
47 47
48 if !val.IsWhollyKnown() {
49 // If there is a syntax error, HCL sets the value of the given attribute
50 // to cty.DynamicVal. A diagnostic for the syntax error will already
51 // bubble up, so we will move forward gracefully here.
52 return ret, diags
53 }
54
48 constraintStr := val.AsString() 55 constraintStr := val.AsString()
49 constraints, err := version.NewConstraint(constraintStr) 56 constraints, err := version.NewConstraint(constraintStr)
50 if err != nil { 57 if err != nil {
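The guard matters because HCL substitutes cty.DynamicVal for an attribute whose expression failed to parse, and calling AsString on an unknown value panics. A two-line go-cty demonstration:

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
)

func main() {
    v := cty.DynamicVal // what HCL yields for a syntactically bad attribute
    fmt.Println(v.IsWhollyKnown()) // false, so return before AsString
}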
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
index 510f47f..104c8f5 100644
--- a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
@@ -2,7 +2,6 @@ package plugin
2 2
3import ( 3import (
4 "encoding/json" 4 "encoding/json"
5 "errors"
6 "fmt" 5 "fmt"
7 "log" 6 "log"
8 "strconv" 7 "strconv"
@@ -16,6 +15,7 @@ import (
16 "github.com/hashicorp/terraform/configs/configschema" 15 "github.com/hashicorp/terraform/configs/configschema"
17 "github.com/hashicorp/terraform/helper/schema" 16 "github.com/hashicorp/terraform/helper/schema"
18 proto "github.com/hashicorp/terraform/internal/tfplugin5" 17 proto "github.com/hashicorp/terraform/internal/tfplugin5"
18 "github.com/hashicorp/terraform/plans/objchange"
19 "github.com/hashicorp/terraform/plugin/convert" 19 "github.com/hashicorp/terraform/plugin/convert"
20 "github.com/hashicorp/terraform/terraform" 20 "github.com/hashicorp/terraform/terraform"
21) 21)
@@ -284,6 +284,17 @@ func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.
284 return resp, nil 284 return resp, nil
285 } 285 }
286 286
287 // Now we need to make sure blocks are represented correctly, which means
288 // that missing blocks are empty collections, rather than null.
289 // First we need to CoerceValue to ensure that all object types match.
290 val, err = schemaBlock.CoerceValue(val)
291 if err != nil {
292 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
293 return resp, nil
294 }
295 // Normalize the value and fill in any missing blocks.
296 val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock)
297
287 // encode the final state to the expected msgpack format 298 // encode the final state to the expected msgpack format
288 newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) 299 newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
289 if err != nil { 300 if err != nil {
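The coerce-then-normalize pair above makes an upgraded legacy state conform to the schema's object type and then represents missing blocks as empty collections rather than null. A hedged, self-contained sketch with an invented single-block schema; the shape of the normalized output is my reading of the comment above, not a guarantee:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/configs/configschema"
    "github.com/hashicorp/terraform/plans/objchange"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    schemaBlock := &configschema.Block{
        BlockTypes: map[string]*configschema.NestedBlock{
            "rule": {
                Nesting: configschema.NestingList,
                Block: configschema.Block{
                    Attributes: map[string]*configschema.Attribute{
                        "name": {Type: cty.String, Optional: true},
                    },
                },
            },
        },
    }

    // Legacy state with no "rule" block at all.
    val, err := schemaBlock.CoerceValue(cty.EmptyObjectVal)
    if err != nil {
        panic(err)
    }

    // Normalization fills the missing block in as an empty collection
    // rather than null, which is what core now expects.
    val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock)
    fmt.Println(val.GetAttr("rule")) // an empty list value, not null
}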
@@ -316,11 +327,15 @@ func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]strin
316 requiresMigrate = version < res.StateUpgraders[0].Version 327 requiresMigrate = version < res.StateUpgraders[0].Version
317 } 328 }
318 329
319 if requiresMigrate { 330 if requiresMigrate && res.MigrateState == nil {
320 if res.MigrateState == nil { 331 // Providers were previously allowed to bump the version
321 return nil, 0, errors.New("cannot upgrade state, missing MigrateState function") 332 // without declaring MigrateState.
333 // If there are further upgraders, then we've only updated that far.
334 if len(res.StateUpgraders) > 0 {
335 schemaType = res.StateUpgraders[0].Type
336 upgradedVersion = res.StateUpgraders[0].Version
322 } 337 }
323 338 } else if requiresMigrate {
324 is := &terraform.InstanceState{ 339 is := &terraform.InstanceState{
325 ID: m["id"], 340 ID: m["id"],
326 Attributes: m, 341 Attributes: m,
@@ -476,7 +491,12 @@ func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_R
476} 491}
477 492
478func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { 493func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) {
479 resp := &proto.ReadResource_Response{} 494 resp := &proto.ReadResource_Response{
495 // helper/schema did previously handle private data during refresh, but
496 // core is now going to expect this to be maintained in order to
497 // persist it in the state.
498 Private: req.Private,
499 }
480 500
481 res := s.provider.ResourcesMap[req.TypeName] 501 res := s.provider.ResourcesMap[req.TypeName]
482 schemaBlock := s.getResourceSchemaBlock(req.TypeName) 502 schemaBlock := s.getResourceSchemaBlock(req.TypeName)
@@ -493,6 +513,15 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso
493 return resp, nil 513 return resp, nil
494 } 514 }
495 515
516 private := make(map[string]interface{})
517 if len(req.Private) > 0 {
518 if err := json.Unmarshal(req.Private, &private); err != nil {
519 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
520 return resp, nil
521 }
522 }
523 instanceState.Meta = private
524
496 newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) 525 newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta())
497 if err != nil { 526 if err != nil {
498 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) 527 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
@@ -569,6 +598,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
569 // We don't usually plan destroys, but this can return early in any case. 598 // We don't usually plan destroys, but this can return early in any case.
570 if proposedNewStateVal.IsNull() { 599 if proposedNewStateVal.IsNull() {
571 resp.PlannedState = req.ProposedNewState 600 resp.PlannedState = req.ProposedNewState
601 resp.PlannedPrivate = req.PriorPrivate
572 return resp, nil 602 return resp, nil
573 } 603 }
574 604
@@ -623,6 +653,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
623 // description that _shows_ there are no changes. This is always the 653 // description that _shows_ there are no changes. This is always the
624 // prior state, because we force a diff above if this is a new instance. 654 // prior state, because we force a diff above if this is a new instance.
625 resp.PlannedState = req.PriorState 655 resp.PlannedState = req.PriorState
656 resp.PlannedPrivate = req.PriorPrivate
626 return resp, nil 657 return resp, nil
627 } 658 }
628 659
@@ -683,6 +714,18 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
683 Msgpack: plannedMP, 714 Msgpack: plannedMP,
684 } 715 }
685 716
717 // encode any timeouts into the diff Meta
718 t := &schema.ResourceTimeout{}
719 if err := t.ConfigDecode(res, cfg); err != nil {
720 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
721 return resp, nil
722 }
723
724 if err := t.DiffEncode(diff); err != nil {
725 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
726 return resp, nil
727 }
728
686 // Now we need to store any NewExtra values, which are where any actual 729 // Now we need to store any NewExtra values, which are where any actual
687 // StateFunc modified config fields are hidden. 730 // StateFunc modified config fields are hidden.
688 privateMap := diff.Meta 731 privateMap := diff.Meta
@@ -929,6 +972,9 @@ func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.I
929 return resp, nil 972 return resp, nil
930 } 973 }
931 974
975 // Normalize the value and fill in any missing blocks.
976 newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock)
977
932 newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) 978 newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
933 if err != nil { 979 if err != nil {
934 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) 980 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
@@ -1160,6 +1206,8 @@ func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
1160 } 1206 }
1161 } 1207 }
1162 1208
1209 // check the invariants that we need below, to ensure we are working with
1210 // non-null and known values.
1163 if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { 1211 if src.IsNull() || !src.IsKnown() || !dst.IsKnown() {
1164 return dst 1212 return dst
1165 } 1213 }
@@ -1278,8 +1326,12 @@ func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
1278 return cty.ListVal(dsts) 1326 return cty.ListVal(dsts)
1279 } 1327 }
1280 1328
1281 case ty.IsPrimitiveType(): 1329 case ty == cty.String:
1282 if dst.IsNull() && src.IsWhollyKnown() && apply { 1330 // The legacy SDK should not be able to remove a value during plan or
 1331 // apply; however, we are only going to overwrite this if the source was
1332 // an empty string, since that is what is often equated with unset and
1333 // lost in the diff process.
1334 if dst.IsNull() && src.AsString() == "" {
1283 return src 1335 return src
1284 } 1336 }
1285 } 1337 }
@@ -1305,11 +1357,19 @@ func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic {
1305 for it.Next() { 1357 for it.Next() {
1306 kv, ev := it.Element() 1358 kv, ev := it.Element()
1307 if ev.IsNull() { 1359 if ev.IsNull() {
1360 // if this is a set, the kv is also going to be null which
1361 // isn't a valid path element, so we can't append it to the
1362 // diagnostic.
1363 p := path
1364 if !kv.IsNull() {
1365 p = append(p, cty.IndexStep{Key: kv})
1366 }
1367
1308 diags = append(diags, &proto.Diagnostic{ 1368 diags = append(diags, &proto.Diagnostic{
1309 Severity: proto.Diagnostic_ERROR, 1369 Severity: proto.Diagnostic_ERROR,
1310 Summary: "Null value found in list", 1370 Summary: "Null value found in list",
1311 Detail: "Null values are not allowed for this attribute value.", 1371 Detail: "Null values are not allowed for this attribute value.",
1312 Attribute: convert.PathToAttributePath(append(path, cty.IndexStep{Key: kv})), 1372 Attribute: convert.PathToAttributePath(p),
1313 }) 1373 })
1314 continue 1374 continue
1315 } 1375 }
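The nil-key special case exists because for a cty set the iterator's key is the element value itself, so a null element yields a null key that cannot be used as a cty.IndexStep. In isolation:

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
)

func main() {
    s := cty.SetVal([]cty.Value{
        cty.StringVal("ok"),
        cty.NullVal(cty.String),
    })

    for it := s.ElementIterator(); it.Next(); {
        kv, ev := it.Element()
        // For sets kv == ev, so the null element has a null key and the
        // diagnostic path must omit the index step.
        fmt.Printf("key null: %v, elem null: %v\n", kv.IsNull(), ev.IsNull())
    }
}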
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
index b2aff99..f488207 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
@@ -1,6 +1,7 @@
1package resource 1package resource
2 2
3import ( 3import (
4 "encoding/json"
4 "fmt" 5 "fmt"
5 6
6 "github.com/hashicorp/terraform/addrs" 7 "github.com/hashicorp/terraform/addrs"
@@ -52,43 +53,57 @@ func shimNewState(newState *states.State, providers map[string]terraform.Resourc
52 resource := getResource(providers, providerType, res.Addr) 53 resource := getResource(providers, providerType, res.Addr)
53 54
54 for key, i := range res.Instances { 55 for key, i := range res.Instances {
55 flatmap, err := shimmedAttributes(i.Current, resource) 56 resState := &terraform.ResourceState{
56 if err != nil { 57 Type: resType,
57 return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) 58 Provider: res.ProviderConfig.String(),
58 } 59 }
59 60
60 resState := &terraform.ResourceState{ 61 // We should always have a Current instance here, but be safe about checking.
61 Type: resType, 62 if i.Current != nil {
62 Primary: &terraform.InstanceState{ 63 flatmap, err := shimmedAttributes(i.Current, resource)
64 if err != nil {
65 return nil, fmt.Errorf("error decoding state for %q: %s", resType, err)
66 }
67
68 var meta map[string]interface{}
69 if i.Current.Private != nil {
70 err := json.Unmarshal(i.Current.Private, &meta)
71 if err != nil {
72 return nil, err
73 }
74 }
75
76 resState.Primary = &terraform.InstanceState{
63 ID: flatmap["id"], 77 ID: flatmap["id"],
64 Attributes: flatmap, 78 Attributes: flatmap,
65 Tainted: i.Current.Status == states.ObjectTainted, 79 Tainted: i.Current.Status == states.ObjectTainted,
66 }, 80 Meta: meta,
67 Provider: res.ProviderConfig.String(),
68 }
69 if i.Current.SchemaVersion != 0 {
70 resState.Primary.Meta = map[string]interface{}{
71 "schema_version": i.Current.SchemaVersion,
72 } 81 }
73 }
74 82
75 for _, dep := range i.Current.Dependencies { 83 if i.Current.SchemaVersion != 0 {
76 resState.Dependencies = append(resState.Dependencies, dep.String()) 84 resState.Primary.Meta = map[string]interface{}{
77 } 85 "schema_version": i.Current.SchemaVersion,
86 }
87 }
78 88
 79 // convert the indexes to the old style flatmap indexes 89 for _, dep := range i.Current.Dependencies {
80 idx := "" 90 resState.Dependencies = append(resState.Dependencies, dep.String())
81 switch key.(type) {
82 case addrs.IntKey:
83 // don't add numeric index values to resources with a count of 0
84 if len(res.Instances) > 1 {
85 idx = fmt.Sprintf(".%d", key)
86 } 91 }
87 case addrs.StringKey:
88 idx = "." + key.String()
89 }
90 92
 91 mod.Resources[res.Addr.String()+idx] = resState 93 // convert the indexes to the old style flatmap indexes
94 idx := ""
95 switch key.(type) {
96 case addrs.IntKey:
97 // don't add numeric index values to resources with a count of 0
98 if len(res.Instances) > 1 {
99 idx = fmt.Sprintf(".%d", key)
100 }
101 case addrs.StringKey:
102 idx = "." + key.String()
103 }
104
105 mod.Resources[res.Addr.String()+idx] = resState
106 }
92 107
93 // add any deposed instances 108 // add any deposed instances
94 for _, dep := range i.Deposed { 109 for _, dep := range i.Deposed {
@@ -97,10 +112,19 @@ func shimNewState(newState *states.State, providers map[string]terraform.Resourc
97 return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) 112 return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err)
98 } 113 }
99 114
115 var meta map[string]interface{}
116 if dep.Private != nil {
117 err := json.Unmarshal(dep.Private, &meta)
118 if err != nil {
119 return nil, err
120 }
121 }
122
100 deposed := &terraform.InstanceState{ 123 deposed := &terraform.InstanceState{
101 ID: flatmap["id"], 124 ID: flatmap["id"],
102 Attributes: flatmap, 125 Attributes: flatmap,
103 Tainted: dep.Status == states.ObjectTainted, 126 Tainted: dep.Status == states.ObjectTainted,
127 Meta: meta,
104 } 128 }
105 if dep.SchemaVersion != 0 { 129 if dep.SchemaVersion != 0 {
106 deposed.Meta = map[string]interface{}{ 130 deposed.Meta = map[string]interface{}{
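Both the current and deposed branches above decode the instance's raw Private bytes into the legacy Meta map. A stand-alone sketch of just that decoding step, with an invented JSON payload:

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

func main() {
    // New-style state keeps per-instance private data as opaque JSON.
    private := []byte(`{"schema_version":"2"}`)

    var meta map[string]interface{}
    if err := json.Unmarshal(private, &meta); err != nil {
        log.Fatal(err)
    }

    // The shim hands this map to terraform.InstanceState.Meta.
    fmt.Println(meta["schema_version"]) // 2
}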
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
index 311fdb6..f34e17a 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -10,7 +10,6 @@ import (
10 "strings" 10 "strings"
11 11
12 "github.com/hashicorp/terraform/addrs" 12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/hcl2shim" 13 "github.com/hashicorp/terraform/config/hcl2shim"
15 "github.com/hashicorp/terraform/states" 14 "github.com/hashicorp/terraform/states"
16 15
@@ -341,7 +340,7 @@ func legacyDiffComparisonString(changes *plans.Changes) string {
341 v := newAttrs[attrK] 340 v := newAttrs[attrK]
342 u := oldAttrs[attrK] 341 u := oldAttrs[attrK]
343 342
344 if v == config.UnknownVariableValue { 343 if v == hcl2shim.UnknownVariableValue {
345 v = "<computed>" 344 v = "<computed>"
346 } 345 }
347 // NOTE: we don't support <sensitive> here because we would 346 // NOTE: we don't support <sensitive> here because we would
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
index 808375c..6ad3f13 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -219,6 +219,9 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult,
219 v, _ := r.Config.Get(key) 219 v, _ := r.Config.Get(key)
220 result[ik] = v 220 result[ik] = v
221 } 221 }
222 case nil:
 223 // the map may have been empty in the configuration, so we leave the
 224 // empty result
222 default: 225 default:
223 panic(fmt.Sprintf("unknown type: %#v", mraw)) 226 panic(fmt.Sprintf("unknown type: %#v", mraw))
224 } 227 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
index ae35b4a..3e70acf 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -95,7 +95,9 @@ func (r *DiffFieldReader) readMap(
95 return FieldReadResult{}, err 95 return FieldReadResult{}, err
96 } 96 }
97 if source.Exists { 97 if source.Exists {
 98 result = source.Value.(map[string]interface{}) 98 // readMap may return a nil value, or an unknown value placeholder in
 99 // some cases, so use the two-value type assertion to avoid a panic
100 result, _ = source.Value.(map[string]interface{})
99 resultSet = true 101 resultSet = true
100 } 102 }
101 103
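The fix above is the standard two-value type assertion: when source.Value is nil or holds the unknown-value placeholder string, the single-value form panics, while the comma-ok form simply leaves result as a nil map. In isolation:

package main

import "fmt"

func main() {
    var value interface{} = "74D93920-ED26-11E3-AC10-0800200C9A66" // unknown placeholder

    // result = value.(map[string]interface{}) would panic here.
    result, ok := value.(map[string]interface{})
    fmt.Println(result == nil, ok) // true false
}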
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
index b5e3065..b59e4e8 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -95,9 +95,10 @@ type Resource struct {
95 // 95 //
96 // Exists is a function that is called to check if a resource still 96 // Exists is a function that is called to check if a resource still
97 // exists. If this returns false, then this will affect the diff 97 // exists. If this returns false, then this will affect the diff
98 // accordingly. If this function isn't set, it will not be called. It 98 // accordingly. If this function isn't set, it will not be called. You
99 // is highly recommended to set it. The *ResourceData passed to Exists 99 // can also signal existence in the Read method by calling d.SetId("")
100 // should _not_ be modified. 100 // if the Resource is no longer present and should be removed from state.
101 // The *ResourceData passed to Exists should _not_ be modified.
101 Create CreateFunc 102 Create CreateFunc
102 Read ReadFunc 103 Read ReadFunc
103 Update UpdateFunc 104 Update UpdateFunc
@@ -329,21 +330,13 @@ func (r *Resource) simpleDiff(
329 c *terraform.ResourceConfig, 330 c *terraform.ResourceConfig,
330 meta interface{}) (*terraform.InstanceDiff, error) { 331 meta interface{}) (*terraform.InstanceDiff, error) {
331 332
332 t := &ResourceTimeout{}
333 err := t.ConfigDecode(r, c)
334
335 if err != nil {
336 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
337 }
338
339 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) 333 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false)
340 if err != nil { 334 if err != nil {
341 return instanceDiff, err 335 return instanceDiff, err
342 } 336 }
343 337
344 if instanceDiff == nil { 338 if instanceDiff == nil {
345 log.Printf("[DEBUG] Instance Diff is nil in SimpleDiff()") 339 instanceDiff = terraform.NewInstanceDiff()
346 return nil, err
347 } 340 }
348 341
349 // Make sure the old value is set in each of the instance diffs. 342 // Make sure the old value is set in each of the instance diffs.
@@ -357,10 +350,7 @@ func (r *Resource) simpleDiff(
357 } 350 }
358 } 351 }
359 352
360 if err := t.DiffEncode(instanceDiff); err != nil { 353 return instanceDiff, nil
361 log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
362 }
363 return instanceDiff, err
364} 354}
365 355
366// Validate validates the resource configuration against the schema. 356// Validate validates the resource configuration against the schema.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
index 9e422c1..222b2cc 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -5,7 +5,7 @@ import (
5 "log" 5 "log"
6 "time" 6 "time"
7 7
8 "github.com/hashicorp/terraform/config" 8 "github.com/hashicorp/terraform/config/hcl2shim"
9 "github.com/hashicorp/terraform/terraform" 9 "github.com/hashicorp/terraform/terraform"
10 "github.com/mitchellh/copystructure" 10 "github.com/mitchellh/copystructure"
11) 11)
@@ -70,7 +70,7 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig)
70 case []map[string]interface{}: 70 case []map[string]interface{}:
71 rawTimeouts = raw 71 rawTimeouts = raw
72 case string: 72 case string:
73 if raw == config.UnknownVariableValue { 73 if raw == hcl2shim.UnknownVariableValue {
74 // Timeout is not defined in the config 74 // Timeout is not defined in the config
75 // Defaults will be used instead 75 // Defaults will be used instead
76 return nil 76 return nil
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
index 6a3c15a..bcc8e4b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -22,7 +22,7 @@ import (
22 "strings" 22 "strings"
23 "sync" 23 "sync"
24 24
25 "github.com/hashicorp/terraform/config" 25 "github.com/hashicorp/terraform/config/hcl2shim"
26 "github.com/hashicorp/terraform/terraform" 26 "github.com/hashicorp/terraform/terraform"
27 "github.com/mitchellh/copystructure" 27 "github.com/mitchellh/copystructure"
28 "github.com/mitchellh/mapstructure" 28 "github.com/mitchellh/mapstructure"
@@ -1365,10 +1365,15 @@ func (m schemaMap) validate(
1365 "%q: this field cannot be set", k)} 1365 "%q: this field cannot be set", k)}
1366 } 1366 }
1367 1367
1368 if raw == config.UnknownVariableValue { 1368 // If the value is unknown then we can't validate it yet.
1369 // If the value is unknown then we can't validate it yet. 1369 // In particular, this avoids spurious type errors where downstream
1370 // In particular, this avoids spurious type errors where downstream 1370 // validation code sees UnknownVariableValue as being just a string.
1371 // validation code sees UnknownVariableValue as being just a string. 1371 // The SDK has to allow the unknown value through initially, so that
1372 // Required fields set via an interpolated value are accepted.
1373 if !isWhollyKnown(raw) {
1374 if schema.Deprecated != "" {
1375 return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil
1376 }
1372 return nil, nil 1377 return nil, nil
1373 } 1378 }
1374 1379
@@ -1380,6 +1385,28 @@ func (m schemaMap) validate(
1380 return m.validateType(k, raw, schema, c) 1385 return m.validateType(k, raw, schema, c)
1381} 1386}
1382 1387
1388// isWhollyKnown returns false if the argument contains an UnknownVariableValue
1389func isWhollyKnown(raw interface{}) bool {
1390 switch raw := raw.(type) {
1391 case string:
1392 if raw == hcl2shim.UnknownVariableValue {
1393 return false
1394 }
1395 case []interface{}:
1396 for _, v := range raw {
1397 if !isWhollyKnown(v) {
1398 return false
1399 }
1400 }
1401 case map[string]interface{}:
1402 for _, v := range raw {
1403 if !isWhollyKnown(v) {
1404 return false
1405 }
1406 }
1407 }
1408 return true
1409}
1383func (m schemaMap) validateConflictingAttributes( 1410func (m schemaMap) validateConflictingAttributes(
1384 k string, 1411 k string,
1385 schema *Schema, 1412 schema *Schema,
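A quick check of the new helper's recursion over nested collections; the function below mirrors the isWhollyKnown added above, and the inputs are illustrative:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/config/hcl2shim"
)

// isWhollyKnown mirrors the helper added above: false if the unknown
// sentinel appears anywhere in the (possibly nested) raw value.
func isWhollyKnown(raw interface{}) bool {
    switch raw := raw.(type) {
    case string:
        return raw != hcl2shim.UnknownVariableValue
    case []interface{}:
        for _, v := range raw {
            if !isWhollyKnown(v) {
                return false
            }
        }
    case map[string]interface{}:
        for _, v := range raw {
            if !isWhollyKnown(v) {
                return false
            }
        }
    }
    return true
}

func main() {
    fmt.Println(isWhollyKnown("ami-12345"))                                       // true
    fmt.Println(isWhollyKnown([]interface{}{"a", hcl2shim.UnknownVariableValue})) // false
    fmt.Println(isWhollyKnown(map[string]interface{}{"k": []interface{}{"v"}}))   // true
}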
@@ -1391,7 +1418,7 @@ func (m schemaMap) validateConflictingAttributes(
1391 1418
1392 for _, conflictingKey := range schema.ConflictsWith { 1419 for _, conflictingKey := range schema.ConflictsWith {
1393 if raw, ok := c.Get(conflictingKey); ok { 1420 if raw, ok := c.Get(conflictingKey); ok {
1394 if raw == config.UnknownVariableValue { 1421 if raw == hcl2shim.UnknownVariableValue {
1395 // An unknown value might become unset (null) once known, so 1422 // An unknown value might become unset (null) once known, so
1396 // we must defer validation until it's known. 1423 // we must defer validation until it's known.
1397 continue 1424 continue
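
The ConflictsWith branch follows the same philosophy: a conflicting key whose value is still unknown might resolve to null, so flagging it now would be a false positive, and the check is postponed to a later validation pass. A hedged sketch, with a toy stand-in for *terraform.ResourceConfig:

package main

import "fmt"

const unknown = "74D93920-ED26-11E3-AC10-0800200C9A66"

// cfg stands in for *terraform.ResourceConfig.Get, for illustration only.
type cfg map[string]interface{}

func (c cfg) Get(k string) (interface{}, bool) { v, ok := c[k]; return v, ok }

// conflictErrors mirrors the hunk: a set-but-unknown key is skipped because
// it may become unset (null) once known; only known, set keys conflict.
func conflictErrors(k string, conflictsWith []string, c cfg) []error {
	var errs []error
	for _, other := range conflictsWith {
		if raw, ok := c.Get(other); ok {
			if raw == unknown {
				continue // defer: can't tell yet whether it is really set
			}
			errs = append(errs, fmt.Errorf("%q: conflicts with %s", k, other))
		}
	}
	return errs
}

func main() {
	c := cfg{"password": unknown}
	fmt.Println(conflictErrors("password_file", []string{"password"}, c)) // [] — deferred
}
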
@@ -1411,11 +1438,16 @@ func (m schemaMap) validateList(
1411 c *terraform.ResourceConfig) ([]string, []error) { 1438 c *terraform.ResourceConfig) ([]string, []error) {
1412 // first check if the list is wholly unknown 1439 // first check if the list is wholly unknown
1413 if s, ok := raw.(string); ok { 1440 if s, ok := raw.(string); ok {
1414 if s == config.UnknownVariableValue { 1441 if s == hcl2shim.UnknownVariableValue {
1415 return nil, nil 1442 return nil, nil
1416 } 1443 }
1417 } 1444 }
1418 1445
1446 // schemaMap can't validate nil
1447 if raw == nil {
1448 return nil, nil
1449 }
1450
1419 // We use reflection to verify the slice because you can't 1451 // We use reflection to verify the slice because you can't
1420 // cast to []interface{} unless the slice is exactly that type. 1452 // cast to []interface{} unless the slice is exactly that type.
1421 rawV := reflect.ValueOf(raw) 1453 rawV := reflect.ValueOf(raw)
@@ -1432,6 +1464,15 @@ func (m schemaMap) validateList(
1432 "%s: should be a list", k)} 1464 "%s: should be a list", k)}
1433 } 1465 }
1434 1466
1467 // We can't validate list length if this came from a dynamic block.
1468 // Since there's no way to determine if something was from a dynamic block
1469 // at this point, we're going to skip validation in the new protocol if
1470 // there are any unknowns. Validate will eventually be called again once
1471 // all values are known.
1472 if isProto5() && !isWhollyKnown(raw) {
1473 return nil, nil
1474 }
1475
1435 // Validate length 1476 // Validate length
1436 if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { 1477 if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
1437 return nil, []error{fmt.Errorf( 1478 return nil, []error{fmt.Errorf(
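
The isProto5 gate exists because dynamic blocks under the new protocol can expand to an as-yet-unknown number of elements, making MinItems/MaxItems unverifiable until values resolve; as the hunk's comment notes, Validate runs again once everything is known. A sketch of that gating, where proto5 is a plain variable standing in for the SDK's internal isProto5(), and the error strings are illustrative:

package main

import "fmt"

// proto5 stands in for the SDK's internal isProto5() switch.
var proto5 = true

const unknown = "74D93920-ED26-11E3-AC10-0800200C9A66"

func hasUnknown(list []interface{}) bool {
	for _, v := range list {
		if v == unknown {
			return true
		}
	}
	return false
}

// checkLength mirrors the hunk's gating: with unknowns present under the
// new protocol, length limits are not enforced yet — validation is
// deferred until every element is known.
func checkLength(list []interface{}, min, max int) error {
	if proto5 && hasUnknown(list) {
		return nil
	}
	if max > 0 && len(list) > max {
		return fmt.Errorf("no more than %d items allowed, got %d", max, len(list))
	}
	if min > 0 && len(list) < min {
		return fmt.Errorf("at least %d items required, got %d", min, len(list))
	}
	return nil
}

func main() {
	fmt.Println(checkLength([]interface{}{"a", unknown, "b"}, 0, 2)) // <nil>: deferred
}
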
@@ -1489,11 +1530,15 @@ func (m schemaMap) validateMap(
1489 c *terraform.ResourceConfig) ([]string, []error) { 1530 c *terraform.ResourceConfig) ([]string, []error) {
1490 // first check if the map is wholly unknown 1531 // first check if the map is wholly unknown
1491 if s, ok := raw.(string); ok { 1532 if s, ok := raw.(string); ok {
1492 if s == config.UnknownVariableValue { 1533 if s == hcl2shim.UnknownVariableValue {
1493 return nil, nil 1534 return nil, nil
1494 } 1535 }
1495 } 1536 }
1496 1537
1538 // schemaMap can't validate nil
1539 if raw == nil {
1540 return nil, nil
1541 }
1497 // We use reflection to verify the map because you can't 1542 // We use reflection to verify the map because you can't
1498 // cast to map[string]interface{} unless the value is exactly that type. 1543 // cast to map[string]interface{} unless the value is exactly that type.
1499 rawV := reflect.ValueOf(raw) 1544 rawV := reflect.ValueOf(raw)
@@ -1620,6 +1665,12 @@ func (m schemaMap) validateObject(
1620 schema map[string]*Schema, 1665 schema map[string]*Schema,
1621 c *terraform.ResourceConfig) ([]string, []error) { 1666 c *terraform.ResourceConfig) ([]string, []error) {
1622 raw, _ := c.Get(k) 1667 raw, _ := c.Get(k)
1668
1669 // schemaMap can't validate nil
1670 if raw == nil {
1671 return nil, nil
1672 }
1673
1623 if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { 1674 if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) {
1624 return nil, []error{fmt.Errorf( 1675 return nil, []error{fmt.Errorf(
1625 "%s: expected object, got %s", 1676 "%s: expected object, got %s",
@@ -1664,6 +1715,14 @@ func (m schemaMap) validatePrimitive(
1664 raw interface{}, 1715 raw interface{},
1665 schema *Schema, 1716 schema *Schema,
1666 c *terraform.ResourceConfig) ([]string, []error) { 1717 c *terraform.ResourceConfig) ([]string, []error) {
1718
1719 // a nil value shouldn't happen in the old protocol, and in the new
1720 // protocol the types have already been validated. Either way, we can't
1721 // reflect on nil, so don't panic.
1722 if raw == nil {
1723 return nil, nil
1724 }
1725
1667 // Catch if the user gave a complex type where a primitive was 1726 // Catch if the user gave a complex type where a primitive was
1668 // expected, so we can return a friendly error message that 1727 // expected, so we can return a friendly error message that
1669 // doesn't contain Go type system terminology. 1728 // doesn't contain Go type system terminology.
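
All four nil guards added in this file (validateList, validateMap, validateObject, validatePrimitive) protect the same operation: reflect.ValueOf(nil) produces the zero Value, whose Kind is reflect.Invalid, and calling a kind-specific method such as Len on it panics. A small demonstration of the failure mode and the guard:

package main

import (
	"fmt"
	"reflect"
)

// lenOrZero shows why the diff's nil guards exist: nil must be handled
// before any reflection, or Value.Len would panic on the invalid Value.
func lenOrZero(raw interface{}) int {
	if raw == nil {
		return 0 // mirrors the diff: "schemaMap can't validate nil"
	}
	v := reflect.ValueOf(raw)
	if v.Kind() == reflect.Slice || v.Kind() == reflect.Map {
		return v.Len()
	}
	return 0
}

func main() {
	fmt.Println(reflect.ValueOf(nil).Kind()) // invalid
	fmt.Println(lenOrZero(nil))              // 0, no panic
	fmt.Println(lenOrZero([]int{1, 2, 3}))   // 3
}
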
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
index 203d017..988573e 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
@@ -6,7 +6,7 @@ import (
6 "github.com/zclconf/go-cty/cty" 6 "github.com/zclconf/go-cty/cty"
7 ctyjson "github.com/zclconf/go-cty/cty/json" 7 ctyjson "github.com/zclconf/go-cty/cty/json"
8 8
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/terraform/config/hcl2shim"
10 "github.com/hashicorp/terraform/configs/configschema" 10 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/terraform" 11 "github.com/hashicorp/terraform/terraform"
12) 12)
@@ -50,7 +50,7 @@ func removeConfigUnknowns(cfg map[string]interface{}) {
50 for k, v := range cfg { 50 for k, v := range cfg {
51 switch v := v.(type) { 51 switch v := v.(type) {
52 case string: 52 case string:
53 if v == config.UnknownVariableValue { 53 if v == hcl2shim.UnknownVariableValue {
54 delete(cfg, k) 54 delete(cfg, k)
55 } 55 }
56 case []interface{}: 56 case []interface{}:
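
removeConfigUnknowns scrubs the sentinel out of a raw config map before it reaches code with no representation for unknowns. The hunk shows only the string case; the standalone version below also recurses into nested maps and slice elements, which is an assumption about the elided arms of the switch, consistent with the visible case []interface{}: label:

package main

import "fmt"

const unknown = "74D93920-ED26-11E3-AC10-0800200C9A66"

// removeConfigUnknowns deletes unknown string leaves in place, recursing
// into nested maps and the map elements of slices.
func removeConfigUnknowns(cfg map[string]interface{}) {
	for k, v := range cfg {
		switch v := v.(type) {
		case string:
			if v == unknown {
				delete(cfg, k) // deleting during range is safe in Go
			}
		case []interface{}:
			for _, e := range v {
				if m, ok := e.(map[string]interface{}); ok {
					removeConfigUnknowns(m)
				}
			}
		case map[string]interface{}:
			removeConfigUnknowns(v)
		}
	}
}

func main() {
	cfg := map[string]interface{}{"name": "web", "ip": unknown}
	removeConfigUnknowns(cfg)
	fmt.Println(cfg) // map[name:web]
}
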
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
index 50e2572..2f306be 100644
--- a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
@@ -22,6 +22,7 @@ import (
22var goGetterDetectors = []getter.Detector{ 22var goGetterDetectors = []getter.Detector{
23 new(getter.GitHubDetector), 23 new(getter.GitHubDetector),
24 new(getter.BitBucketDetector), 24 new(getter.BitBucketDetector),
25 new(getter.GCSDetector),
25 new(getter.S3Detector), 26 new(getter.S3Detector),
26 new(getter.FileDetector), 27 new(getter.FileDetector),
27} 28}
@@ -46,6 +47,7 @@ var goGetterDecompressors = map[string]getter.Decompressor{
46 47
47var goGetterGetters = map[string]getter.Getter{ 48var goGetterGetters = map[string]getter.Getter{
48 "file": new(getter.FileGetter), 49 "file": new(getter.FileGetter),
50 "gcs": new(getter.GCSGetter),
49 "git": new(getter.GitGetter), 51 "git": new(getter.GitGetter),
50 "hg": new(getter.HgGetter), 52 "hg": new(getter.HgGetter),
51 "s3": new(getter.S3Getter), 53 "s3": new(getter.S3Getter),
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
index 87a6bec..b2bdf88 100644
--- a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
@@ -3,13 +3,12 @@
3 3
4package tfplugin5 4package tfplugin5
5 5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10import ( 6import (
7 fmt "fmt"
8 proto "github.com/golang/protobuf/proto"
11 context "golang.org/x/net/context" 9 context "golang.org/x/net/context"
12 grpc "google.golang.org/grpc" 10 grpc "google.golang.org/grpc"
11 math "math"
13) 12)
14 13
15// Reference imports to suppress errors if they are not otherwise used. 14// Reference imports to suppress errors if they are not otherwise used.
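
The import hunk is cosmetic regeneration from a newer protoc-gen-go, which emits one grouped import block instead of individual import statements; the file then keeps the blank-identifier guards referenced in the comment above, which pin packages that generated code may only use conditionally. That guard idiom, shown standalone:

package main

import (
	"fmt"
	"math"
)

// Blank assignments keep the compiler's "imported and not used" error away
// without affecting behavior — the same trick the generated file uses for
// proto, fmt, and math.
var _ = fmt.Errorf
var _ = math.Inf

func main() {}
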
@@ -36,6 +35,7 @@ var Diagnostic_Severity_name = map[int32]string{
36 1: "ERROR", 35 1: "ERROR",
37 2: "WARNING", 36 2: "WARNING",
38} 37}
38
39var Diagnostic_Severity_value = map[string]int32{ 39var Diagnostic_Severity_value = map[string]int32{
40 "INVALID": 0, 40 "INVALID": 0,
41 "ERROR": 1, 41 "ERROR": 1,
@@ -45,8 +45,9 @@ var Diagnostic_Severity_value = map[string]int32{
45func (x Diagnostic_Severity) String() string { 45func (x Diagnostic_Severity) String() string {
46 return proto.EnumName(Diagnostic_Severity_name, int32(x)) 46 return proto.EnumName(Diagnostic_Severity_name, int32(x))
47} 47}
48
48func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { 49func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) {
49 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1, 0} 50 return fileDescriptor_17ae6090ff270234, []int{1, 0}
50} 51}
51 52
52type Schema_NestedBlock_NestingMode int32 53type Schema_NestedBlock_NestingMode int32
@@ -68,6 +69,7 @@ var Schema_NestedBlock_NestingMode_name = map[int32]string{
68 4: "MAP", 69 4: "MAP",
69 5: "GROUP", 70 5: "GROUP",
70} 71}
72
71var Schema_NestedBlock_NestingMode_value = map[string]int32{ 73var Schema_NestedBlock_NestingMode_value = map[string]int32{
72 "INVALID": 0, 74 "INVALID": 0,
73 "SINGLE": 1, 75 "SINGLE": 1,
@@ -80,8 +82,9 @@ var Schema_NestedBlock_NestingMode_value = map[string]int32{
80func (x Schema_NestedBlock_NestingMode) String() string { 82func (x Schema_NestedBlock_NestingMode) String() string {
81 return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) 83 return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x))
82} 84}
85
83func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { 86func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) {
84 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2, 0} 87 return fileDescriptor_17ae6090ff270234, []int{5, 2, 0}
85} 88}
86 89
87// DynamicValue is an opaque encoding of terraform data, with the field name 90// DynamicValue is an opaque encoding of terraform data, with the field name
@@ -98,16 +101,17 @@ func (m *DynamicValue) Reset() { *m = DynamicValue{} }
98func (m *DynamicValue) String() string { return proto.CompactTextString(m) } 101func (m *DynamicValue) String() string { return proto.CompactTextString(m) }
99func (*DynamicValue) ProtoMessage() {} 102func (*DynamicValue) ProtoMessage() {}
100func (*DynamicValue) Descriptor() ([]byte, []int) { 103func (*DynamicValue) Descriptor() ([]byte, []int) {
101 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{0} 104 return fileDescriptor_17ae6090ff270234, []int{0}
102} 105}
106
103func (m *DynamicValue) XXX_Unmarshal(b []byte) error { 107func (m *DynamicValue) XXX_Unmarshal(b []byte) error {
104 return xxx_messageInfo_DynamicValue.Unmarshal(m, b) 108 return xxx_messageInfo_DynamicValue.Unmarshal(m, b)
105} 109}
106func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 110func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
107 return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) 111 return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic)
108} 112}
109func (dst *DynamicValue) XXX_Merge(src proto.Message) { 113func (m *DynamicValue) XXX_Merge(src proto.Message) {
110 xxx_messageInfo_DynamicValue.Merge(dst, src) 114 xxx_messageInfo_DynamicValue.Merge(m, src)
111} 115}
112func (m *DynamicValue) XXX_Size() int { 116func (m *DynamicValue) XXX_Size() int {
113 return xxx_messageInfo_DynamicValue.Size(m) 117 return xxx_messageInfo_DynamicValue.Size(m)
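
Every message in the file carries this same XXX_ boilerplate, delegating to a shared proto.InternalMessageInfo value; the regeneration renames the Merge receiver from dst to m (matching the other methods) and points Descriptor at the new fileDescriptor_17ae6090ff270234 symbol, with no behavioral change. A trimmed, runnable version of the pattern for a single message — the struct is a stand-in, not the full generated type:

package main

import (
	fmt "fmt"

	proto "github.com/golang/protobuf/proto"
)

// DynamicValue here is a reduced stand-in for the generated message; only
// the boilerplate shape is of interest.
type DynamicValue struct {
	Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"`
}

func (m *DynamicValue) Reset()         { *m = DynamicValue{} }
func (m *DynamicValue) String() string { return proto.CompactTextString(m) }
func (*DynamicValue) ProtoMessage()    {}

// xxx_messageInfo_DynamicValue caches reflection data once; every XXX_
// method, including the renamed Merge receiver, funnels through it.
var xxx_messageInfo_DynamicValue proto.InternalMessageInfo

func (m *DynamicValue) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DynamicValue.Unmarshal(m, b)
}
func (m *DynamicValue) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DynamicValue.Merge(m, src)
}

func main() {
	a := &DynamicValue{Msgpack: []byte("x")}
	b := &DynamicValue{}
	b.XXX_Merge(a)
	fmt.Printf("%q\n", b.Msgpack) // "x"
}
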
@@ -146,16 +150,17 @@ func (m *Diagnostic) Reset() { *m = Diagnostic{} }
146func (m *Diagnostic) String() string { return proto.CompactTextString(m) } 150func (m *Diagnostic) String() string { return proto.CompactTextString(m) }
147func (*Diagnostic) ProtoMessage() {} 151func (*Diagnostic) ProtoMessage() {}
148func (*Diagnostic) Descriptor() ([]byte, []int) { 152func (*Diagnostic) Descriptor() ([]byte, []int) {
149 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1} 153 return fileDescriptor_17ae6090ff270234, []int{1}
150} 154}
155
151func (m *Diagnostic) XXX_Unmarshal(b []byte) error { 156func (m *Diagnostic) XXX_Unmarshal(b []byte) error {
152 return xxx_messageInfo_Diagnostic.Unmarshal(m, b) 157 return xxx_messageInfo_Diagnostic.Unmarshal(m, b)
153} 158}
154func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 159func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
155 return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) 160 return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic)
156} 161}
157func (dst *Diagnostic) XXX_Merge(src proto.Message) { 162func (m *Diagnostic) XXX_Merge(src proto.Message) {
158 xxx_messageInfo_Diagnostic.Merge(dst, src) 163 xxx_messageInfo_Diagnostic.Merge(m, src)
159} 164}
160func (m *Diagnostic) XXX_Size() int { 165func (m *Diagnostic) XXX_Size() int {
161 return xxx_messageInfo_Diagnostic.Size(m) 166 return xxx_messageInfo_Diagnostic.Size(m)
@@ -205,16 +210,17 @@ func (m *AttributePath) Reset() { *m = AttributePath{} }
205func (m *AttributePath) String() string { return proto.CompactTextString(m) } 210func (m *AttributePath) String() string { return proto.CompactTextString(m) }
206func (*AttributePath) ProtoMessage() {} 211func (*AttributePath) ProtoMessage() {}
207func (*AttributePath) Descriptor() ([]byte, []int) { 212func (*AttributePath) Descriptor() ([]byte, []int) {
208 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2} 213 return fileDescriptor_17ae6090ff270234, []int{2}
209} 214}
215
210func (m *AttributePath) XXX_Unmarshal(b []byte) error { 216func (m *AttributePath) XXX_Unmarshal(b []byte) error {
211 return xxx_messageInfo_AttributePath.Unmarshal(m, b) 217 return xxx_messageInfo_AttributePath.Unmarshal(m, b)
212} 218}
213func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 219func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
214 return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) 220 return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic)
215} 221}
216func (dst *AttributePath) XXX_Merge(src proto.Message) { 222func (m *AttributePath) XXX_Merge(src proto.Message) {
217 xxx_messageInfo_AttributePath.Merge(dst, src) 223 xxx_messageInfo_AttributePath.Merge(m, src)
218} 224}
219func (m *AttributePath) XXX_Size() int { 225func (m *AttributePath) XXX_Size() int {
220 return xxx_messageInfo_AttributePath.Size(m) 226 return xxx_messageInfo_AttributePath.Size(m)
@@ -247,16 +253,17 @@ func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} }
247func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } 253func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) }
248func (*AttributePath_Step) ProtoMessage() {} 254func (*AttributePath_Step) ProtoMessage() {}
249func (*AttributePath_Step) Descriptor() ([]byte, []int) { 255func (*AttributePath_Step) Descriptor() ([]byte, []int) {
250 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2, 0} 256 return fileDescriptor_17ae6090ff270234, []int{2, 0}
251} 257}
258
252func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { 259func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error {
253 return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) 260 return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b)
254} 261}
255func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 262func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
256 return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) 263 return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic)
257} 264}
258func (dst *AttributePath_Step) XXX_Merge(src proto.Message) { 265func (m *AttributePath_Step) XXX_Merge(src proto.Message) {
259 xxx_messageInfo_AttributePath_Step.Merge(dst, src) 266 xxx_messageInfo_AttributePath_Step.Merge(m, src)
260} 267}
261func (m *AttributePath_Step) XXX_Size() int { 268func (m *AttributePath_Step) XXX_Size() int {
262 return xxx_messageInfo_AttributePath_Step.Size(m) 269 return xxx_messageInfo_AttributePath_Step.Size(m)
@@ -407,16 +414,17 @@ func (m *Stop) Reset() { *m = Stop{} }
407func (m *Stop) String() string { return proto.CompactTextString(m) } 414func (m *Stop) String() string { return proto.CompactTextString(m) }
408func (*Stop) ProtoMessage() {} 415func (*Stop) ProtoMessage() {}
409func (*Stop) Descriptor() ([]byte, []int) { 416func (*Stop) Descriptor() ([]byte, []int) {
410 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3} 417 return fileDescriptor_17ae6090ff270234, []int{3}
411} 418}
419
412func (m *Stop) XXX_Unmarshal(b []byte) error { 420func (m *Stop) XXX_Unmarshal(b []byte) error {
413 return xxx_messageInfo_Stop.Unmarshal(m, b) 421 return xxx_messageInfo_Stop.Unmarshal(m, b)
414} 422}
415func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 423func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
416 return xxx_messageInfo_Stop.Marshal(b, m, deterministic) 424 return xxx_messageInfo_Stop.Marshal(b, m, deterministic)
417} 425}
418func (dst *Stop) XXX_Merge(src proto.Message) { 426func (m *Stop) XXX_Merge(src proto.Message) {
419 xxx_messageInfo_Stop.Merge(dst, src) 427 xxx_messageInfo_Stop.Merge(m, src)
420} 428}
421func (m *Stop) XXX_Size() int { 429func (m *Stop) XXX_Size() int {
422 return xxx_messageInfo_Stop.Size(m) 430 return xxx_messageInfo_Stop.Size(m)
@@ -437,16 +445,17 @@ func (m *Stop_Request) Reset() { *m = Stop_Request{} }
437func (m *Stop_Request) String() string { return proto.CompactTextString(m) } 445func (m *Stop_Request) String() string { return proto.CompactTextString(m) }
438func (*Stop_Request) ProtoMessage() {} 446func (*Stop_Request) ProtoMessage() {}
439func (*Stop_Request) Descriptor() ([]byte, []int) { 447func (*Stop_Request) Descriptor() ([]byte, []int) {
440 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 0} 448 return fileDescriptor_17ae6090ff270234, []int{3, 0}
441} 449}
450
442func (m *Stop_Request) XXX_Unmarshal(b []byte) error { 451func (m *Stop_Request) XXX_Unmarshal(b []byte) error {
443 return xxx_messageInfo_Stop_Request.Unmarshal(m, b) 452 return xxx_messageInfo_Stop_Request.Unmarshal(m, b)
444} 453}
445func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 454func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
446 return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) 455 return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic)
447} 456}
448func (dst *Stop_Request) XXX_Merge(src proto.Message) { 457func (m *Stop_Request) XXX_Merge(src proto.Message) {
449 xxx_messageInfo_Stop_Request.Merge(dst, src) 458 xxx_messageInfo_Stop_Request.Merge(m, src)
450} 459}
451func (m *Stop_Request) XXX_Size() int { 460func (m *Stop_Request) XXX_Size() int {
452 return xxx_messageInfo_Stop_Request.Size(m) 461 return xxx_messageInfo_Stop_Request.Size(m)
@@ -468,16 +477,17 @@ func (m *Stop_Response) Reset() { *m = Stop_Response{} }
468func (m *Stop_Response) String() string { return proto.CompactTextString(m) } 477func (m *Stop_Response) String() string { return proto.CompactTextString(m) }
469func (*Stop_Response) ProtoMessage() {} 478func (*Stop_Response) ProtoMessage() {}
470func (*Stop_Response) Descriptor() ([]byte, []int) { 479func (*Stop_Response) Descriptor() ([]byte, []int) {
471 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 1} 480 return fileDescriptor_17ae6090ff270234, []int{3, 1}
472} 481}
482
473func (m *Stop_Response) XXX_Unmarshal(b []byte) error { 483func (m *Stop_Response) XXX_Unmarshal(b []byte) error {
474 return xxx_messageInfo_Stop_Response.Unmarshal(m, b) 484 return xxx_messageInfo_Stop_Response.Unmarshal(m, b)
475} 485}
476func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 486func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
477 return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) 487 return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic)
478} 488}
479func (dst *Stop_Response) XXX_Merge(src proto.Message) { 489func (m *Stop_Response) XXX_Merge(src proto.Message) {
480 xxx_messageInfo_Stop_Response.Merge(dst, src) 490 xxx_messageInfo_Stop_Response.Merge(m, src)
481} 491}
482func (m *Stop_Response) XXX_Size() int { 492func (m *Stop_Response) XXX_Size() int {
483 return xxx_messageInfo_Stop_Response.Size(m) 493 return xxx_messageInfo_Stop_Response.Size(m)
@@ -510,16 +520,17 @@ func (m *RawState) Reset() { *m = RawState{} }
510func (m *RawState) String() string { return proto.CompactTextString(m) } 520func (m *RawState) String() string { return proto.CompactTextString(m) }
511func (*RawState) ProtoMessage() {} 521func (*RawState) ProtoMessage() {}
512func (*RawState) Descriptor() ([]byte, []int) { 522func (*RawState) Descriptor() ([]byte, []int) {
513 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{4} 523 return fileDescriptor_17ae6090ff270234, []int{4}
514} 524}
525
515func (m *RawState) XXX_Unmarshal(b []byte) error { 526func (m *RawState) XXX_Unmarshal(b []byte) error {
516 return xxx_messageInfo_RawState.Unmarshal(m, b) 527 return xxx_messageInfo_RawState.Unmarshal(m, b)
517} 528}
518func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 529func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
519 return xxx_messageInfo_RawState.Marshal(b, m, deterministic) 530 return xxx_messageInfo_RawState.Marshal(b, m, deterministic)
520} 531}
521func (dst *RawState) XXX_Merge(src proto.Message) { 532func (m *RawState) XXX_Merge(src proto.Message) {
522 xxx_messageInfo_RawState.Merge(dst, src) 533 xxx_messageInfo_RawState.Merge(m, src)
523} 534}
524func (m *RawState) XXX_Size() int { 535func (m *RawState) XXX_Size() int {
525 return xxx_messageInfo_RawState.Size(m) 536 return xxx_messageInfo_RawState.Size(m)
@@ -561,16 +572,17 @@ func (m *Schema) Reset() { *m = Schema{} }
561func (m *Schema) String() string { return proto.CompactTextString(m) } 572func (m *Schema) String() string { return proto.CompactTextString(m) }
562func (*Schema) ProtoMessage() {} 573func (*Schema) ProtoMessage() {}
563func (*Schema) Descriptor() ([]byte, []int) { 574func (*Schema) Descriptor() ([]byte, []int) {
564 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5} 575 return fileDescriptor_17ae6090ff270234, []int{5}
565} 576}
577
566func (m *Schema) XXX_Unmarshal(b []byte) error { 578func (m *Schema) XXX_Unmarshal(b []byte) error {
567 return xxx_messageInfo_Schema.Unmarshal(m, b) 579 return xxx_messageInfo_Schema.Unmarshal(m, b)
568} 580}
569func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 581func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
570 return xxx_messageInfo_Schema.Marshal(b, m, deterministic) 582 return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
571} 583}
572func (dst *Schema) XXX_Merge(src proto.Message) { 584func (m *Schema) XXX_Merge(src proto.Message) {
573 xxx_messageInfo_Schema.Merge(dst, src) 585 xxx_messageInfo_Schema.Merge(m, src)
574} 586}
575func (m *Schema) XXX_Size() int { 587func (m *Schema) XXX_Size() int {
576 return xxx_messageInfo_Schema.Size(m) 588 return xxx_messageInfo_Schema.Size(m)
@@ -608,16 +620,17 @@ func (m *Schema_Block) Reset() { *m = Schema_Block{} }
608func (m *Schema_Block) String() string { return proto.CompactTextString(m) } 620func (m *Schema_Block) String() string { return proto.CompactTextString(m) }
609func (*Schema_Block) ProtoMessage() {} 621func (*Schema_Block) ProtoMessage() {}
610func (*Schema_Block) Descriptor() ([]byte, []int) { 622func (*Schema_Block) Descriptor() ([]byte, []int) {
611 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 0} 623 return fileDescriptor_17ae6090ff270234, []int{5, 0}
612} 624}
625
613func (m *Schema_Block) XXX_Unmarshal(b []byte) error { 626func (m *Schema_Block) XXX_Unmarshal(b []byte) error {
614 return xxx_messageInfo_Schema_Block.Unmarshal(m, b) 627 return xxx_messageInfo_Schema_Block.Unmarshal(m, b)
615} 628}
616func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 629func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
617 return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) 630 return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic)
618} 631}
619func (dst *Schema_Block) XXX_Merge(src proto.Message) { 632func (m *Schema_Block) XXX_Merge(src proto.Message) {
620 xxx_messageInfo_Schema_Block.Merge(dst, src) 633 xxx_messageInfo_Schema_Block.Merge(m, src)
621} 634}
622func (m *Schema_Block) XXX_Size() int { 635func (m *Schema_Block) XXX_Size() int {
623 return xxx_messageInfo_Schema_Block.Size(m) 636 return xxx_messageInfo_Schema_Block.Size(m)
@@ -666,16 +679,17 @@ func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} }
666func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } 679func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) }
667func (*Schema_Attribute) ProtoMessage() {} 680func (*Schema_Attribute) ProtoMessage() {}
668func (*Schema_Attribute) Descriptor() ([]byte, []int) { 681func (*Schema_Attribute) Descriptor() ([]byte, []int) {
669 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 1} 682 return fileDescriptor_17ae6090ff270234, []int{5, 1}
670} 683}
684
671func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { 685func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error {
672 return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) 686 return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b)
673} 687}
674func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 688func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
675 return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) 689 return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic)
676} 690}
677func (dst *Schema_Attribute) XXX_Merge(src proto.Message) { 691func (m *Schema_Attribute) XXX_Merge(src proto.Message) {
678 xxx_messageInfo_Schema_Attribute.Merge(dst, src) 692 xxx_messageInfo_Schema_Attribute.Merge(m, src)
679} 693}
680func (m *Schema_Attribute) XXX_Size() int { 694func (m *Schema_Attribute) XXX_Size() int {
681 return xxx_messageInfo_Schema_Attribute.Size(m) 695 return xxx_messageInfo_Schema_Attribute.Size(m)
@@ -750,16 +764,17 @@ func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} }
750func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } 764func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) }
751func (*Schema_NestedBlock) ProtoMessage() {} 765func (*Schema_NestedBlock) ProtoMessage() {}
752func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { 766func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
753 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2} 767 return fileDescriptor_17ae6090ff270234, []int{5, 2}
754} 768}
769
755func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { 770func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error {
756 return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) 771 return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b)
757} 772}
758func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 773func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
759 return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) 774 return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic)
760} 775}
761func (dst *Schema_NestedBlock) XXX_Merge(src proto.Message) { 776func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) {
762 xxx_messageInfo_Schema_NestedBlock.Merge(dst, src) 777 xxx_messageInfo_Schema_NestedBlock.Merge(m, src)
763} 778}
764func (m *Schema_NestedBlock) XXX_Size() int { 779func (m *Schema_NestedBlock) XXX_Size() int {
765 return xxx_messageInfo_Schema_NestedBlock.Size(m) 780 return xxx_messageInfo_Schema_NestedBlock.Size(m)
@@ -815,16 +830,17 @@ func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} }
815func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } 830func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) }
816func (*GetProviderSchema) ProtoMessage() {} 831func (*GetProviderSchema) ProtoMessage() {}
817func (*GetProviderSchema) Descriptor() ([]byte, []int) { 832func (*GetProviderSchema) Descriptor() ([]byte, []int) {
818 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6} 833 return fileDescriptor_17ae6090ff270234, []int{6}
819} 834}
835
820func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { 836func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error {
821 return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) 837 return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b)
822} 838}
823func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 839func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
824 return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) 840 return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic)
825} 841}
826func (dst *GetProviderSchema) XXX_Merge(src proto.Message) { 842func (m *GetProviderSchema) XXX_Merge(src proto.Message) {
827 xxx_messageInfo_GetProviderSchema.Merge(dst, src) 843 xxx_messageInfo_GetProviderSchema.Merge(m, src)
828} 844}
829func (m *GetProviderSchema) XXX_Size() int { 845func (m *GetProviderSchema) XXX_Size() int {
830 return xxx_messageInfo_GetProviderSchema.Size(m) 846 return xxx_messageInfo_GetProviderSchema.Size(m)
@@ -845,16 +861,17 @@ func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Req
845func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } 861func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) }
846func (*GetProviderSchema_Request) ProtoMessage() {} 862func (*GetProviderSchema_Request) ProtoMessage() {}
847func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { 863func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
848 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 0} 864 return fileDescriptor_17ae6090ff270234, []int{6, 0}
849} 865}
866
850func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { 867func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error {
851 return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) 868 return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b)
852} 869}
853func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 870func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
854 return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) 871 return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic)
855} 872}
856func (dst *GetProviderSchema_Request) XXX_Merge(src proto.Message) { 873func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) {
857 xxx_messageInfo_GetProviderSchema_Request.Merge(dst, src) 874 xxx_messageInfo_GetProviderSchema_Request.Merge(m, src)
858} 875}
859func (m *GetProviderSchema_Request) XXX_Size() int { 876func (m *GetProviderSchema_Request) XXX_Size() int {
860 return xxx_messageInfo_GetProviderSchema_Request.Size(m) 877 return xxx_messageInfo_GetProviderSchema_Request.Size(m)
@@ -879,16 +896,17 @@ func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Re
879func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } 896func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) }
880func (*GetProviderSchema_Response) ProtoMessage() {} 897func (*GetProviderSchema_Response) ProtoMessage() {}
881func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { 898func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
882 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 1} 899 return fileDescriptor_17ae6090ff270234, []int{6, 1}
883} 900}
901
884func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { 902func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error {
885 return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) 903 return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b)
886} 904}
887func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 905func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
888 return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) 906 return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic)
889} 907}
890func (dst *GetProviderSchema_Response) XXX_Merge(src proto.Message) { 908func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) {
891 xxx_messageInfo_GetProviderSchema_Response.Merge(dst, src) 909 xxx_messageInfo_GetProviderSchema_Response.Merge(m, src)
892} 910}
893func (m *GetProviderSchema_Response) XXX_Size() int { 911func (m *GetProviderSchema_Response) XXX_Size() int {
894 return xxx_messageInfo_GetProviderSchema_Response.Size(m) 912 return xxx_messageInfo_GetProviderSchema_Response.Size(m)
@@ -937,16 +955,17 @@ func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} }
937func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } 955func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) }
938func (*PrepareProviderConfig) ProtoMessage() {} 956func (*PrepareProviderConfig) ProtoMessage() {}
939func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { 957func (*PrepareProviderConfig) Descriptor() ([]byte, []int) {
940 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7} 958 return fileDescriptor_17ae6090ff270234, []int{7}
941} 959}
960
942func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { 961func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error {
943 return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) 962 return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b)
944} 963}
945func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 964func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
946 return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) 965 return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic)
947} 966}
948func (dst *PrepareProviderConfig) XXX_Merge(src proto.Message) { 967func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) {
949 xxx_messageInfo_PrepareProviderConfig.Merge(dst, src) 968 xxx_messageInfo_PrepareProviderConfig.Merge(m, src)
950} 969}
951func (m *PrepareProviderConfig) XXX_Size() int { 970func (m *PrepareProviderConfig) XXX_Size() int {
952 return xxx_messageInfo_PrepareProviderConfig.Size(m) 971 return xxx_messageInfo_PrepareProviderConfig.Size(m)
@@ -968,16 +987,17 @@ func (m *PrepareProviderConfig_Request) Reset() { *m = PrepareProviderCo
968func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } 987func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) }
969func (*PrepareProviderConfig_Request) ProtoMessage() {} 988func (*PrepareProviderConfig_Request) ProtoMessage() {}
970func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { 989func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
971 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 0} 990 return fileDescriptor_17ae6090ff270234, []int{7, 0}
972} 991}
992
973func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { 993func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error {
974 return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) 994 return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b)
975} 995}
976func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 996func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
977 return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) 997 return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic)
978} 998}
979func (dst *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { 999func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) {
980 xxx_messageInfo_PrepareProviderConfig_Request.Merge(dst, src) 1000 xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src)
981} 1001}
982func (m *PrepareProviderConfig_Request) XXX_Size() int { 1002func (m *PrepareProviderConfig_Request) XXX_Size() int {
983 return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) 1003 return xxx_messageInfo_PrepareProviderConfig_Request.Size(m)
@@ -1007,16 +1027,17 @@ func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderC
1007func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } 1027func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) }
1008func (*PrepareProviderConfig_Response) ProtoMessage() {} 1028func (*PrepareProviderConfig_Response) ProtoMessage() {}
1009func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { 1029func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) {
1010 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 1} 1030 return fileDescriptor_17ae6090ff270234, []int{7, 1}
1011} 1031}
1032
1012func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { 1033func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error {
1013 return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) 1034 return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b)
1014} 1035}
1015func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1036func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1016 return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) 1037 return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic)
1017} 1038}
1018func (dst *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { 1039func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) {
1019 xxx_messageInfo_PrepareProviderConfig_Response.Merge(dst, src) 1040 xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src)
1020} 1041}
1021func (m *PrepareProviderConfig_Response) XXX_Size() int { 1042func (m *PrepareProviderConfig_Response) XXX_Size() int {
1022 return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) 1043 return xxx_messageInfo_PrepareProviderConfig_Response.Size(m)
@@ -1051,16 +1072,17 @@ func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} }
1051func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } 1072func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) }
1052func (*UpgradeResourceState) ProtoMessage() {} 1073func (*UpgradeResourceState) ProtoMessage() {}
1053func (*UpgradeResourceState) Descriptor() ([]byte, []int) { 1074func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
1054 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8} 1075 return fileDescriptor_17ae6090ff270234, []int{8}
1055} 1076}
1077
1056func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { 1078func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error {
1057 return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) 1079 return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b)
1058} 1080}
1059func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1081func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1060 return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) 1082 return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic)
1061} 1083}
1062func (dst *UpgradeResourceState) XXX_Merge(src proto.Message) { 1084func (m *UpgradeResourceState) XXX_Merge(src proto.Message) {
1063 xxx_messageInfo_UpgradeResourceState.Merge(dst, src) 1085 xxx_messageInfo_UpgradeResourceState.Merge(m, src)
1064} 1086}
1065func (m *UpgradeResourceState) XXX_Size() int { 1087func (m *UpgradeResourceState) XXX_Size() int {
1066 return xxx_messageInfo_UpgradeResourceState.Size(m) 1088 return xxx_messageInfo_UpgradeResourceState.Size(m)
@@ -1090,16 +1112,17 @@ func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceSta
1090func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } 1112func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) }
1091func (*UpgradeResourceState_Request) ProtoMessage() {} 1113func (*UpgradeResourceState_Request) ProtoMessage() {}
1092func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { 1114func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
1093 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 0} 1115 return fileDescriptor_17ae6090ff270234, []int{8, 0}
1094} 1116}
1117
1095func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { 1118func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error {
1096 return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) 1119 return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b)
1097} 1120}
1098func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1121func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1099 return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) 1122 return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic)
1100} 1123}
1101func (dst *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { 1124func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
1102 xxx_messageInfo_UpgradeResourceState_Request.Merge(dst, src) 1125 xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src)
1103} 1126}
1104func (m *UpgradeResourceState_Request) XXX_Size() int { 1127func (m *UpgradeResourceState_Request) XXX_Size() int {
1105 return xxx_messageInfo_UpgradeResourceState_Request.Size(m) 1128 return xxx_messageInfo_UpgradeResourceState_Request.Size(m)
@@ -1149,16 +1172,17 @@ func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceSt
1149func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } 1172func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) }
1150func (*UpgradeResourceState_Response) ProtoMessage() {} 1173func (*UpgradeResourceState_Response) ProtoMessage() {}
1151func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { 1174func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) {
1152 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 1} 1175 return fileDescriptor_17ae6090ff270234, []int{8, 1}
1153} 1176}
1177
1154func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { 1178func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error {
1155 return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) 1179 return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b)
1156} 1180}
1157func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1181func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1158 return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) 1182 return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic)
1159} 1183}
1160func (dst *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { 1184func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) {
1161 xxx_messageInfo_UpgradeResourceState_Response.Merge(dst, src) 1185 xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src)
1162} 1186}
1163func (m *UpgradeResourceState_Response) XXX_Size() int { 1187func (m *UpgradeResourceState_Response) XXX_Size() int {
1164 return xxx_messageInfo_UpgradeResourceState_Response.Size(m) 1188 return xxx_messageInfo_UpgradeResourceState_Response.Size(m)
@@ -1193,16 +1217,17 @@ func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceType
1193func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } 1217func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) }
1194func (*ValidateResourceTypeConfig) ProtoMessage() {} 1218func (*ValidateResourceTypeConfig) ProtoMessage() {}
1195func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { 1219func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) {
1196 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9} 1220 return fileDescriptor_17ae6090ff270234, []int{9}
1197} 1221}
1222
1198func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { 1223func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error {
1199 return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) 1224 return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b)
1200} 1225}
1201func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1226func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1202 return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) 1227 return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic)
1203} 1228}
1204func (dst *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { 1229func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) {
1205 xxx_messageInfo_ValidateResourceTypeConfig.Merge(dst, src) 1230 xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src)
1206} 1231}
1207func (m *ValidateResourceTypeConfig) XXX_Size() int { 1232func (m *ValidateResourceTypeConfig) XXX_Size() int {
1208 return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) 1233 return xxx_messageInfo_ValidateResourceTypeConfig.Size(m)
@@ -1225,16 +1250,17 @@ func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateReso
1225func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } 1250func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) }
1226func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} 1251func (*ValidateResourceTypeConfig_Request) ProtoMessage() {}
1227func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { 1252func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) {
1228 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 0} 1253 return fileDescriptor_17ae6090ff270234, []int{9, 0}
1229} 1254}
1255
1230func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { 1256func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error {
1231 return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) 1257 return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b)
1232} 1258}
1233func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1259func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1234 return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) 1260 return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic)
1235} 1261}
1236func (dst *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { 1262func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) {
1237 xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(dst, src) 1263 xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src)
1238} 1264}
1239func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { 1265func (m *ValidateResourceTypeConfig_Request) XXX_Size() int {
1240 return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) 1266 return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m)
@@ -1270,16 +1296,17 @@ func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateRes
1270func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } 1296func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) }
1271func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} 1297func (*ValidateResourceTypeConfig_Response) ProtoMessage() {}
1272func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { 1298func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) {
1273 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 1} 1299 return fileDescriptor_17ae6090ff270234, []int{9, 1}
1274} 1300}
1301
1275func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { 1302func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error {
1276 return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) 1303 return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b)
1277} 1304}
1278func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1305func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1279 return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) 1306 return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic)
1280} 1307}
1281func (dst *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { 1308func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) {
1282 xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(dst, src) 1309 xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src)
1283} 1310}
1284func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { 1311func (m *ValidateResourceTypeConfig_Response) XXX_Size() int {
1285 return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) 1312 return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m)
@@ -1307,16 +1334,17 @@ func (m *ValidateDataSourceConfig) Reset() { *m = ValidateDataSourceConf
1307func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } 1334func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) }
1308func (*ValidateDataSourceConfig) ProtoMessage() {} 1335func (*ValidateDataSourceConfig) ProtoMessage() {}
1309func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { 1336func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) {
1310 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10} 1337 return fileDescriptor_17ae6090ff270234, []int{10}
1311} 1338}
1339
1312func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { 1340func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error {
1313 return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) 1341 return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b)
1314} 1342}
1315func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1343func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1316 return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) 1344 return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic)
1317} 1345}
1318func (dst *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { 1346func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) {
1319 xxx_messageInfo_ValidateDataSourceConfig.Merge(dst, src) 1347 xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src)
1320} 1348}
1321func (m *ValidateDataSourceConfig) XXX_Size() int { 1349func (m *ValidateDataSourceConfig) XXX_Size() int {
1322 return xxx_messageInfo_ValidateDataSourceConfig.Size(m) 1350 return xxx_messageInfo_ValidateDataSourceConfig.Size(m)
@@ -1339,16 +1367,17 @@ func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSo
1339func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } 1367func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) }
1340func (*ValidateDataSourceConfig_Request) ProtoMessage() {} 1368func (*ValidateDataSourceConfig_Request) ProtoMessage() {}
1341func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { 1369func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) {
1342 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 0} 1370 return fileDescriptor_17ae6090ff270234, []int{10, 0}
1343} 1371}
1372
1344func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { 1373func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error {
1345 return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) 1374 return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b)
1346} 1375}
1347func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1376func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1348 return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) 1377 return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic)
1349} 1378}
1350func (dst *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { 1379func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) {
1351 xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(dst, src) 1380 xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src)
1352} 1381}
1353func (m *ValidateDataSourceConfig_Request) XXX_Size() int { 1382func (m *ValidateDataSourceConfig_Request) XXX_Size() int {
1354 return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) 1383 return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m)
@@ -1384,16 +1413,17 @@ func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataS
1384func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } 1413func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) }
1385func (*ValidateDataSourceConfig_Response) ProtoMessage() {} 1414func (*ValidateDataSourceConfig_Response) ProtoMessage() {}
1386func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { 1415func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) {
1387 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 1} 1416 return fileDescriptor_17ae6090ff270234, []int{10, 1}
1388} 1417}
1418
1389func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { 1419func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error {
1390 return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) 1420 return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b)
1391} 1421}
1392func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 1422func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1393 return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) 1423 return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic)
1394} 1424}
1395func (dst *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { 1425func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) {
1396 xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(dst, src) 1426 xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src)
1397} 1427}
1398func (m *ValidateDataSourceConfig_Response) XXX_Size() int { 1428func (m *ValidateDataSourceConfig_Response) XXX_Size() int {
1399 return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) 1429 return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m)
@@ -1421,16 +1451,17 @@ func (m *Configure) Reset() { *m = Configure{} }
 func (m *Configure) String() string { return proto.CompactTextString(m) }
 func (*Configure) ProtoMessage() {}
 func (*Configure) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11}
+	return fileDescriptor_17ae6090ff270234, []int{11}
 }
+
 func (m *Configure) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Configure.Unmarshal(m, b)
 }
 func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Configure.Marshal(b, m, deterministic)
 }
-func (dst *Configure) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Configure.Merge(dst, src)
+func (m *Configure) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Configure.Merge(m, src)
 }
 func (m *Configure) XXX_Size() int {
 	return xxx_messageInfo_Configure.Size(m)
@@ -1453,16 +1484,17 @@ func (m *Configure_Request) Reset() { *m = Configure_Request{} }
 func (m *Configure_Request) String() string { return proto.CompactTextString(m) }
 func (*Configure_Request) ProtoMessage() {}
 func (*Configure_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 0}
+	return fileDescriptor_17ae6090ff270234, []int{11, 0}
 }
+
 func (m *Configure_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Configure_Request.Unmarshal(m, b)
 }
 func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic)
 }
-func (dst *Configure_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Configure_Request.Merge(dst, src)
+func (m *Configure_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Configure_Request.Merge(m, src)
 }
 func (m *Configure_Request) XXX_Size() int {
 	return xxx_messageInfo_Configure_Request.Size(m)
@@ -1498,16 +1530,17 @@ func (m *Configure_Response) Reset() { *m = Configure_Response{} }
 func (m *Configure_Response) String() string { return proto.CompactTextString(m) }
 func (*Configure_Response) ProtoMessage() {}
 func (*Configure_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 1}
+	return fileDescriptor_17ae6090ff270234, []int{11, 1}
 }
+
 func (m *Configure_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Configure_Response.Unmarshal(m, b)
 }
 func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic)
 }
-func (dst *Configure_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Configure_Response.Merge(dst, src)
+func (m *Configure_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Configure_Response.Merge(m, src)
 }
 func (m *Configure_Response) XXX_Size() int {
 	return xxx_messageInfo_Configure_Response.Size(m)
@@ -1535,16 +1568,17 @@ func (m *ReadResource) Reset() { *m = ReadResource{} }
 func (m *ReadResource) String() string { return proto.CompactTextString(m) }
 func (*ReadResource) ProtoMessage() {}
 func (*ReadResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12}
+	return fileDescriptor_17ae6090ff270234, []int{12}
 }
+
 func (m *ReadResource) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReadResource.Unmarshal(m, b)
 }
 func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic)
 }
-func (dst *ReadResource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResource.Merge(dst, src)
+func (m *ReadResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadResource.Merge(m, src)
 }
 func (m *ReadResource) XXX_Size() int {
 	return xxx_messageInfo_ReadResource.Size(m)
@@ -1558,6 +1592,7 @@ var xxx_messageInfo_ReadResource proto.InternalMessageInfo
 type ReadResource_Request struct {
 	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
 	CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"`
+	Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -1567,16 +1602,17 @@ func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} }
 func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) }
 func (*ReadResource_Request) ProtoMessage() {}
 func (*ReadResource_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 0}
+	return fileDescriptor_17ae6090ff270234, []int{12, 0}
 }
+
 func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b)
 }
 func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic)
 }
-func (dst *ReadResource_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResource_Request.Merge(dst, src)
+func (m *ReadResource_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadResource_Request.Merge(m, src)
 }
 func (m *ReadResource_Request) XXX_Size() int {
 	return xxx_messageInfo_ReadResource_Request.Size(m)
@@ -1601,9 +1637,17 @@ func (m *ReadResource_Request) GetCurrentState() *DynamicValue {
 	return nil
 }
 
+func (m *ReadResource_Request) GetPrivate() []byte {
+	if m != nil {
+		return m.Private
+	}
+	return nil
+}
+
 type ReadResource_Response struct {
 	NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
 	Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+	Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -1613,16 +1657,17 @@ func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} }
 func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) }
 func (*ReadResource_Response) ProtoMessage() {}
 func (*ReadResource_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 1}
+	return fileDescriptor_17ae6090ff270234, []int{12, 1}
 }
+
 func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b)
 }
 func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic)
 }
-func (dst *ReadResource_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResource_Response.Merge(dst, src)
+func (m *ReadResource_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadResource_Response.Merge(m, src)
 }
 func (m *ReadResource_Response) XXX_Size() int {
 	return xxx_messageInfo_ReadResource_Response.Size(m)
@@ -1647,6 +1692,13 @@ func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic {
 	return nil
 }
 
+func (m *ReadResource_Response) GetPrivate() []byte {
+	if m != nil {
+		return m.Private
+	}
+	return nil
+}
+
 type PlanResourceChange struct {
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
@@ -1657,16 +1709,17 @@ func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} }
 func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) }
 func (*PlanResourceChange) ProtoMessage() {}
 func (*PlanResourceChange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13}
+	return fileDescriptor_17ae6090ff270234, []int{13}
 }
+
 func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b)
 }
 func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic)
 }
-func (dst *PlanResourceChange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PlanResourceChange.Merge(dst, src)
+func (m *PlanResourceChange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PlanResourceChange.Merge(m, src)
 }
 func (m *PlanResourceChange) XXX_Size() int {
 	return xxx_messageInfo_PlanResourceChange.Size(m)
@@ -1692,16 +1745,17 @@ func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_R
 func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) }
 func (*PlanResourceChange_Request) ProtoMessage() {}
 func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 0}
+	return fileDescriptor_17ae6090ff270234, []int{13, 0}
 }
+
 func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b)
 }
 func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic)
 }
-func (dst *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PlanResourceChange_Request.Merge(dst, src)
+func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PlanResourceChange_Request.Merge(m, src)
 }
 func (m *PlanResourceChange_Request) XXX_Size() int {
 	return xxx_messageInfo_PlanResourceChange_Request.Size(m)
@@ -1773,16 +1827,17 @@ func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_
 func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) }
 func (*PlanResourceChange_Response) ProtoMessage() {}
 func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 1}
+	return fileDescriptor_17ae6090ff270234, []int{13, 1}
 }
+
 func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b)
 }
 func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic)
 }
-func (dst *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PlanResourceChange_Response.Merge(dst, src)
+func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PlanResourceChange_Response.Merge(m, src)
 }
 func (m *PlanResourceChange_Response) XXX_Size() int {
 	return xxx_messageInfo_PlanResourceChange_Response.Size(m)
@@ -1838,16 +1893,17 @@ func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} }
 func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) }
 func (*ApplyResourceChange) ProtoMessage() {}
 func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14}
+	return fileDescriptor_17ae6090ff270234, []int{14}
 }
+
 func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b)
 }
 func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic)
 }
-func (dst *ApplyResourceChange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplyResourceChange.Merge(dst, src)
+func (m *ApplyResourceChange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyResourceChange.Merge(m, src)
 }
 func (m *ApplyResourceChange) XXX_Size() int {
 	return xxx_messageInfo_ApplyResourceChange.Size(m)
@@ -1873,16 +1929,17 @@ func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange
 func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) }
 func (*ApplyResourceChange_Request) ProtoMessage() {}
 func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 0}
+	return fileDescriptor_17ae6090ff270234, []int{14, 0}
 }
+
 func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b)
 }
 func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic)
 }
-func (dst *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplyResourceChange_Request.Merge(dst, src)
+func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src)
 }
 func (m *ApplyResourceChange_Request) XXX_Size() int {
 	return xxx_messageInfo_ApplyResourceChange_Request.Size(m)
@@ -1953,16 +2010,17 @@ func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChang
 func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) }
 func (*ApplyResourceChange_Response) ProtoMessage() {}
 func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 1}
+	return fileDescriptor_17ae6090ff270234, []int{14, 1}
 }
+
 func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b)
 }
 func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic)
 }
-func (dst *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ApplyResourceChange_Response.Merge(dst, src)
+func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src)
 }
 func (m *ApplyResourceChange_Response) XXX_Size() int {
 	return xxx_messageInfo_ApplyResourceChange_Response.Size(m)
@@ -2011,16 +2069,17 @@ func (m *ImportResourceState) Reset() { *m = ImportResourceState{} }
 func (m *ImportResourceState) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState) ProtoMessage() {}
 func (*ImportResourceState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15}
+	return fileDescriptor_17ae6090ff270234, []int{15}
 }
+
 func (m *ImportResourceState) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ImportResourceState.Unmarshal(m, b)
 }
 func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState.Merge(dst, src)
+func (m *ImportResourceState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImportResourceState.Merge(m, src)
 }
 func (m *ImportResourceState) XXX_Size() int {
 	return xxx_messageInfo_ImportResourceState.Size(m)
@@ -2043,16 +2102,17 @@ func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState
 func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState_Request) ProtoMessage() {}
 func (*ImportResourceState_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 0}
+	return fileDescriptor_17ae6090ff270234, []int{15, 0}
 }
+
 func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b)
 }
 func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState_Request.Merge(dst, src)
+func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImportResourceState_Request.Merge(m, src)
 }
 func (m *ImportResourceState_Request) XXX_Size() int {
 	return xxx_messageInfo_ImportResourceState_Request.Size(m)
@@ -2090,16 +2150,17 @@ func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportReso
 func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState_ImportedResource) ProtoMessage() {}
 func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 1}
+	return fileDescriptor_17ae6090ff270234, []int{15, 1}
 }
+
 func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b)
 }
 func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState_ImportedResource.Merge(dst, src)
+func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src)
 }
 func (m *ImportResourceState_ImportedResource) XXX_Size() int {
 	return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m)
@@ -2143,16 +2204,17 @@ func (m *ImportResourceState_Response) Reset() { *m = ImportResourceStat
 func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState_Response) ProtoMessage() {}
 func (*ImportResourceState_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 2}
+	return fileDescriptor_17ae6090ff270234, []int{15, 2}
 }
+
 func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b)
 }
 func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ImportResourceState_Response.Merge(dst, src)
+func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImportResourceState_Response.Merge(m, src)
 }
 func (m *ImportResourceState_Response) XXX_Size() int {
 	return xxx_messageInfo_ImportResourceState_Response.Size(m)
@@ -2187,16 +2249,17 @@ func (m *ReadDataSource) Reset() { *m = ReadDataSource{} }
 func (m *ReadDataSource) String() string { return proto.CompactTextString(m) }
 func (*ReadDataSource) ProtoMessage() {}
 func (*ReadDataSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16}
+	return fileDescriptor_17ae6090ff270234, []int{16}
 }
+
 func (m *ReadDataSource) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReadDataSource.Unmarshal(m, b)
 }
 func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic)
 }
-func (dst *ReadDataSource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadDataSource.Merge(dst, src)
+func (m *ReadDataSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadDataSource.Merge(m, src)
 }
 func (m *ReadDataSource) XXX_Size() int {
 	return xxx_messageInfo_ReadDataSource.Size(m)
@@ -2219,16 +2282,17 @@ func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{}
 func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) }
 func (*ReadDataSource_Request) ProtoMessage() {}
 func (*ReadDataSource_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 0}
+	return fileDescriptor_17ae6090ff270234, []int{16, 0}
 }
+
 func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b)
 }
 func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic)
 }
-func (dst *ReadDataSource_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadDataSource_Request.Merge(dst, src)
+func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadDataSource_Request.Merge(m, src)
 }
 func (m *ReadDataSource_Request) XXX_Size() int {
 	return xxx_messageInfo_ReadDataSource_Request.Size(m)
@@ -2265,16 +2329,17 @@ func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response
 func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) }
 func (*ReadDataSource_Response) ProtoMessage() {}
 func (*ReadDataSource_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 1}
+	return fileDescriptor_17ae6090ff270234, []int{16, 1}
 }
+
 func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b)
 }
 func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic)
 }
-func (dst *ReadDataSource_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadDataSource_Response.Merge(dst, src)
+func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadDataSource_Response.Merge(m, src)
 }
 func (m *ReadDataSource_Response) XXX_Size() int {
 	return xxx_messageInfo_ReadDataSource_Response.Size(m)
@@ -2309,16 +2374,17 @@ func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} }
 func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) }
 func (*GetProvisionerSchema) ProtoMessage() {}
 func (*GetProvisionerSchema) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17}
+	return fileDescriptor_17ae6090ff270234, []int{17}
 }
+
 func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b)
 }
 func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic)
 }
-func (dst *GetProvisionerSchema) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetProvisionerSchema.Merge(dst, src)
+func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetProvisionerSchema.Merge(m, src)
 }
 func (m *GetProvisionerSchema) XXX_Size() int {
 	return xxx_messageInfo_GetProvisionerSchema.Size(m)
@@ -2339,16 +2405,17 @@ func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSche
 func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) }
 func (*GetProvisionerSchema_Request) ProtoMessage() {}
 func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 0}
+	return fileDescriptor_17ae6090ff270234, []int{17, 0}
 }
+
 func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b)
 }
 func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic)
 }
-func (dst *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetProvisionerSchema_Request.Merge(dst, src)
+func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src)
 }
 func (m *GetProvisionerSchema_Request) XXX_Size() int {
 	return xxx_messageInfo_GetProvisionerSchema_Request.Size(m)
@@ -2371,16 +2438,17 @@ func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSch
 func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) }
 func (*GetProvisionerSchema_Response) ProtoMessage() {}
 func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 1}
+	return fileDescriptor_17ae6090ff270234, []int{17, 1}
 }
+
 func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b)
 }
 func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic)
 }
-func (dst *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetProvisionerSchema_Response.Merge(dst, src)
+func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src)
 }
 func (m *GetProvisionerSchema_Response) XXX_Size() int {
 	return xxx_messageInfo_GetProvisionerSchema_Response.Size(m)
@@ -2415,16 +2483,17 @@ func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerCo
 func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) }
 func (*ValidateProvisionerConfig) ProtoMessage() {}
 func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18}
+	return fileDescriptor_17ae6090ff270234, []int{18}
 }
+
 func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b)
 }
 func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic)
 }
-func (dst *ValidateProvisionerConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateProvisionerConfig.Merge(dst, src)
+func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src)
 }
 func (m *ValidateProvisionerConfig) XXX_Size() int {
 	return xxx_messageInfo_ValidateProvisionerConfig.Size(m)
@@ -2446,16 +2515,17 @@ func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvi
 func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) }
 func (*ValidateProvisionerConfig_Request) ProtoMessage() {}
 func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 0}
+	return fileDescriptor_17ae6090ff270234, []int{18, 0}
 }
+
 func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b)
 }
 func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic)
 }
-func (dst *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(dst, src)
+func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src)
 }
 func (m *ValidateProvisionerConfig_Request) XXX_Size() int {
 	return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m)
@@ -2484,16 +2554,17 @@ func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProv
 func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) }
 func (*ValidateProvisionerConfig_Response) ProtoMessage() {}
 func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 1}
+	return fileDescriptor_17ae6090ff270234, []int{18, 1}
 }
+
 func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b)
 }
 func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic)
 }
-func (dst *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(dst, src)
+func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src)
 }
 func (m *ValidateProvisionerConfig_Response) XXX_Size() int {
 	return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m)
@@ -2521,16 +2592,17 @@ func (m *ProvisionResource) Reset() { *m = ProvisionResource{} }
 func (m *ProvisionResource) String() string { return proto.CompactTextString(m) }
 func (*ProvisionResource) ProtoMessage() {}
 func (*ProvisionResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19}
+	return fileDescriptor_17ae6090ff270234, []int{19}
 }
+
 func (m *ProvisionResource) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ProvisionResource.Unmarshal(m, b)
 }
 func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic)
 }
-func (dst *ProvisionResource) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ProvisionResource.Merge(dst, src)
+func (m *ProvisionResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProvisionResource.Merge(m, src)
 }
 func (m *ProvisionResource) XXX_Size() int {
 	return xxx_messageInfo_ProvisionResource.Size(m)
@@ -2553,16 +2625,17 @@ func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Req
 func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) }
 func (*ProvisionResource_Request) ProtoMessage() {}
 func (*ProvisionResource_Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 0}
+	return fileDescriptor_17ae6090ff270234, []int{19, 0}
 }
+
 func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b)
 }
 func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic)
 }
-func (dst *ProvisionResource_Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ProvisionResource_Request.Merge(dst, src)
+func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProvisionResource_Request.Merge(m, src)
 }
 func (m *ProvisionResource_Request) XXX_Size() int {
 	return xxx_messageInfo_ProvisionResource_Request.Size(m)
@@ -2599,16 +2672,17 @@ func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Re
 func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) }
 func (*ProvisionResource_Response) ProtoMessage() {}
 func (*ProvisionResource_Response) Descriptor() ([]byte, []int) {
-	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 1}
+	return fileDescriptor_17ae6090ff270234, []int{19, 1}
 }
+
 func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b)
 }
 func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic)
 }
-func (dst *ProvisionResource_Response) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ProvisionResource_Response.Merge(dst, src)
+func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProvisionResource_Response.Merge(m, src)
 }
 func (m *ProvisionResource_Response) XXX_Size() int {
 	return xxx_messageInfo_ProvisionResource_Response.Size(m)
@@ -2634,6 +2708,8 @@ func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic {
 }
 
 func init() {
+	proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value)
+	proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value)
 	proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue")
 	proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic")
 	proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath")
@@ -2692,8 +2768,130 @@ func init() {
 	proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource")
 	proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request")
 	proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response")
-	proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value)
-	proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value)
+}
+
+func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) }
+
+var fileDescriptor_17ae6090ff270234 = []byte{
+	// 1880 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x23, 0x49,
+	0x19, 0x9f, 0xf6, 0x23, 0xb1, 0x3f, 0xe7, 0xe1, 0xd4, 0xcc, 0x0e, 0xa6, 0x77, 0x17, 0x82, 0x79,
+	0x24, 0xab, 0xdd, 0xf1, 0xac, 0x32, 0xb0, 0xbb, 0x84, 0xd1, 0x8a, 0x6c, 0x26, 0x64, 0x22, 0x66,
+	0xb2, 0xa1, 0x3c, 0x0f, 0x24, 0xa4, 0xb5, 0x6a, 0xdc, 0x15, 0x4f, 0x33, 0x76, 0x77, 0x6f, 0x75,
+	0x39, 0x89, 0x85, 0xc4, 0x05, 0xc1, 0x19, 0x09, 0xf1, 0x90, 0x78, 0x5c, 0x40, 0xe2, 0x1f, 0xe0,
+	0x00, 0xdc, 0x38, 0xf1, 0x0f, 0x70, 0x03, 0x4e, 0x08, 0x6e, 0x9c, 0xe1, 0x82, 0x84, 0xea, 0xd5,
+	0x5d, 0xb6, 0xdb, 0x4e, 0x4f, 0xb2, 0x23, 0xc4, 0xad, 0xab, 0xbe, 0x5f, 0x7d, 0xdf, 0x57, 0xdf,
+	0xab, 0xbe, 0xcf, 0x86, 0x55, 0x7e, 0x1c, 0xf5, 0x87, 0x3d, 0x3f, 0xf8, 0x42, 0x2b, 0x62, 0x21,
+	0x0f, 0x51, 0x35, 0xd9, 0x68, 0xde, 0x86, 0xa5, 0x3b, 0xa3, 0x80, 0x0c, 0xfc, 0xee, 0x23, 0xd2,
+	0x1f, 0x52, 0xd4, 0x80, 0xc5, 0x41, 0xdc, 0x8b, 0x48, 0xf7, 0x59, 0xc3, 0x59, 0x77, 0x36, 0x97,
+	0xb0, 0x59, 0x22, 0x04, 0xa5, 0x6f, 0xc6, 0x61, 0xd0, 0x28, 0xc8, 0x6d, 0xf9, 0xdd, 0xfc, 0x9b,
+	0x03, 0x70, 0xc7, 0x27, 0xbd, 0x20, 0x8c, 0xb9, 0xdf, 0x45, 0xdb, 0x50, 0x89, 0xe9, 0x09, 0x65,
+	0x3e, 0x1f, 0xc9, 0xd3, 0x2b, 0x5b, 0x9f, 0x68, 0xa5, 0xb2, 0x53, 0x60, 0xab, 0xad, 0x51, 0x38,
+	0xc1, 0x0b, 0xc1, 0xf1, 0x70, 0x30, 0x20, 0x6c, 0x24, 0x25, 0x54, 0xb1, 0x59, 0xa2, 0xeb, 0xb0,
+	0xe0, 0x51, 0x4e, 0xfc, 0x7e, 0xa3, 0x28, 0x09, 0x7a, 0x85, 0xde, 0x82, 0x2a, 0xe1, 0x9c, 0xf9,
+	0x4f, 0x86, 0x9c, 0x36, 0x4a, 0xeb, 0xce, 0x66, 0x6d, 0xab, 0x61, 0x89, 0xdb, 0x31, 0xb4, 0x23,
+	0xc2, 0x9f, 0xe2, 0x14, 0xda, 0xbc, 0x09, 0x15, 0x23, 0x1f, 0xd5, 0x60, 0xf1, 0xe0, 0xf0, 0xd1,
+	0xce, 0xbd, 0x83, 0x3b, 0xf5, 0x2b, 0xa8, 0x0a, 0xe5, 0x3d, 0x8c, 0xdf, 0xc7, 0x75, 0x47, 0xec,
+	0x3f, 0xde, 0xc1, 0x87, 0x07, 0x87, 0xfb, 0xf5, 0x42, 0xf3, 0x2f, 0x0e, 0x2c, 0x8f, 0x71, 0x43,
+	0xb7, 0xa0, 0x1c, 0x73, 0x1a, 0xc5, 0x0d, 0x67, 0xbd, 0xb8, 0x59, 0xdb, 0x7a, 0x75, 0x96, 0xd8,
+	0x56, 0x9b, 0xd3, 0x08, 0x2b, 0xac, 0xfb, 0x43, 0x07, 0x4a, 0x62, 0x8d, 0x36, 0x60, 0x25, 0xd1,
+	0xa6, 0x13, 0x90, 0x01, 0x95, 0xc6, 0xaa, 0xde, 0xbd, 0x82, 0x97, 0x93, 0xfd, 0x43, 0x32, 0xa0,
+	0xa8, 0x05, 0x88, 0xf6, 0xe9, 0x80, 0x06, 0xbc, 0xf3, 0x8c, 0x8e, 0x3a, 0x31, 0x67, 0x7e, 0xd0,
+	0x53, 0xe6, 0xb9, 0x7b, 0x05, 0xd7, 0x35, 0xed, 0xab, 0x74, 0xd4, 0x96, 0x14, 0xb4, 0x09, 0xab,
+	0x36, 0xde, 0x0f, 0xb8, 0x34, 0x59, 0x51, 0x70, 0x4e, 0xc1, 0x07, 0x01, 0x7f, 0x0f, 0x84, 0xa7,
+	0xfa, 0xb4, 0xcb, 0x43, 0xd6, 0xbc, 0x25, 0xd4, 0x0a, 0x23, 0xb7, 0x0a, 0x8b, 0x98, 0x7e, 0x38,
+	0xa4, 0x31, 0x77, 0xd7, 0xa1, 0x82, 0x69, 0x1c, 0x85, 0x41, 0x4c, 0xd1, 0x35, 0x28, 0xef, 0x31,
+	0x16, 0x32, 0xa5, 0x24, 0x56, 0x8b, 0xe6, 0x8f, 0x1c, 0xa8, 0x60, 0x72, 0xda, 0xe6, 0x84, 0xd3,
+	0x24, 0x34, 0x9c, 0x34, 0x34, 0xd0, 0x36, 0x2c, 0x1e, 0xf7, 0x09, 0x1f, 0x90, 0xa8, 0x51, 0x90,
+	0x46, 0x5a, 0xb7, 0x8c, 0x64, 0x4e, 0xb6, 0xbe, 0xa2, 0x20, 0x7b, 0x01, 0x67, 0x23, 0x6c, 0x0e,
+	0xb8, 0xdb, 0xb0, 0x64, 0x13, 0x50, 0x1d, 0x8a, 0xcf, 0xe8, 0x48, 0x2b, 0x20, 0x3e, 0x85, 0x52,
+	0x27, 0x22, 0x5e, 0x75, 0xac, 0xa8, 0xc5, 0x76, 0xe1, 0x1d, 0xa7, 0xf9, 0x8f, 0x32, 0x2c, 0xb4,
+	0xbb, 0x4f, 0xe9, 0x80, 0x88, 0x90, 0x3a, 0xa1, 0x2c, 0xf6, 0xb5, 0x66, 0x45, 0x6c, 0x96, 0xe8,
+	0x06, 0x94, 0x9f, 0xf4, 0xc3, 0xee, 0x33, 0x79, 0xbc, 0xb6, 0xf5, 0x31, 0x4b, 0x35, 0x75, 0xb6,
+	0xf5, 0x9e, 0x20, 0x63, 0x85, 0x72, 0x7f, 0xe1, 0x40, 0x59, 0x6e, 0xcc, 0x61, 0xf9, 0x25, 0x80,
+	0xc4, 0x79, 0xb1, 0xbe, 0xf2, 0xcb, 0xd3, 0x7c, 0x93, 0xf0, 0xc0, 0x16, 0x1c, 0xbd, 0x0b, 0x35,
+	0x29, 0xa9, 0xc3, 0x47, 0x11, 0x8d, 0x1b, 0xc5, 0xa9, 0xa8, 0xd2, 0xa7, 0x0f, 0x69, 0xcc, 0xa9,
+	0xa7, 0x74, 0x03, 0x79, 0xe2, 0x81, 0x38, 0xe0, 0xfe, 0xd1, 0x81, 0x6a, 0xc2, 0x59, 0xb8, 0x23,
+	0x8d, 0x2a, 0x2c, 0xbf, 0xc5, 0x9e, 0xe0, 0x6d, 0xb2, 0x57, 0x7c, 0xa3, 0x75, 0xa8, 0x79, 0x34,
+	0xee, 0x32, 0x3f, 0xe2, 0xe2, 0x42, 0x2a, 0xbb, 0xec, 0x2d, 0xe4, 0x42, 0x85, 0xd1, 0x0f, 0x87,
+	0x3e, 0xa3, 0x9e, 0xcc, 0xb0, 0x0a, 0x4e, 0xd6, 0x82, 0x16, 0x4a, 0x14, 0xe9, 0x37, 0xca, 0x8a,
+	0x66, 0xd6, 0x82, 0xd6, 0x0d, 0x07, 0xd1, 0x90, 0x53, 0xaf, 0xb1, 0xa0, 0x68, 0x66, 0x8d, 0x5e,
+	0x81, 0x6a, 0x4c, 0x83, 0xd8, 0xe7, 0xfe, 0x09, 0x6d, 0x2c, 0x4a, 0x62, 0xba, 0xe1, 0xfe, 0xba,
+	0x00, 0x35, 0xeb, 0x96, 0xe8, 0x65, 0xa8, 0x0a, 0x5d, 0xad, 0x34, 0xc1, 0x15, 0xb1, 0x21, 0xf3,
+	0xe3, 0xf9, 0xdc, 0x88, 0x76, 0x61, 0x31, 0xa0, 0x31, 0x17, 0x39, 0x54, 0x94, 0xd5, 0xe9, 0xb5,
+	0xb9, 0x16, 0x96, 0xdf, 0x7e, 0xd0, 0xbb, 0x1f, 0x7a, 0x14, 0x9b, 0x93, 0x42, 0xa1, 0x81, 0x1f,
+	0x74, 0x7c, 0x4e, 0x07, 0xb1, 0xb4, 0x49, 0x11, 0x57, 0x06, 0x7e, 0x70, 0x20, 0xd6, 0x92, 0x48,
+	0xce, 0x34, 0xb1, 0xac, 0x89, 0xe4, 0x4c, 0x12, 0x9b, 0xf7, 0xd5, 0xcd, 0x34, 0xc7, 0xf1, 0xd2,
+	0x03, 0xb0, 0xd0, 0x3e, 0x38, 0xdc, 0xbf, 0xb7, 0x57, 0x77, 0x50, 0x05, 0x4a, 0xf7, 0x0e, 0xda,
+	0x0f, 0xea, 0x05, 0xb4, 0x08, 0xc5, 0xf6, 0xde, 0x83, 0x7a, 0x51, 0x7c, 0xdc, 0xdf, 0x39, 0xaa,
+	0x97, 0x44, 0x89, 0xda, 0xc7, 0xef, 0x3f, 0x3c, 0xaa, 0x97, 0x9b, 0x3f, 0x29, 0xc1, 0xda, 0x3e,
+	0xe5, 0x47, 0x2c, 0x3c, 0xf1, 0x3d, 0xca, 0x94, 0xfe, 0x76, 0x12, 0xff, 0xab, 0x68, 0x65, 0xf1,
+	0x0d, 0xa8, 0x44, 0x1a, 0x29, 0xcd, 0x58, 0xdb, 0x5a, 0x9b, 0xba, 0x3c, 0x4e, 0x20, 0x88, 0x42,
+	0x9d, 0xd1, 0x38, 0x1c, 0xb2, 0x2e, 0xed, 0xc4, 0x92, 0x68, 0x62, 0x7a, 0xdb, 0x3a, 0x36, 0x25,
+	0xbe, 0x65, 0xe4, 0x89, 0x0f, 0x79, 0x5a, 0xed, 0xc7, 0x2a, 0xc1, 0x57, 0xd9, 0xf8, 0x2e, 0xea,
2832 0xc3, 0x55, 0x8f, 0x70, 0xd2, 0x99, 0x90, 0xa4, 0xe2, 0xff, 0x76, 0x3e, 0x49, 0x77, 0x08, 0x27,
2833 0xed, 0x69, 0x59, 0x6b, 0xde, 0xe4, 0x3e, 0x7a, 0x1b, 0x6a, 0x5e, 0xf2, 0x06, 0x09, 0xe7, 0x09,
2834 0x29, 0x2f, 0x65, 0xbe, 0x50, 0xd8, 0x46, 0xba, 0x0f, 0xe1, 0x5a, 0xd6, 0x7d, 0x32, 0xea, 0xd2,
2835 0x86, 0x5d, 0x97, 0x32, 0x6d, 0x9c, 0x96, 0x2a, 0xf7, 0x31, 0x5c, 0xcf, 0x56, 0xfe, 0x92, 0x8c,
2836 0x9b, 0x7f, 0x76, 0xe0, 0xa5, 0x23, 0x46, 0x23, 0xc2, 0xa8, 0xb1, 0xda, 0x6e, 0x18, 0x1c, 0xfb,
2837 0x3d, 0x77, 0x3b, 0x09, 0x0f, 0x74, 0x13, 0x16, 0xba, 0x72, 0x53, 0xc7, 0x83, 0x9d, 0x3d, 0x76,
2838 0x4b, 0x80, 0x35, 0xcc, 0xfd, 0xae, 0x63, 0xc5, 0xd3, 0x97, 0x61, 0x35, 0x52, 0x12, 0xbc, 0x4e,
2839 0x3e, 0x36, 0x2b, 0x06, 0xaf, 0x54, 0x99, 0xf4, 0x46, 0x21, 0xaf, 0x37, 0x9a, 0xdf, 0x2f, 0xc0,
2840 0xb5, 0x87, 0x51, 0x8f, 0x11, 0x8f, 0x26, 0x5e, 0x11, 0x8f, 0x89, 0xcb, 0xd2, 0xcb, 0xcd, 0x2d,
2841 0x1b, 0x56, 0x11, 0x2f, 0x8c, 0x17, 0xf1, 0x37, 0xa1, 0xca, 0xc8, 0x69, 0x27, 0x16, 0xec, 0x64,
2842 0x8d, 0xa8, 0x6d, 0x5d, 0xcd, 0x78, 0xb6, 0x70, 0x85, 0xe9, 0x2f, 0xf7, 0x3b, 0xb6, 0x51, 0xde,
2843 0x85, 0x95, 0xa1, 0x52, 0xcc, 0xd3, 0x3c, 0xce, 0xb1, 0xc9, 0xb2, 0x81, 0xab, 0x77, 0xf4, 0xc2,
2844 0x26, 0xf9, 0xbd, 0x03, 0xee, 0x23, 0xd2, 0xf7, 0x3d, 0xa1, 0x9c, 0xb6, 0x89, 0x78, 0x19, 0xb4,
2845 0xd7, 0x1f, 0xe7, 0x34, 0x4c, 0x1a, 0x12, 0x85, 0x7c, 0x21, 0xb1, 0x6b, 0x5d, 0x7e, 0x42, 0x79,
2846 0x27, 0xb7, 0xf2, 0xbf, 0x75, 0xa0, 0x61, 0x94, 0x4f, 0xf3, 0xe1, 0xff, 0x42, 0xf5, 0xdf, 0x39,
2847 0x50, 0x55, 0x8a, 0x0e, 0x19, 0x75, 0x7b, 0xa9, 0xae, 0xaf, 0xc3, 0x1a, 0xa7, 0x8c, 0x91, 0xe3,
2848 0x90, 0x0d, 0x3a, 0x76, 0xc7, 0x50, 0xc5, 0xf5, 0x84, 0xf0, 0x48, 0x47, 0xdd, 0xff, 0x46, 0xf7,
2849 0x5f, 0x15, 0x60, 0x09, 0x53, 0xe2, 0x99, 0x78, 0x71, 0xbf, 0x9d, 0xd3, 0xd4, 0xb7, 0x61, 0xb9,
2850 0x3b, 0x64, 0x4c, 0x74, 0x99, 0x2a, 0xc8, 0xcf, 0xd1, 0x7a, 0x49, 0xa3, 0x55, 0x8c, 0x37, 0x60,
2851 0x31, 0x62, 0xfe, 0x89, 0x49, 0xb0, 0x25, 0x6c, 0x96, 0xee, 0x0f, 0xec, 0x54, 0xfa, 0x3c, 0x54,
2852 0x03, 0x7a, 0x9a, 0x2f, 0x8b, 0x2a, 0x01, 0x3d, 0xbd, 0x5c, 0x02, 0xcd, 0xd6, 0xaa, 0xf9, 0x9b,
2853 0x12, 0xa0, 0xa3, 0x3e, 0x09, 0x8c, 0x99, 0x76, 0x9f, 0x92, 0xa0, 0x47, 0xdd, 0xff, 0x38, 0x39,
2854 0xad, 0xf5, 0x0e, 0xd4, 0x22, 0xe6, 0x87, 0x2c, 0x9f, 0xad, 0x40, 0x62, 0xd5, 0x65, 0xf6, 0x00,
2855 0x45, 0x2c, 0x8c, 0xc2, 0x98, 0x7a, 0x9d, 0xd4, 0x16, 0xc5, 0xf9, 0x0c, 0xea, 0xe6, 0xc8, 0xa1,
2856 0xb1, 0x49, 0x1a, 0x5d, 0xa5, 0x5c, 0xd1, 0x85, 0x3e, 0x0d, 0xcb, 0x4a, 0x63, 0x63, 0x91, 0xb2,
2857 0xb4, 0xc8, 0x92, 0xdc, 0x3c, 0xd2, 0xce, 0xfa, 0x79, 0xc1, 0x72, 0xd6, 0x6d, 0x58, 0x8e, 0xfa,
2858 0x24, 0x08, 0xf2, 0x96, 0xbd, 0x25, 0x8d, 0x56, 0x0a, 0xee, 0x8a, 0x5e, 0x43, 0x36, 0x95, 0x71,
2859 0x87, 0xd1, 0xa8, 0x4f, 0xba, 0x54, 0x7b, 0x6e, 0xf6, 0x38, 0xb7, 0x6a, 0x4e, 0x60, 0x75, 0x00,
2860 0x6d, 0xc0, 0xaa, 0x51, 0x61, 0xdc, 0x91, 0x2b, 0x7a, 0x5b, 0x2b, 0x7e, 0xe1, 0x26, 0x00, 0xbd,
2861 0x01, 0xa8, 0x4f, 0x7b, 0xa4, 0x3b, 0x92, 0x4d, 0x7a, 0x27, 0x1e, 0xc5, 0x9c, 0x0e, 0x74, 0xe7,
2862 0x5b, 0x57, 0x14, 0x51, 0x72, 0xdb, 0x72, 0xbf, 0xf9, 0xa7, 0x22, 0x5c, 0xdd, 0x89, 0xa2, 0xfe,
2863 0x68, 0x22, 0x6e, 0xfe, 0xfd, 0xe2, 0xe3, 0x66, 0xca, 0x1b, 0xc5, 0xe7, 0xf1, 0xc6, 0x73, 0x87,
2864 0x4b, 0x86, 0xe5, 0xcb, 0x59, 0x96, 0x77, 0xff, 0x70, 0xf9, 0xfc, 0xb6, 0xd2, 0xb4, 0x30, 0x96,
2865 0xa6, 0x93, 0x6e, 0x2d, 0x5e, 0xd2, 0xad, 0xa5, 0x19, 0x6e, 0xfd, 0x67, 0x01, 0xae, 0x1e, 0x0c,
2866 0xa2, 0x90, 0xf1, 0xf1, 0xd6, 0xe3, 0xad, 0x9c, 0x5e, 0x5d, 0x81, 0x82, 0xef, 0xe9, 0xa1, 0xb5,
2867 0xe0, 0x7b, 0xee, 0x19, 0xd4, 0x15, 0x3b, 0x9a, 0xd4, 0xe1, 0x73, 0x47, 0x9e, 0x5c, 0x01, 0xa1,
2868 0x50, 0x73, 0xaa, 0xed, 0x2f, 0x6d, 0x6f, 0x7c, 0x00, 0xc8, 0xd7, 0x6a, 0x74, 0x4c, 0x8f, 0x6e,
2869 0xde, 0x92, 0x9b, 0x96, 0x88, 0x8c, 0xab, 0xb7, 0x26, 0xf5, 0xc7, 0x6b, 0xfe, 0xc4, 0x4e, 0x7c,
2870 0xf1, 0xc6, 0xe6, 0xaf, 0x0e, 0xac, 0x88, 0x47, 0x2a, 0xed, 0x0b, 0x5e, 0x5c, 0x47, 0xc0, 0xc6,
2871 0xc6, 0xa5, 0x72, 0xae, 0xd0, 0xd4, 0x66, 0xbe, 0xf0, 0xfd, 0x7e, 0xea, 0xc0, 0x35, 0x33, 0xdb,
2872 0x88, 0x5e, 0x20, 0x6b, 0x8e, 0x3b, 0xb3, 0xf4, 0xba, 0x25, 0xaa, 0x42, 0x82, 0x9d, 0x3d, 0xc9,
2873 0xd9, 0xa8, 0x8b, 0x6b, 0xf7, 0x33, 0x07, 0x3e, 0x6e, 0x3a, 0x33, 0x4b, 0xc5, 0x8f, 0x60, 0x96,
2874 0xf8, 0x48, 0x3a, 0x98, 0xbf, 0x3b, 0xb0, 0x96, 0xa8, 0x95, 0xb4, 0x31, 0xf1, 0xc5, 0xd5, 0x42,
2875 0x6f, 0x03, 0x74, 0xc3, 0x20, 0xa0, 0x5d, 0x6e, 0x86, 0x83, 0x79, 0x35, 0x37, 0x85, 0xba, 0xdf,
2876 0xb0, 0xee, 0x73, 0x1d, 0x16, 0xc2, 0x21, 0x8f, 0x86, 0x5c, 0x87, 0xa4, 0x5e, 0x5d, 0xd8, 0x0d,
2877 0x5b, 0x3f, 0xae, 0x42, 0xc5, 0xcc, 0x71, 0xe8, 0xeb, 0x50, 0xdd, 0xa7, 0x5c, 0xff, 0xc2, 0xf5,
2878 0x99, 0x73, 0x46, 0x64, 0x15, 0x40, 0x9f, 0xcd, 0x35, 0x48, 0xa3, 0xfe, 0x8c, 0xa1, 0x11, 0x6d,
2879 0x5a, 0xe7, 0x33, 0x11, 0x89, 0xa4, 0xd7, 0x72, 0x20, 0xb5, 0xb4, 0x6f, 0xcd, 0x9b, 0x58, 0xd0,
2880 0x0d, 0x8b, 0xd1, 0x6c, 0x58, 0x22, 0xb7, 0x95, 0x17, 0xae, 0x85, 0x0f, 0x67, 0x4f, 0x1c, 0xe8,
2881 0xf5, 0x0c, 0x5e, 0x93, 0xa0, 0x44, 0xf0, 0x1b, 0xf9, 0xc0, 0x5a, 0xac, 0x9f, 0x3d, 0xb8, 0xa2,
2882 0x0d, 0x8b, 0x4b, 0x16, 0x20, 0x11, 0xb7, 0x79, 0x3e, 0x50, 0x8b, 0xba, 0x6b, 0x0d, 0x26, 0xe8,
2883 0x15, 0xeb, 0x58, 0xb2, 0x9b, 0x30, 0x7d, 0x75, 0x06, 0x55, 0x73, 0xfa, 0xda, 0xf8, 0x98, 0x80,
2884 0x3e, 0x69, 0x0f, 0xc4, 0x16, 0x21, 0xe1, 0xb7, 0x3e, 0x1b, 0xa0, 0x59, 0x76, 0xb3, 0x5a, 0x6a,
2885 0x64, 0x87, 0xe9, 0x34, 0x39, 0x61, 0xff, 0xb9, 0xf3, 0x60, 0x5a, 0xc8, 0x71, 0x66, 0x03, 0x86,
2886 0xec, 0xe3, 0x19, 0xf4, 0x44, 0xcc, 0xc6, 0xb9, 0xb8, 0x54, 0x4e, 0xc6, 0xb3, 0x38, 0x26, 0x27,
2887 0xeb, 0xd9, 0xcc, 0x92, 0x93, 0x8d, 0xd3, 0x72, 0x1e, 0x4f, 0xbe, 0x84, 0xe8, 0x53, 0x13, 0x86,
2888 0x4e, 0x49, 0x09, 0xf7, 0xe6, 0x3c, 0x88, 0x66, 0xfc, 0x45, 0xf5, 0xfb, 0x3f, 0x1a, 0xfb, 0xf9,
2889 0x94, 0x87, 0x51, 0xc2, 0xa4, 0x31, 0x4d, 0x50, 0x47, 0xb7, 0xbe, 0x57, 0x84, 0x9a, 0xf5, 0x30,
2890 0xa0, 0x0f, 0xec, 0xe2, 0xb4, 0x91, 0x51, 0x76, 0xec, 0x37, 0x2e, 0x33, 0xaa, 0x67, 0x00, 0xb5,
2891 0xaa, 0x67, 0x73, 0xde, 0x23, 0x94, 0x95, 0x8b, 0x53, 0xa8, 0x44, 0xe8, 0x8d, 0x9c, 0x68, 0x2d,
2892 0xf9, 0x49, 0xc6, 0x53, 0x33, 0x56, 0x7e, 0xa7, 0xa8, 0x99, 0xe5, 0x37, 0x0b, 0xa5, 0x24, 0xbc,
2893 0xe9, 0x5c, 0xc2, 0x11, 0x4f, 0x16, 0xe4, 0x1f, 0x7b, 0xb7, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff,
2894 0x8a, 0x61, 0xfa, 0xcc, 0xeb, 0x1b, 0x00, 0x00,
2697} 2895}
2698 2896
2699// Reference imports to suppress errors if they are not otherwise used. 2897// Reference imports to suppress errors if they are not otherwise used.
@@ -3329,127 +3527,3 @@ var _Provisioner_serviceDesc = grpc.ServiceDesc{
3329 }, 3527 },
3330 Metadata: "tfplugin5.proto", 3528 Metadata: "tfplugin5.proto",
3331} 3529}
3332
3333func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_tfplugin5_56820f4fb67360c5) }
3334
3335var fileDescriptor_tfplugin5_56820f4fb67360c5 = []byte{
3336 // 1876 bytes of a gzipped FileDescriptorProto
3337 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x23, 0x49,
3338 0x15, 0x9f, 0x76, 0xdb, 0x89, 0xfd, 0x9c, 0x0f, 0xa7, 0x66, 0x76, 0x30, 0xbd, 0xbb, 0x10, 0xcc,
3339 0x47, 0xb2, 0xda, 0x1d, 0xcf, 0x2a, 0x03, 0xbb, 0x4b, 0x18, 0xad, 0xc8, 0x66, 0x42, 0x26, 0x62,
3340 0x26, 0x1b, 0xca, 0xf3, 0x81, 0x84, 0xb4, 0x56, 0x8d, 0xbb, 0xe2, 0x69, 0xc6, 0xee, 0xee, 0xad,
3341 0x2e, 0x67, 0x62, 0x71, 0x44, 0x70, 0xe6, 0xc2, 0x87, 0xc4, 0xc7, 0x85, 0x03, 0xff, 0x00, 0x07,
3342 0xe0, 0xc6, 0x89, 0x7f, 0x80, 0x1b, 0x70, 0x42, 0x70, 0x43, 0x1c, 0xe1, 0x82, 0x84, 0xea, 0xab,
3343 0xbb, 0x6c, 0xb7, 0x93, 0x9e, 0x64, 0x57, 0x88, 0x5b, 0x57, 0xbd, 0x5f, 0xbd, 0xf7, 0xab, 0xf7,
3344 0x5e, 0xbd, 0x57, 0x65, 0xc3, 0x2a, 0x3f, 0x8e, 0x07, 0xa3, 0x7e, 0x10, 0x7e, 0xa9, 0x1d, 0xb3,
3345 0x88, 0x47, 0xa8, 0x96, 0x4e, 0xb4, 0x6e, 0xc3, 0xd2, 0x9d, 0x71, 0x48, 0x86, 0x41, 0xef, 0x11,
3346 0x19, 0x8c, 0x28, 0x6a, 0xc2, 0xe2, 0x30, 0xe9, 0xc7, 0xa4, 0xf7, 0xac, 0xe9, 0xac, 0x3b, 0x9b,
3347 0x4b, 0xd8, 0x0c, 0x11, 0x82, 0xf2, 0xb7, 0x93, 0x28, 0x6c, 0x96, 0xe4, 0xb4, 0xfc, 0x6e, 0xfd,
3348 0xd5, 0x01, 0xb8, 0x13, 0x90, 0x7e, 0x18, 0x25, 0x3c, 0xe8, 0xa1, 0x6d, 0xa8, 0x26, 0xf4, 0x84,
3349 0xb2, 0x80, 0x8f, 0xe5, 0xea, 0x95, 0xad, 0x4f, 0xb5, 0x33, 0xdb, 0x19, 0xb0, 0xdd, 0xd1, 0x28,
3350 0x9c, 0xe2, 0x85, 0xe1, 0x64, 0x34, 0x1c, 0x12, 0x36, 0x96, 0x16, 0x6a, 0xd8, 0x0c, 0xd1, 0x75,
3351 0x58, 0xf0, 0x29, 0x27, 0xc1, 0xa0, 0xe9, 0x4a, 0x81, 0x1e, 0xa1, 0xb7, 0xa0, 0x46, 0x38, 0x67,
3352 0xc1, 0x93, 0x11, 0xa7, 0xcd, 0xf2, 0xba, 0xb3, 0x59, 0xdf, 0x6a, 0x5a, 0xe6, 0x76, 0x8c, 0xec,
3353 0x88, 0xf0, 0xa7, 0x38, 0x83, 0xb6, 0x6e, 0x42, 0xd5, 0xd8, 0x47, 0x75, 0x58, 0x3c, 0x38, 0x7c,
3354 0xb4, 0x73, 0xef, 0xe0, 0x4e, 0xe3, 0x0a, 0xaa, 0x41, 0x65, 0x0f, 0xe3, 0xf7, 0x71, 0xc3, 0x11,
3355 0xf3, 0x8f, 0x77, 0xf0, 0xe1, 0xc1, 0xe1, 0x7e, 0xa3, 0xd4, 0xfa, 0xb3, 0x03, 0xcb, 0x13, 0xda,
3356 0xd0, 0x2d, 0xa8, 0x24, 0x9c, 0xc6, 0x49, 0xd3, 0x59, 0x77, 0x37, 0xeb, 0x5b, 0xaf, 0xce, 0x33,
3357 0xdb, 0xee, 0x70, 0x1a, 0x63, 0x85, 0xf5, 0x7e, 0xe8, 0x40, 0x59, 0x8c, 0xd1, 0x06, 0xac, 0xa4,
3358 0x6c, 0xba, 0x21, 0x19, 0x52, 0xe9, 0xac, 0xda, 0xdd, 0x2b, 0x78, 0x39, 0x9d, 0x3f, 0x24, 0x43,
3359 0x8a, 0xda, 0x80, 0xe8, 0x80, 0x0e, 0x69, 0xc8, 0xbb, 0xcf, 0xe8, 0xb8, 0x9b, 0x70, 0x16, 0x84,
3360 0x7d, 0xe5, 0x9e, 0xbb, 0x57, 0x70, 0x43, 0xcb, 0xbe, 0x4e, 0xc7, 0x1d, 0x29, 0x41, 0x9b, 0xb0,
3361 0x6a, 0xe3, 0x83, 0x90, 0x4b, 0x97, 0xb9, 0x42, 0x73, 0x06, 0x3e, 0x08, 0xf9, 0x7b, 0x20, 0x22,
3362 0x35, 0xa0, 0x3d, 0x1e, 0xb1, 0xd6, 0x2d, 0x41, 0x2b, 0x8a, 0xbd, 0x1a, 0x2c, 0x62, 0xfa, 0xe1,
3363 0x88, 0x26, 0xdc, 0x5b, 0x87, 0x2a, 0xa6, 0x49, 0x1c, 0x85, 0x09, 0x45, 0xd7, 0xa0, 0xb2, 0xc7,
3364 0x58, 0xc4, 0x14, 0x49, 0xac, 0x06, 0xad, 0x1f, 0x39, 0x50, 0xc5, 0xe4, 0x79, 0x87, 0x13, 0x4e,
3365 0xd3, 0xd4, 0x70, 0xb2, 0xd4, 0x40, 0xdb, 0xb0, 0x78, 0x3c, 0x20, 0x7c, 0x48, 0xe2, 0x66, 0x49,
3366 0x3a, 0x69, 0xdd, 0x72, 0x92, 0x59, 0xd9, 0xfe, 0x9a, 0x82, 0xec, 0x85, 0x9c, 0x8d, 0xb1, 0x59,
3367 0xe0, 0x6d, 0xc3, 0x92, 0x2d, 0x40, 0x0d, 0x70, 0x9f, 0xd1, 0xb1, 0x26, 0x20, 0x3e, 0x05, 0xa9,
3368 0x13, 0x91, 0xaf, 0x3a, 0x57, 0xd4, 0x60, 0xbb, 0xf4, 0x8e, 0xd3, 0xfa, 0x7b, 0x05, 0x16, 0x3a,
3369 0xbd, 0xa7, 0x74, 0x48, 0x44, 0x4a, 0x9d, 0x50, 0x96, 0x04, 0x9a, 0x99, 0x8b, 0xcd, 0x10, 0xdd,
3370 0x80, 0xca, 0x93, 0x41, 0xd4, 0x7b, 0x26, 0x97, 0xd7, 0xb7, 0x3e, 0x61, 0x51, 0x53, 0x6b, 0xdb,
3371 0xef, 0x09, 0x31, 0x56, 0x28, 0xef, 0x17, 0x0e, 0x54, 0xe4, 0xc4, 0x19, 0x2a, 0xbf, 0x02, 0x90,
3372 0x06, 0x2f, 0xd1, 0x5b, 0x7e, 0x79, 0x56, 0x6f, 0x9a, 0x1e, 0xd8, 0x82, 0xa3, 0x77, 0xa1, 0x2e,
3373 0x2d, 0x75, 0xf9, 0x38, 0xa6, 0x49, 0xd3, 0x9d, 0xc9, 0x2a, 0xbd, 0xfa, 0x90, 0x26, 0x9c, 0xfa,
3374 0x8a, 0x1b, 0xc8, 0x15, 0x0f, 0xc4, 0x02, 0xef, 0x0f, 0x0e, 0xd4, 0x52, 0xcd, 0x22, 0x1c, 0x59,
3375 0x56, 0x61, 0xf9, 0x2d, 0xe6, 0x84, 0x6e, 0x73, 0x7a, 0xc5, 0x37, 0x5a, 0x87, 0xba, 0x4f, 0x93,
3376 0x1e, 0x0b, 0x62, 0x2e, 0x36, 0xa4, 0x4e, 0x97, 0x3d, 0x85, 0x3c, 0xa8, 0x32, 0xfa, 0xe1, 0x28,
3377 0x60, 0xd4, 0x97, 0x27, 0xac, 0x8a, 0xd3, 0xb1, 0x90, 0x45, 0x12, 0x45, 0x06, 0xcd, 0x8a, 0x92,
3378 0x99, 0xb1, 0x90, 0xf5, 0xa2, 0x61, 0x3c, 0xe2, 0xd4, 0x6f, 0x2e, 0x28, 0x99, 0x19, 0xa3, 0x57,
3379 0xa0, 0x96, 0xd0, 0x30, 0x09, 0x78, 0x70, 0x42, 0x9b, 0x8b, 0x52, 0x98, 0x4d, 0x78, 0xbf, 0x2a,
3380 0x41, 0xdd, 0xda, 0x25, 0x7a, 0x19, 0x6a, 0x82, 0xab, 0x75, 0x4c, 0x70, 0x55, 0x4c, 0xc8, 0xf3,
3381 0xf1, 0x62, 0x61, 0x44, 0xbb, 0xb0, 0x18, 0xd2, 0x84, 0x8b, 0x33, 0xe4, 0xca, 0xea, 0xf4, 0xda,
3382 0x99, 0x1e, 0x96, 0xdf, 0x41, 0xd8, 0xbf, 0x1f, 0xf9, 0x14, 0x9b, 0x95, 0x82, 0xd0, 0x30, 0x08,
3383 0xbb, 0x01, 0xa7, 0xc3, 0x44, 0xfa, 0xc4, 0xc5, 0xd5, 0x61, 0x10, 0x1e, 0x88, 0xb1, 0x14, 0x92,
3384 0x53, 0x2d, 0xac, 0x68, 0x21, 0x39, 0x95, 0xc2, 0xd6, 0x7d, 0xb5, 0x33, 0xad, 0x71, 0xb2, 0xf4,
3385 0x00, 0x2c, 0x74, 0x0e, 0x0e, 0xf7, 0xef, 0xed, 0x35, 0x1c, 0x54, 0x85, 0xf2, 0xbd, 0x83, 0xce,
3386 0x83, 0x46, 0x09, 0x2d, 0x82, 0xdb, 0xd9, 0x7b, 0xd0, 0x70, 0xc5, 0xc7, 0xfd, 0x9d, 0xa3, 0x46,
3387 0x59, 0x94, 0xa8, 0x7d, 0xfc, 0xfe, 0xc3, 0xa3, 0x46, 0xa5, 0xf5, 0x93, 0x32, 0xac, 0xed, 0x53,
3388 0x7e, 0xc4, 0xa2, 0x93, 0xc0, 0xa7, 0x4c, 0xf1, 0xb7, 0x0f, 0xf1, 0xbf, 0x5c, 0xeb, 0x14, 0xdf,
3389 0x80, 0x6a, 0xac, 0x91, 0xd2, 0x8d, 0xf5, 0xad, 0xb5, 0x99, 0xcd, 0xe3, 0x14, 0x82, 0x28, 0x34,
3390 0x18, 0x4d, 0xa2, 0x11, 0xeb, 0xd1, 0x6e, 0x22, 0x85, 0x26, 0xa7, 0xb7, 0xad, 0x65, 0x33, 0xe6,
3391 0xdb, 0xc6, 0x9e, 0xf8, 0x90, 0xab, 0xd5, 0x7c, 0xa2, 0x0e, 0xf8, 0x2a, 0x9b, 0x9c, 0x45, 0x03,
3392 0xb8, 0xea, 0x13, 0x4e, 0xba, 0x53, 0x96, 0x54, 0xfe, 0xdf, 0x2e, 0x66, 0xe9, 0x0e, 0xe1, 0xa4,
3393 0x33, 0x6b, 0x6b, 0xcd, 0x9f, 0x9e, 0x47, 0x6f, 0x43, 0xdd, 0x4f, 0x7b, 0x90, 0x08, 0x9e, 0xb0,
3394 0xf2, 0x52, 0x6e, 0x87, 0xc2, 0x36, 0xd2, 0x7b, 0x08, 0xd7, 0xf2, 0xf6, 0x93, 0x53, 0x97, 0x36,
3395 0xec, 0xba, 0x94, 0xeb, 0xe3, 0xac, 0x54, 0x79, 0x8f, 0xe1, 0x7a, 0x3e, 0xf9, 0x4b, 0x2a, 0x6e,
3396 0xfd, 0xc9, 0x81, 0x97, 0x8e, 0x18, 0x8d, 0x09, 0xa3, 0xc6, 0x6b, 0xbb, 0x51, 0x78, 0x1c, 0xf4,
3397 0xbd, 0xed, 0x34, 0x3d, 0xd0, 0x4d, 0x58, 0xe8, 0xc9, 0x49, 0x9d, 0x0f, 0xf6, 0xe9, 0xb1, 0xaf,
3398 0x04, 0x58, 0xc3, 0xbc, 0xef, 0x39, 0x56, 0x3e, 0x7d, 0x15, 0x56, 0x63, 0x65, 0xc1, 0xef, 0x16,
3399 0x53, 0xb3, 0x62, 0xf0, 0x8a, 0xca, 0x74, 0x34, 0x4a, 0x45, 0xa3, 0xd1, 0xfa, 0x41, 0x09, 0xae,
3400 0x3d, 0x8c, 0xfb, 0x8c, 0xf8, 0x34, 0x8d, 0x8a, 0x68, 0x26, 0x1e, 0xcb, 0x36, 0x77, 0x66, 0xd9,
3401 0xb0, 0x8a, 0x78, 0x69, 0xb2, 0x88, 0xbf, 0x09, 0x35, 0x46, 0x9e, 0x77, 0x13, 0xa1, 0x4e, 0xd6,
3402 0x88, 0xfa, 0xd6, 0xd5, 0x9c, 0xb6, 0x85, 0xab, 0x4c, 0x7f, 0x79, 0xdf, 0xb5, 0x9d, 0xf2, 0x2e,
3403 0xac, 0x8c, 0x14, 0x31, 0x5f, 0xeb, 0x38, 0xc7, 0x27, 0xcb, 0x06, 0xae, 0xfa, 0xe8, 0x85, 0x5d,
3404 0xf2, 0x3b, 0x07, 0xbc, 0x47, 0x64, 0x10, 0xf8, 0x82, 0x9c, 0xf6, 0x89, 0xe8, 0x0c, 0x3a, 0xea,
3405 0x8f, 0x0b, 0x3a, 0x26, 0x4b, 0x89, 0x52, 0xb1, 0x94, 0xd8, 0xb5, 0x36, 0x3f, 0x45, 0xde, 0x29,
3406 0x4c, 0xfe, 0x37, 0x0e, 0x34, 0x0d, 0xf9, 0xec, 0x3c, 0xfc, 0x5f, 0x50, 0xff, 0xad, 0x03, 0x35,
3407 0x45, 0x74, 0xc4, 0xa8, 0xd7, 0xcf, 0xb8, 0xbe, 0x0e, 0x6b, 0x9c, 0x32, 0x46, 0x8e, 0x23, 0x36,
3408 0xec, 0xda, 0x37, 0x86, 0x1a, 0x6e, 0xa4, 0x82, 0x47, 0x3a, 0xeb, 0xfe, 0x37, 0xdc, 0xff, 0xe9,
3409 0xc0, 0x12, 0xa6, 0xc4, 0x37, 0xf9, 0xe2, 0xf9, 0x05, 0x5d, 0x7d, 0x1b, 0x96, 0x7b, 0x23, 0xc6,
3410 0xc4, 0x2d, 0x53, 0x25, 0xf9, 0x39, 0xac, 0x97, 0x34, 0x5a, 0x1d, 0x98, 0xb1, 0xc5, 0xfd, 0x8b,
3411 0x50, 0x0b, 0xe9, 0xf3, 0x62, 0x47, 0xa5, 0x1a, 0xd2, 0xe7, 0x97, 0x3c, 0x25, 0xbf, 0x2e, 0x03,
3412 0x3a, 0x1a, 0x90, 0xd0, 0xec, 0x78, 0xf7, 0x29, 0x09, 0xfb, 0xd4, 0xfb, 0x8f, 0x53, 0x70, 0xe3,
3413 0xef, 0x40, 0x3d, 0x66, 0x41, 0xc4, 0x8a, 0x6d, 0x1b, 0x24, 0x56, 0x51, 0xde, 0x03, 0x14, 0xb3,
3414 0x28, 0x8e, 0x12, 0xea, 0x77, 0xb3, 0x1d, 0xbb, 0x67, 0x2b, 0x68, 0x98, 0x25, 0x87, 0x66, 0xe7,
3415 0x59, 0xa2, 0x94, 0x0b, 0x25, 0x0a, 0xfa, 0x2c, 0x2c, 0x2b, 0xc6, 0x31, 0x0b, 0x4e, 0x84, 0xc9,
3416 0x8a, 0xbc, 0xfe, 0x2d, 0xc9, 0xc9, 0x23, 0x35, 0xe7, 0xfd, 0xbc, 0x64, 0x85, 0xe4, 0x36, 0x2c,
3417 0xc7, 0x03, 0x12, 0x86, 0x45, 0x2b, 0xd8, 0x92, 0x46, 0x2b, 0x82, 0xbb, 0xe2, 0xda, 0x20, 0xef,
3418 0x87, 0x49, 0x97, 0xd1, 0x78, 0x40, 0x7a, 0x54, 0xc7, 0x67, 0xfe, 0xcb, 0x6c, 0xd5, 0xac, 0xc0,
3419 0x6a, 0x01, 0xda, 0x80, 0x55, 0x43, 0xc1, 0xd0, 0x76, 0x25, 0xed, 0x15, 0x3d, 0xad, 0x89, 0x5f,
3420 0xb8, 0x9f, 0xa3, 0x37, 0x00, 0x0d, 0x68, 0x9f, 0xf4, 0xc6, 0xf2, 0xbe, 0xdd, 0x4d, 0xc6, 0x09,
3421 0xa7, 0x43, 0x7d, 0x89, 0x6d, 0x28, 0x89, 0xa8, 0x9e, 0x1d, 0x39, 0xdf, 0xfa, 0xa3, 0x0b, 0x57,
3422 0x77, 0xe2, 0x78, 0x30, 0x9e, 0xca, 0x9b, 0x7f, 0x7f, 0xfc, 0x79, 0x33, 0x13, 0x0d, 0xf7, 0x45,
3423 0xa2, 0xf1, 0xc2, 0xe9, 0x92, 0xe3, 0xf9, 0x4a, 0x9e, 0xe7, 0xbd, 0xdf, 0x3b, 0x97, 0x3e, 0xc5,
3424 0x4d, 0x58, 0x34, 0x36, 0xd4, 0x9b, 0xc4, 0x0c, 0xa7, 0xc3, 0xea, 0x5e, 0x32, 0xac, 0xe5, 0x39,
3425 0x61, 0xfd, 0x47, 0x09, 0xae, 0x1e, 0x0c, 0xe3, 0x88, 0xf1, 0xc9, 0x5b, 0xc4, 0x5b, 0x05, 0xa3,
3426 0xba, 0x02, 0xa5, 0xc0, 0xd7, 0xef, 0xcf, 0x52, 0xe0, 0x7b, 0xa7, 0xd0, 0x50, 0xea, 0x68, 0x5a,
3427 0x52, 0xcf, 0x7d, 0xbd, 0x14, 0x4a, 0x08, 0x85, 0xb2, 0x1d, 0xe6, 0x4e, 0x38, 0xcc, 0xfb, 0xa5,
3428 0x1d, 0x8d, 0x0f, 0x00, 0x05, 0x9a, 0x46, 0xd7, 0x5c, 0xb7, 0x4d, 0x5b, 0xb8, 0x69, 0x99, 0xc8,
3429 0xd9, 0x7a, 0x7b, 0x9a, 0x3f, 0x5e, 0x0b, 0xa6, 0x66, 0x92, 0x8b, 0x57, 0xdf, 0xbf, 0x38, 0xb0,
3430 0x22, 0xfa, 0x4d, 0xd6, 0xe2, 0x3f, 0xbe, 0xe6, 0xce, 0x26, 0x5e, 0x3e, 0x95, 0x42, 0xa9, 0xa9,
3431 0xdd, 0x7c, 0xe1, 0xfd, 0xfd, 0xd4, 0x81, 0x6b, 0xe6, 0x99, 0x22, 0xda, 0x7a, 0xde, 0x93, 0xec,
3432 0xd4, 0xe2, 0x75, 0x4b, 0x54, 0x85, 0x14, 0x3b, 0xff, 0x51, 0x66, 0xa3, 0x2e, 0xce, 0xee, 0x67,
3433 0x0e, 0x7c, 0xd2, 0x5c, 0xb2, 0x2c, 0x8a, 0x1f, 0xc1, 0xb3, 0xe0, 0x23, 0xb9, 0x8c, 0xfc, 0xcd,
3434 0x81, 0xb5, 0x94, 0x56, 0x7a, 0x23, 0x49, 0x2e, 0x4e, 0x0b, 0xbd, 0x0d, 0xd0, 0x8b, 0xc2, 0x90,
3435 0xf6, 0xb8, 0xb9, 0xe7, 0x9f, 0x55, 0x73, 0x33, 0xa8, 0xf7, 0x2d, 0x6b, 0x3f, 0xd7, 0x61, 0x21,
3436 0x1a, 0xf1, 0x78, 0xc4, 0x75, 0x4a, 0xea, 0xd1, 0x85, 0xc3, 0xb0, 0xf5, 0xe3, 0x1a, 0x54, 0xcd,
3437 0x93, 0x0c, 0x7d, 0x13, 0x6a, 0xfb, 0x94, 0xeb, 0x1f, 0xab, 0x3e, 0x77, 0xce, 0x6b, 0x57, 0x25,
3438 0xd0, 0xe7, 0x0b, 0xbd, 0x89, 0xd1, 0x60, 0xce, 0xfb, 0x0f, 0x6d, 0x5a, 0xeb, 0x73, 0x11, 0xa9,
3439 0xa5, 0xd7, 0x0a, 0x20, 0xb5, 0xb5, 0xef, 0x9c, 0xf5, 0xf8, 0x40, 0x37, 0x2c, 0x45, 0xf3, 0x61,
3440 0xa9, 0xdd, 0x76, 0x51, 0xb8, 0x36, 0x3e, 0x9a, 0xff, 0x78, 0x40, 0xaf, 0xe7, 0xe8, 0x9a, 0x06,
3441 0xa5, 0x86, 0xdf, 0x28, 0x06, 0xd6, 0x66, 0x83, 0xfc, 0x37, 0x28, 0xda, 0xb0, 0xb4, 0xe4, 0x01,
3442 0x52, 0x73, 0x9b, 0xe7, 0x03, 0xb5, 0xa9, 0xbb, 0xd6, 0x1b, 0x03, 0xbd, 0x62, 0x2d, 0x4b, 0x67,
3443 0x53, 0xa5, 0xaf, 0xce, 0x91, 0x6a, 0x4d, 0xdf, 0x98, 0xbc, 0xf1, 0xa3, 0x4f, 0xdb, 0x6f, 0x5b,
3444 0x4b, 0x90, 0xea, 0x5b, 0x9f, 0x0f, 0xd0, 0x2a, 0x7b, 0x79, 0x57, 0x6a, 0x64, 0xa7, 0xe9, 0xac,
3445 0x38, 0x55, 0xff, 0x85, 0xf3, 0x60, 0xda, 0xc8, 0x71, 0xee, 0x05, 0x0c, 0xd9, 0xcb, 0x73, 0xe4,
3446 0xa9, 0x99, 0x8d, 0x73, 0x71, 0x99, 0x9d, 0x9c, 0xb6, 0x38, 0x61, 0x27, 0xaf, 0x6d, 0xe6, 0xd9,
3447 0xc9, 0xc7, 0x69, 0x3b, 0x8f, 0xa7, 0x3b, 0x21, 0xfa, 0xcc, 0x94, 0xa3, 0x33, 0x51, 0xaa, 0xbd,
3448 0x75, 0x16, 0x44, 0x2b, 0xfe, 0xb2, 0xfa, 0x29, 0x1f, 0x4d, 0xfc, 0x12, 0xca, 0xa3, 0x38, 0x55,
3449 0xd2, 0x9c, 0x15, 0xa8, 0xa5, 0x5b, 0xdf, 0x77, 0xa1, 0x6e, 0x35, 0x06, 0xf4, 0x81, 0x5d, 0x9c,
3450 0x36, 0x72, 0xca, 0x8e, 0xdd, 0xe3, 0x72, 0xb3, 0x7a, 0x0e, 0x50, 0x53, 0x3d, 0x3d, 0xa3, 0x1f,
3451 0xa1, 0xbc, 0xb3, 0x38, 0x83, 0x4a, 0x8d, 0xde, 0x28, 0x88, 0xd6, 0x96, 0x9f, 0xe4, 0xb4, 0x9a,
3452 0x89, 0xf2, 0x3b, 0x23, 0xcd, 0x2d, 0xbf, 0x79, 0x28, 0x65, 0xe1, 0x4d, 0xe7, 0x12, 0x81, 0x78,
3453 0xb2, 0x20, 0xff, 0xa3, 0xbb, 0xf5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x16, 0x0b, 0x32,
3454 0xb6, 0x1b, 0x00, 0x00,
3455}
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
deleted file mode 100644
index 370faf7..0000000
--- a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
+++ /dev/null
@@ -1,351 +0,0 @@
1// Terraform Plugin RPC protocol version 5.0
2//
3// This file defines version 5.0 of the RPC protocol. To implement a plugin
4// against this protocol, copy this definition into your own codebase and
5// use protoc to generate stubs for your target language.
6//
7// This file will be updated in-place in the source Terraform repository for
8// any minor versions of protocol 5, but later minor versions will always be
9// backwards compatible. Breaking changes, if any are required, will come
10// in a subsequent major version with its own separate proto definition.
11//
12// Note that only the proto files included in a release tag of Terraform are
13// official protocol releases. Proto files taken from other commits may include
14// incomplete changes or features that did not make it into a final release.
15// In all reasonable cases, plugin developers should take the proto file from
16// the tag of the most recent release of Terraform, and not from the master
17// branch or any other development branch.
18//
19syntax = "proto3";
20
21package tfplugin5;
22
23// DynamicValue is an opaque encoding of terraform data, with the field name
24// indicating the encoding scheme used.
25message DynamicValue {
26 bytes msgpack = 1;
27 bytes json = 2;
28}
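
A minimal Go sketch of how a consumer might branch on whichever DynamicValue field is populated, using a stand-in struct so the snippet is self-contained; Terraform itself decodes against the resource schema with cty, which is elided here.

    package sketch

    import (
        "encoding/json"
        "errors"
    )

    // Stand-in for the generated tfplugin5.DynamicValue type.
    type DynamicValue struct {
        Msgpack []byte
        Json    []byte
    }

    // decode picks a decoder based on which field carries the payload.
    func decode(v *DynamicValue) (interface{}, error) {
        switch {
        case len(v.Msgpack) > 0:
            // Real msgpack decoding needs the resource schema, so it is
            // deliberately stubbed out in this sketch.
            return nil, errors.New("msgpack decoding not shown")
        case len(v.Json) > 0:
            var out interface{}
            err := json.Unmarshal(v.Json, &out)
            return out, err
        default:
            return nil, errors.New("empty DynamicValue")
        }
    }
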
29
30message Diagnostic {
31 enum Severity {
32 INVALID = 0;
33 ERROR = 1;
34 WARNING = 2;
35 }
36 Severity severity = 1;
37 string summary = 2;
38 string detail = 3;
39 AttributePath attribute = 4;
40}
41
42message AttributePath {
43 message Step {
44 oneof selector {
45 // Set "attribute_name" to represent looking up an attribute
46 // in the current object value.
47 string attribute_name = 1;
48 // Set "element_key_*" to represent looking up an element in
49 // an indexable collection type.
50 string element_key_string = 2;
51 int64 element_key_int = 3;
52 }
53 }
54 repeated Step steps = 1;
55}
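
A hedged Go fragment showing how the oneof steps compose: assuming the standard protoc-gen-go wrapper types generated from this message, the attribute address list_attr[2].name would be expressed as three steps.

    path := &tfplugin5.AttributePath{
        Steps: []*tfplugin5.AttributePath_Step{
            {Selector: &tfplugin5.AttributePath_Step_AttributeName{AttributeName: "list_attr"}},
            {Selector: &tfplugin5.AttributePath_Step_ElementKeyInt{ElementKeyInt: 2}},
            {Selector: &tfplugin5.AttributePath_Step_AttributeName{AttributeName: "name"}},
        },
    }
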
56
57message Stop {
58 message Request {
59 }
60 message Response {
61 string Error = 1;
62 }
63}
64
65// RawState holds the stored state for a resource to be upgraded by the
66// provider. It can be in one of two formats, the current json encoded format
67// in bytes, or the legacy flatmap format as a map of strings.
68message RawState {
69 bytes json = 1;
70 map<string, string> flatmap = 2;
71}
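
A small sketch, assuming the generated tfplugin5 bindings, of how a provider can tell which of the two encodings it was handed before choosing an upgrade path:

    func rawStateFormat(rs *tfplugin5.RawState) string {
        switch {
        case len(rs.Json) > 0:
            return "json" // current format
        case len(rs.Flatmap) > 0:
            return "flatmap" // legacy format
        default:
            return "empty"
        }
    }
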
72
73// Schema is the configuration schema for a Resource, Provider, or Provisioner.
74message Schema {
75 message Block {
76 int64 version = 1;
77 repeated Attribute attributes = 2;
78 repeated NestedBlock block_types = 3;
79 }
80
81 message Attribute {
82 string name = 1;
83 bytes type = 2;
84 string description = 3;
85 bool required = 4;
86 bool optional = 5;
87 bool computed = 6;
88 bool sensitive = 7;
89 }
90
91 message NestedBlock {
92 enum NestingMode {
93 INVALID = 0;
94 SINGLE = 1;
95 LIST = 2;
96 SET = 3;
97 MAP = 4;
98 GROUP = 5;
99 }
100
101 string type_name = 1;
102 Block block = 2;
103 NestingMode nesting = 3;
104 int64 min_items = 4;
105 int64 max_items = 5;
106 }
107
108 // The version of the schema.
109 // Schemas are versioned, so that providers can upgrade a saved resource
110 // state when the schema is changed.
111 int64 version = 1;
112
113 // Block is the top level configuration block for this schema.
114 Block block = 2;
115}
116
117service Provider {
118 //////// Information about what a provider supports/expects
119 rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
120 rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
121 rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
122 rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
123 rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
124
125 //////// One-time initialization, called before other functions below
126 rpc Configure(Configure.Request) returns (Configure.Response);
127
128 //////// Managed Resource Lifecycle
129 rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
130 rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
131 rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
132 rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
133
134 rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
135
136 //////// Graceful Shutdown
137 rpc Stop(Stop.Request) returns (Stop.Response);
138}
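
A sketch of invoking the Provider service once a gRPC connection exists. In practice Terraform negotiates the connection through go-plugin rather than dialing a fixed address, so "localhost:1234" and the surrounding setup are illustrative only (imports of context, log, google.golang.org/grpc, and the generated tfplugin5 package assumed).

    conn, err := grpc.Dial("localhost:1234", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := tfplugin5.NewProviderClient(conn)
    resp, err := client.GetSchema(context.Background(), &tfplugin5.GetProviderSchema_Request{})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("provider exposes %d resource schemas", len(resp.ResourceSchemas))
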
139
140message GetProviderSchema {
141 message Request {
142 }
143 message Response {
144 Schema provider = 1;
145 map<string, Schema> resource_schemas = 2;
146 map<string, Schema> data_source_schemas = 3;
147 repeated Diagnostic diagnostics = 4;
148 }
149}
150
151message PrepareProviderConfig {
152 message Request {
153 DynamicValue config = 1;
154 }
155 message Response {
156 DynamicValue prepared_config = 1;
157 repeated Diagnostic diagnostics = 2;
158 }
159}
160
161message UpgradeResourceState {
162 message Request {
163 string type_name = 1;
164
165 // version is the schema_version number recorded in the state file
166 int64 version = 2;
167
 168    // raw_state is the raw state as stored for the resource. Core does
169 // not have access to the schema of prior_version, so it's the
170 // provider's responsibility to interpret this value using the
171 // appropriate older schema. The raw_state will be the json encoded
172 // state, or a legacy flat-mapped format.
173 RawState raw_state = 3;
174 }
175 message Response {
176 // new_state is a msgpack-encoded data structure that, when interpreted with
177 // the _current_ schema for this resource type, is functionally equivalent to
 178    // that which was given in raw_state.
179 DynamicValue upgraded_state = 1;
180
181 // diagnostics describes any errors encountered during migration that could not
182 // be safely resolved, and warnings about any possibly-risky assumptions made
183 // in the upgrade process.
184 repeated Diagnostic diagnostics = 2;
185 }
186}
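
A provider-side handler sketch for this RPC. decodeWithVersion, migrateToCurrent, and encodeCurrentMsgpack are hypothetical helpers standing in for the schema-aware (de)serialization the comments above describe.

    func (s *server) UpgradeResourceState(ctx context.Context,
        req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) {

        // req.Version identifies which historical schema wrote
        // req.RawState; the provider, not Core, picks the decoder.
        old, err := decodeWithVersion(req.RawState, req.Version) // hypothetical helper
        if err != nil {
            return nil, err
        }

        // Re-encode against the *current* schema, as the Response
        // comments require. Both helpers are hypothetical.
        mp, err := encodeCurrentMsgpack(migrateToCurrent(old))
        if err != nil {
            return nil, err
        }
        return &tfplugin5.UpgradeResourceState_Response{
            UpgradedState: &tfplugin5.DynamicValue{Msgpack: mp},
        }, nil
    }
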
187
188message ValidateResourceTypeConfig {
189 message Request {
190 string type_name = 1;
191 DynamicValue config = 2;
192 }
193 message Response {
194 repeated Diagnostic diagnostics = 1;
195 }
196}
197
198message ValidateDataSourceConfig {
199 message Request {
200 string type_name = 1;
201 DynamicValue config = 2;
202 }
203 message Response {
204 repeated Diagnostic diagnostics = 1;
205 }
206}
207
208message Configure {
209 message Request {
210 string terraform_version = 1;
211 DynamicValue config = 2;
212 }
213 message Response {
214 repeated Diagnostic diagnostics = 1;
215 }
216}
217
218message ReadResource {
219 message Request {
220 string type_name = 1;
221 DynamicValue current_state = 2;
222 }
223 message Response {
224 DynamicValue new_state = 1;
225 repeated Diagnostic diagnostics = 2;
226 }
227}
228
229message PlanResourceChange {
230 message Request {
231 string type_name = 1;
232 DynamicValue prior_state = 2;
233 DynamicValue proposed_new_state = 3;
234 DynamicValue config = 4;
235 bytes prior_private = 5;
236 }
237
238 message Response {
239 DynamicValue planned_state = 1;
240 repeated AttributePath requires_replace = 2;
241 bytes planned_private = 3;
242 repeated Diagnostic diagnostics = 4;
243
244
245 // This may be set only by the helper/schema "SDK" in the main Terraform
246 // repository, to request that Terraform Core >=0.12 permit additional
247 // inconsistencies that can result from the legacy SDK type system
248 // and its imprecise mapping to the >=0.12 type system.
249 // The change in behavior implied by this flag makes sense only for the
 250    // specific details of the legacy SDK type system, and is not a general
251 // mechanism to avoid proper type handling in providers.
252 //
253 // ==== DO NOT USE THIS ====
254 // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
255 // ==== DO NOT USE THIS ====
256 bool legacy_type_system = 5;
257 }
258}
259
260message ApplyResourceChange {
261 message Request {
262 string type_name = 1;
263 DynamicValue prior_state = 2;
264 DynamicValue planned_state = 3;
265 DynamicValue config = 4;
266 bytes planned_private = 5;
267 }
268 message Response {
269 DynamicValue new_state = 1;
270 bytes private = 2;
271 repeated Diagnostic diagnostics = 3;
272
273 // This may be set only by the helper/schema "SDK" in the main Terraform
274 // repository, to request that Terraform Core >=0.12 permit additional
275 // inconsistencies that can result from the legacy SDK type system
276 // and its imprecise mapping to the >=0.12 type system.
277 // The change in behavior implied by this flag makes sense only for the
 278    // specific details of the legacy SDK type system, and is not a general
279 // mechanism to avoid proper type handling in providers.
280 //
281 // ==== DO NOT USE THIS ====
282 // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
283 // ==== DO NOT USE THIS ====
284 bool legacy_type_system = 4;
285 }
286}
287
288message ImportResourceState {
289 message Request {
290 string type_name = 1;
291 string id = 2;
292 }
293
294 message ImportedResource {
295 string type_name = 1;
296 DynamicValue state = 2;
297 bytes private = 3;
298 }
299
300 message Response {
301 repeated ImportedResource imported_resources = 1;
302 repeated Diagnostic diagnostics = 2;
303 }
304}
305
306message ReadDataSource {
307 message Request {
308 string type_name = 1;
309 DynamicValue config = 2;
310 }
311 message Response {
312 DynamicValue state = 1;
313 repeated Diagnostic diagnostics = 2;
314 }
315}
316
317service Provisioner {
318 rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response);
319 rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response);
320 rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response);
321 rpc Stop(Stop.Request) returns (Stop.Response);
322}
323
324message GetProvisionerSchema {
325 message Request {
326 }
327 message Response {
328 Schema provisioner = 1;
329 repeated Diagnostic diagnostics = 2;
330 }
331}
332
333message ValidateProvisionerConfig {
334 message Request {
335 DynamicValue config = 1;
336 }
337 message Response {
338 repeated Diagnostic diagnostics = 1;
339 }
340}
341
342message ProvisionResource {
343 message Request {
344 DynamicValue config = 1;
345 DynamicValue connection = 2;
346 }
347 message Response {
348 string output = 1;
349 repeated Diagnostic diagnostics = 2;
350 }
351}
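
ProvisionResource is the one server-streaming RPC in this file, so provisioner output arrives incrementally. A rough client-side loop, assuming a connected tfplugin5.ProvisionerClient and prepared config/connection values:

    stream, err := client.ProvisionResource(ctx, &tfplugin5.ProvisionResource_Request{
        Config:     config,
        Connection: connInfo,
    })
    if err != nil {
        return err
    }
    for {
        resp, err := stream.Recv()
        if err == io.EOF {
            break // provisioner finished
        }
        if err != nil {
            return err
        }
        fmt.Print(resp.Output) // diagnostics handling omitted
    }
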
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
index 2f2463a..47a0256 100644
--- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
@@ -55,10 +55,11 @@ func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[st
55 }, 55 },
56 } 56 }
57 content, _, _ = body.PartialContent(&probeSchema) 57 content, _, _ = body.PartialContent(&probeSchema)
58 if len(content.Blocks) > 0 { 58 if len(content.Blocks) > 0 || dynamicExpanded {
59 // No attribute present and at least one block present, so 59 // A dynamic block with an empty iterator returns nothing.
60 // we'll need to rewrite this one as a block for a successful 60 // If there's no attribute and we have either a block or a
61 // result. 61 // dynamic expansion, we need to rewrite this one as a
62 // block for a successful result.
62 appearsAsBlock[name] = struct{}{} 63 appearsAsBlock[name] = struct{}{}
63 } 64 }
64 } 65 }
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
index e123b8a..b172805 100644
--- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
@@ -33,7 +33,7 @@ func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *confi
33 for _, child := range children { 33 for _, child := range children {
34 if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { 34 if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists {
35 vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) 35 vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...)
36 } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists { 36 } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.ElementType().IsObjectType() {
37 synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) 37 synthSchema := SchemaForCtyElementType(attrS.Type.ElementType())
38 vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 38 vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...)
39 } 39 }
diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go
index 80313d6..eca588e 100644
--- a/vendor/github.com/hashicorp/terraform/lang/data.go
+++ b/vendor/github.com/hashicorp/terraform/lang/data.go
@@ -23,6 +23,7 @@ type Data interface {
23 StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics 23 StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics
24 24
25 GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) 25 GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
26 GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
26 GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) 27 GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
27 GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) 28 GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
28 GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) 29 GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go
index a3fb363..a8fe8b6 100644
--- a/vendor/github.com/hashicorp/terraform/lang/eval.go
+++ b/vendor/github.com/hashicorp/terraform/lang/eval.go
@@ -203,6 +203,7 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl
203 pathAttrs := map[string]cty.Value{} 203 pathAttrs := map[string]cty.Value{}
204 terraformAttrs := map[string]cty.Value{} 204 terraformAttrs := map[string]cty.Value{}
205 countAttrs := map[string]cty.Value{} 205 countAttrs := map[string]cty.Value{}
206 forEachAttrs := map[string]cty.Value{}
206 var self cty.Value 207 var self cty.Value
207 208
208 for _, ref := range refs { 209 for _, ref := range refs {
@@ -334,6 +335,14 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl
334 self = val 335 self = val
335 } 336 }
336 337
338 case addrs.ForEachAttr:
339 val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng))
340 diags = diags.Append(valDiags)
341 forEachAttrs[subj.Name] = val
342 if isSelf {
343 self = val
344 }
345
337 default: 346 default:
338 // Should never happen 347 // Should never happen
339 panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) 348 panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj))
@@ -350,6 +359,7 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl
350 vals["path"] = cty.ObjectVal(pathAttrs) 359 vals["path"] = cty.ObjectVal(pathAttrs)
351 vals["terraform"] = cty.ObjectVal(terraformAttrs) 360 vals["terraform"] = cty.ObjectVal(terraformAttrs)
352 vals["count"] = cty.ObjectVal(countAttrs) 361 vals["count"] = cty.ObjectVal(countAttrs)
362 vals["each"] = cty.ObjectVal(forEachAttrs)
353 if self != cty.NilVal { 363 if self != cty.NilVal {
354 vals["self"] = self 364 vals["self"] = self
355 } 365 }
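
This hunk publishes for_each references under a new top-level "each" object. A rough cty illustration of the shape GetForEachAttr feeds into vals["each"] for one map element (the concrete values are invented):

    each := cty.ObjectVal(map[string]cty.Value{
        "key":   cty.StringVal("us-east-1"),
        "value": cty.StringVal("ami-0123456"),
    })
    // With vals["each"] set to this object, expressions can refer to
    // each.key and each.value.
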
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
index 71b7a84..bcccc1f 100644
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
@@ -246,7 +246,7 @@ var CompactFunc = function.New(&function.Spec{
246 246
247 for it := listVal.ElementIterator(); it.Next(); { 247 for it := listVal.ElementIterator(); it.Next(); {
248 _, v := it.Element() 248 _, v := it.Element()
249 if v.AsString() == "" { 249 if v.IsNull() || v.AsString() == "" {
250 continue 250 continue
251 } 251 }
252 outputList = append(outputList, v) 252 outputList = append(outputList, v)
@@ -363,6 +363,9 @@ var DistinctFunc = function.New(&function.Spec{
363 } 363 }
364 } 364 }
365 365
366 if len(list) == 0 {
367 return cty.ListValEmpty(retType.ElementType()), nil
368 }
366 return cty.ListVal(list), nil 369 return cty.ListVal(list), nil
367 }, 370 },
368}) 371})
@@ -389,6 +392,10 @@ var ChunklistFunc = function.New(&function.Spec{
389 return cty.UnknownVal(retType), nil 392 return cty.UnknownVal(retType), nil
390 } 393 }
391 394
395 if listVal.LengthInt() == 0 {
396 return cty.ListValEmpty(listVal.Type()), nil
397 }
398
392 var size int 399 var size int
393 err = gocty.FromCtyValue(args[1], &size) 400 err = gocty.FromCtyValue(args[1], &size)
394 if err != nil { 401 if err != nil {
@@ -686,8 +693,10 @@ var LookupFunc = function.New(&function.Spec{
686 return cty.StringVal(v.AsString()), nil 693 return cty.StringVal(v.AsString()), nil
687 case ty.Equals(cty.Number): 694 case ty.Equals(cty.Number):
688 return cty.NumberVal(v.AsBigFloat()), nil 695 return cty.NumberVal(v.AsBigFloat()), nil
696 case ty.Equals(cty.Bool):
697 return cty.BoolVal(v.True()), nil
689 default: 698 default:
690 return cty.NilVal, errors.New("lookup() can only be used with flat lists") 699 return cty.NilVal, errors.New("lookup() can only be used with maps of primitive types")
691 } 700 }
692 } 701 }
693 } 702 }
@@ -797,10 +806,12 @@ var MatchkeysFunc = function.New(&function.Spec{
797 }, 806 },
798 }, 807 },
799 Type: func(args []cty.Value) (cty.Type, error) { 808 Type: func(args []cty.Value) (cty.Type, error) {
800 if !args[1].Type().Equals(args[2].Type()) { 809 ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()})
801 return cty.NilType, errors.New("lists must be of the same type") 810 if ty == cty.NilType {
811 return cty.NilType, errors.New("keys and searchset must be of the same type")
802 } 812 }
803 813
814 // the return type is based on args[0] (values)
804 return args[0].Type(), nil 815 return args[0].Type(), nil
805 }, 816 },
806 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { 817 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
@@ -813,10 +824,14 @@ var MatchkeysFunc = function.New(&function.Spec{
813 } 824 }
814 825
815 output := make([]cty.Value, 0) 826 output := make([]cty.Value, 0)
816
817 values := args[0] 827 values := args[0]
818 keys := args[1] 828
819 searchset := args[2] 829 // Keys and searchset must be the same type.
830 // We can skip error checking here because we've already verified that
831 // they can be unified in the Type function
832 ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()})
833 keys, _ := convert.Convert(args[1], ty)
834 searchset, _ := convert.Convert(args[2], ty)
820 835
821 // if searchset is empty, return an empty list. 836 // if searchset is empty, return an empty list.
822 if searchset.LengthInt() == 0 { 837 if searchset.LengthInt() == 0 {
@@ -867,7 +882,6 @@ var MergeFunc = function.New(&function.Spec{
867 Name: "maps", 882 Name: "maps",
868 Type: cty.DynamicPseudoType, 883 Type: cty.DynamicPseudoType,
869 AllowDynamicType: true, 884 AllowDynamicType: true,
870 AllowNull: true,
871 }, 885 },
872 Type: function.StaticReturnType(cty.DynamicPseudoType), 886 Type: function.StaticReturnType(cty.DynamicPseudoType),
873 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { 887 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
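
Of these hunks, the lookup() change is the easiest to check in isolation: maps of bools now resolve instead of falling through to the error branch. A hedged example via the exported function value (cty and the funcs package imports assumed):

    m := cty.MapVal(map[string]cty.Value{
        "enabled": cty.BoolVal(true),
    })
    v, err := funcs.LookupFunc.Call([]cty.Value{
        m, cty.StringVal("enabled"), cty.BoolVal(false),
    })
    // v is cty.True and err is nil; before the new cty.Bool case, a
    // bool value fell through to the default branch and errored.
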
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
index 5cb4bc5..be006f8 100644
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
@@ -14,6 +14,7 @@ import (
14 "hash" 14 "hash"
15 15
16 uuid "github.com/hashicorp/go-uuid" 16 uuid "github.com/hashicorp/go-uuid"
17 uuidv5 "github.com/satori/go.uuid"
17 "github.com/zclconf/go-cty/cty" 18 "github.com/zclconf/go-cty/cty"
18 "github.com/zclconf/go-cty/cty/function" 19 "github.com/zclconf/go-cty/cty/function"
19 "github.com/zclconf/go-cty/cty/gocty" 20 "github.com/zclconf/go-cty/cty/gocty"
@@ -32,6 +33,39 @@ var UUIDFunc = function.New(&function.Spec{
32 }, 33 },
33}) 34})
34 35
36var UUIDV5Func = function.New(&function.Spec{
37 Params: []function.Parameter{
38 {
39 Name: "namespace",
40 Type: cty.String,
41 },
42 {
43 Name: "name",
44 Type: cty.String,
45 },
46 },
47 Type: function.StaticReturnType(cty.String),
48 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
49 var namespace uuidv5.UUID
50 switch {
51 case args[0].AsString() == "dns":
52 namespace = uuidv5.NamespaceDNS
53 case args[0].AsString() == "url":
54 namespace = uuidv5.NamespaceURL
55 case args[0].AsString() == "oid":
56 namespace = uuidv5.NamespaceOID
57 case args[0].AsString() == "x500":
58 namespace = uuidv5.NamespaceX500
59 default:
60 if namespace, err = uuidv5.FromString(args[0].AsString()); err != nil {
61 return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err)
62 }
63 }
64 val := args[1].AsString()
65 return cty.StringVal(uuidv5.NewV5(namespace, val).String()), nil
66 },
67})
68
35// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string 69// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string
36// and encodes it with Base64. 70// and encodes it with Base64.
37var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) 71var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)
@@ -228,6 +262,12 @@ func UUID() (cty.Value, error) {
228 return UUIDFunc.Call(nil) 262 return UUIDFunc.Call(nil)
229} 263}
230 264
265// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string
266// format.
267func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) {
268 return UUIDV5Func.Call([]cty.Value{namespace, name})
269}
270
231// Base64Sha256 computes the SHA256 hash of a given string and encodes it with 271// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
232// Base64. 272// Base64.
233// 273//
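
The convenience wrapper added above makes the new function easy to exercise from Go; "dns" selects the predefined DNS namespace, and any RFC 4122 UUID string is also accepted:

    v, err := funcs.UUIDV5(cty.StringVal("dns"), cty.StringVal("www.example.com"))
    if err == nil {
        fmt.Println(v.AsString()) // deterministic for a given namespace+name pair
    }
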
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
index 7dfc905..016b102 100644
--- a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
@@ -237,6 +237,21 @@ var DirnameFunc = function.New(&function.Spec{
237 }, 237 },
238}) 238})
239 239
240// AbsPathFunc constructs a function that converts a filesystem path to an absolute path
241var AbsPathFunc = function.New(&function.Spec{
242 Params: []function.Parameter{
243 {
244 Name: "path",
245 Type: cty.String,
246 },
247 },
248 Type: function.StaticReturnType(cty.String),
249 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
250 absPath, err := filepath.Abs(args[0].AsString())
251 return cty.StringVal(filepath.ToSlash(absPath)), err
252 },
253})
254
240// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. 255// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
241var PathExpandFunc = function.New(&function.Spec{ 256var PathExpandFunc = function.New(&function.Spec{
242 Params: []function.Parameter{ 257 Params: []function.Parameter{
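
AbsPathFunc resolves relative to the process working directory and normalizes with filepath.ToSlash, so results use forward slashes on every platform. A quick call through the function value (cty import assumed):

    v, err := funcs.AbsPathFunc.Call([]cty.Value{cty.StringVal("./modules/vpc")})
    // On a POSIX system with a working directory of /work, v would be
    // "/work/modules/vpc".
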
diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go
index 2c7b548..b77a55f 100644
--- a/vendor/github.com/hashicorp/terraform/lang/functions.go
+++ b/vendor/github.com/hashicorp/terraform/lang/functions.go
@@ -3,6 +3,7 @@ package lang
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 ctyyaml "github.com/zclconf/go-cty-yaml"
6 "github.com/zclconf/go-cty/cty" 7 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function" 8 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/function/stdlib" 9 "github.com/zclconf/go-cty/cty/function/stdlib"
@@ -30,6 +31,7 @@ func (s *Scope) Functions() map[string]function.Function {
30 31
31 s.funcs = map[string]function.Function{ 32 s.funcs = map[string]function.Function{
32 "abs": stdlib.AbsoluteFunc, 33 "abs": stdlib.AbsoluteFunc,
34 "abspath": funcs.AbsPathFunc,
33 "basename": funcs.BasenameFunc, 35 "basename": funcs.BasenameFunc,
34 "base64decode": funcs.Base64DecodeFunc, 36 "base64decode": funcs.Base64DecodeFunc,
35 "base64encode": funcs.Base64EncodeFunc, 37 "base64encode": funcs.Base64EncodeFunc,
@@ -85,6 +87,7 @@ func (s *Scope) Functions() map[string]function.Function {
85 "min": stdlib.MinFunc, 87 "min": stdlib.MinFunc,
86 "pathexpand": funcs.PathExpandFunc, 88 "pathexpand": funcs.PathExpandFunc,
87 "pow": funcs.PowFunc, 89 "pow": funcs.PowFunc,
90 "range": stdlib.RangeFunc,
88 "replace": funcs.ReplaceFunc, 91 "replace": funcs.ReplaceFunc,
89 "reverse": funcs.ReverseFunc, 92 "reverse": funcs.ReverseFunc,
90 "rsadecrypt": funcs.RsaDecryptFunc, 93 "rsadecrypt": funcs.RsaDecryptFunc,
@@ -114,7 +117,10 @@ func (s *Scope) Functions() map[string]function.Function {
114 "upper": stdlib.UpperFunc, 117 "upper": stdlib.UpperFunc,
115 "urlencode": funcs.URLEncodeFunc, 118 "urlencode": funcs.URLEncodeFunc,
116 "uuid": funcs.UUIDFunc, 119 "uuid": funcs.UUIDFunc,
120 "uuidv5": funcs.UUIDV5Func,
117 "values": funcs.ValuesFunc, 121 "values": funcs.ValuesFunc,
122 "yamldecode": ctyyaml.YAMLDecodeFunc,
123 "yamlencode": ctyyaml.YAMLEncodeFunc,
118 "zipmap": funcs.ZipmapFunc, 124 "zipmap": funcs.ZipmapFunc,
119 } 125 }
120 126
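
Among the new registrations, range comes from the vendored go-cty stdlib rather than the funcs package. A quick behavioral check (cty and stdlib imports assumed):

    v, err := stdlib.RangeFunc.Call([]cty.Value{cty.NumberIntVal(3)})
    // v is the number list [0, 1, 2], i.e. range(3) in configuration;
    // two- and three-argument forms add a start value and a step.
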
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
index 8b7ef43..d85086c 100644
--- a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
@@ -84,7 +84,7 @@ func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Valu
84 // whether there are dynamically-typed attributes inside. However, 84 // whether there are dynamically-typed attributes inside. However,
85 // both support a similar-enough API that we can treat them the 85 // both support a similar-enough API that we can treat them the
86 // same for our purposes here. 86 // same for our purposes here.
87 if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { 87 if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
88 continue 88 continue
89 } 89 }
90 90
@@ -169,6 +169,16 @@ func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Valu
169 }) 169 })
170 errs = append(errs, setErrs...) 170 errs = append(errs, setErrs...)
171 171
172 if maybeUnknownBlocks {
173 // When unknown blocks are present the final number of blocks
174 // may be different, either because the unknown set values
 175 // become equal and are collapsed, or the count is unknown due to
 176 // a dynamic block. Unfortunately this means we can't do our
177 // usual checks in this case without generating false
178 // negatives.
179 continue
180 }
181
172 // There can be fewer elements in a set after its elements are all 182 // There can be fewer elements in a set after its elements are all
173 // known (values that turn out to be equal will coalesce) but the 183 // known (values that turn out to be equal will coalesce) but the
174 // number of elements must never get larger. 184 // number of elements must never get larger.
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
index b1d01fb..c1d9e3b 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
@@ -204,6 +204,9 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, t
204 } 204 }
205 205
206 downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version) 206 downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version)
207 if err != nil {
208 return PluginMeta{}, diags, err
209 }
207 providerURL := downloadURLs.DownloadURL 210 providerURL := downloadURLs.DownloadURL
208 211
209 if !i.SkipVerify { 212 if !i.SkipVerify {
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
index ae9a400..5b190e2 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
@@ -330,6 +330,7 @@ func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp provi
330 protoReq := &proto.ReadResource_Request{ 330 protoReq := &proto.ReadResource_Request{
331 TypeName: r.TypeName, 331 TypeName: r.TypeName,
332 CurrentState: &proto.DynamicValue{Msgpack: mp}, 332 CurrentState: &proto.DynamicValue{Msgpack: mp},
333 Private: r.Private,
333 } 334 }
334 335
335 protoResp, err := p.client.ReadResource(p.ctx, protoReq) 336 protoResp, err := p.client.ReadResource(p.ctx, protoReq)
@@ -348,6 +349,7 @@ func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp provi
348 } 349 }
349 } 350 }
350 resp.NewState = state 351 resp.NewState = state
352 resp.Private = protoResp.Private
351 353
352 return resp 354 return resp
353} 355}
diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go
index 1aa08c2..7e0a74c 100644
--- a/vendor/github.com/hashicorp/terraform/providers/provider.go
+++ b/vendor/github.com/hashicorp/terraform/providers/provider.go
@@ -176,6 +176,10 @@ type ReadResourceRequest struct {
176 176
177 // PriorState contains the previously saved state value for this resource. 177 // PriorState contains the previously saved state value for this resource.
178 PriorState cty.Value 178 PriorState cty.Value
179
180 // Private is an opaque blob that will be stored in state along with the
181 // resource. It is intended only for interpretation by the provider itself.
182 Private []byte
179} 183}
180 184
181type ReadResourceResponse struct { 185type ReadResourceResponse struct {
@@ -184,6 +188,10 @@ type ReadResourceResponse struct {
184 188
185 // Diagnostics contains any warnings or errors from the method call. 189 // Diagnostics contains any warnings or errors from the method call.
186 Diagnostics tfdiags.Diagnostics 190 Diagnostics tfdiags.Diagnostics
191
192 // Private is an opaque blob that will be stored in state along with the
193 // resource. It is intended only for interpretation by the provider itself.
194 Private []byte
187} 195}
188 196
189type PlanResourceChangeRequest struct { 197type PlanResourceChangeRequest struct {
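
A sketch of one way a provider might round-trip the new Private blob. The payload format is entirely provider-defined and opaque to Core; JSON and the ETag field here are invented for illustration (encoding/json and the providers package imports assumed).

    type privateData struct {
        ETag string `json:"etag"`
    }

    func readPrivate(req providers.ReadResourceRequest) (privateData, error) {
        var p privateData
        if len(req.Private) == 0 {
            return p, nil // first read, nothing stored yet
        }
        err := json.Unmarshal(req.Private, &p)
        return p, err
    }
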
diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
index ea717d0..8664f3b 100644
--- a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
+++ b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
@@ -147,7 +147,7 @@ func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
147 147
148 var private []byte 148 var private []byte
149 if obj.Private != nil { 149 if obj.Private != nil {
150 private := make([]byte, len(obj.Private)) 150 private = make([]byte, len(obj.Private))
151 copy(private, obj.Private) 151 copy(private, obj.Private)
152 } 152 }
153 153
@@ -181,14 +181,17 @@ func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
181 181
182 var private []byte 182 var private []byte
183 if obj.Private != nil { 183 if obj.Private != nil {
184 private := make([]byte, len(obj.Private)) 184 private = make([]byte, len(obj.Private))
185 copy(private, obj.Private) 185 copy(private, obj.Private)
186 } 186 }
187 187
188 // Some addrs.Referencable implementations are technically mutable, but 188 // Some addrs.Referenceable implementations are technically mutable, but
189 // we treat them as immutable by convention and so we don't deep-copy here. 189 // we treat them as immutable by convention and so we don't deep-copy here.
190 dependencies := make([]addrs.Referenceable, len(obj.Dependencies)) 190 var dependencies []addrs.Referenceable
191 copy(dependencies, obj.Dependencies) 191 if obj.Dependencies != nil {
192 dependencies = make([]addrs.Referenceable, len(obj.Dependencies))
193 copy(dependencies, obj.Dependencies)
194 }
192 195
193 return &ResourceInstanceObject{ 196 return &ResourceInstanceObject{
194 Value: obj.Value, 197 Value: obj.Value,
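
The first two hunks in this file fix the same one-character bug: ":=" inside the if block declared a new, shadowed slice, so the outer variable stayed nil and the copy was discarded. The buggy shape in isolation:

    func deepCopyPrivate(src []byte) []byte {
        var private []byte
        if src != nil {
            private := make([]byte, len(src)) // bug: ":=" shadows the outer slice
            copy(private, src)
        }
        return private // always nil here; changing ":=" to "=" is the fix
    }
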
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
index 6fe2ab8..be93924 100644
--- a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
@@ -205,5 +205,5 @@ type instanceStateV2 struct {
205type backendStateV2 struct { 205type backendStateV2 struct {
206 Type string `json:"type"` // Backend type 206 Type string `json:"type"` // Backend type
207 ConfigRaw json.RawMessage `json:"config"` // Backend raw config 207 ConfigRaw json.RawMessage `json:"config"` // Backend raw config
208 Hash int `json:"hash"` // Hash of portion of configuration from config files 208 Hash uint64 `json:"hash"` // Hash of portion of configuration from config files
209} 209}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
index 2cbe8a5..fbec547 100644
--- a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
@@ -79,7 +79,7 @@ func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
79 case addrs.DataResourceMode: 79 case addrs.DataResourceMode:
80 modeStr = "data" 80 modeStr = "data"
81 default: 81 default:
82 return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr) 82 return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode)
83 } 83 }
84 84
85 // In state versions prior to 4 we allowed each instance of a 85 // In state versions prior to 4 we allowed each instance of a
@@ -98,7 +98,7 @@ func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
98 var diags tfdiags.Diagnostics 98 var diags tfdiags.Diagnostics
99 providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) 99 providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr)
100 if diags.HasErrors() { 100 if diags.HasErrors() {
101 return nil, diags.Err() 101 return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err())
102 } 102 }
103 } else { 103 } else {
104 // Smells like an old-style module-local provider address, 104 // Smells like an old-style module-local provider address,
@@ -109,7 +109,7 @@ func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
109 if oldProviderAddr != "" { 109 if oldProviderAddr != "" {
110 localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) 110 localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr)
111 if diags.HasErrors() { 111 if diags.HasErrors() {
112 return nil, diags.Err() 112 return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err())
113 } 113 }
114 providerAddr = localAddr.Absolute(moduleAddr) 114 providerAddr = localAddr.Absolute(moduleAddr)
115 } else { 115 } else {
@@ -272,7 +272,7 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2,
272 instKeyRaw = string(tk) 272 instKeyRaw = string(tk)
273 default: 273 default:
274 if instKeyRaw != nil { 274 if instKeyRaw != nil {
275 return nil, fmt.Errorf("insupported instance key: %#v", instKey) 275 return nil, fmt.Errorf("unsupported instance key: %#v", instKey)
276 } 276 }
277 } 277 }
278 278
@@ -301,7 +301,11 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2,
301 301
302 dependencies := make([]string, len(rsOld.Dependencies)) 302 dependencies := make([]string, len(rsOld.Dependencies))
303 for i, v := range rsOld.Dependencies { 303 for i, v := range rsOld.Dependencies {
304 dependencies[i] = parseLegacyDependency(v) 304 depStr, err := parseLegacyDependency(v)
305 if err != nil {
306 return nil, fmt.Errorf("invalid dependency reference %q: %s", v, err)
307 }
308 dependencies[i] = depStr
305 } 309 }
306 310
307 return &instanceObjectStateV4{ 311 return &instanceObjectStateV4{
@@ -414,7 +418,7 @@ func simplifyImpliedValueType(ty cty.Type) cty.Type {
414 } 418 }
415} 419}
416 420
417func parseLegacyDependency(s string) string { 421func parseLegacyDependency(s string) (string, error) {
418 parts := strings.Split(s, ".") 422 parts := strings.Split(s, ".")
419 ret := parts[0] 423 ret := parts[0]
420 for _, part := range parts[1:] { 424 for _, part := range parts[1:] {
@@ -427,5 +431,14 @@ func parseLegacyDependency(s string) string {
427 } 431 }
428 ret = ret + "." + part 432 ret = ret + "." + part
429 } 433 }
430 return ret 434
435 // The result must parse as a reference, or else we'll create an invalid
436 // state file.
437 var diags tfdiags.Diagnostics
438 _, diags = addrs.ParseRefStr(ret)
439 if diags.HasErrors() {
440 return "", diags.Err()
441 }
442
443 return ret, nil
431} 444}
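
With this change parseLegacyDependency returns an error whenever the normalized string would not survive a final parse, and each caller wraps that error with the offending value. A rough standalone sketch of the new contract; the final check here is a trivial stand-in for addrs.ParseRefStr, and the interior normalization is elided as in the hunk above:

package main

import (
	"fmt"
	"strings"
)

// parseLegacyDependency normalizes a legacy dependency string, then refuses
// to return anything that fails a final sanity check, so an invalid string
// can never reach the state file.
func parseLegacyDependency(s string) (string, error) {
	parts := strings.Split(s, ".")
	ret := parts[0]
	for _, part := range parts[1:] {
		// (index/wildcard normalization elided; see the hunk above)
		ret = ret + "." + part
	}
	if ret == "" || strings.Contains(ret, "..") { // stand-in for addrs.ParseRefStr
		return "", fmt.Errorf("invalid reference %q", s)
	}
	return ret, nil
}

func main() {
	for _, dep := range []string{"aws_instance.foo", "module.a..b"} {
		ref, err := parseLegacyDependency(dep)
		fmt.Printf("%q -> %q, err=%v\n", dep, ref, err)
	}
}
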
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
index 7a6ef3d..323462f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -13,7 +13,6 @@ import (
13 "sync" 13 "sync"
14 14
15 "github.com/hashicorp/terraform/addrs" 15 "github.com/hashicorp/terraform/addrs"
16 "github.com/hashicorp/terraform/config"
17 "github.com/hashicorp/terraform/config/hcl2shim" 16 "github.com/hashicorp/terraform/config/hcl2shim"
18 "github.com/hashicorp/terraform/configs/configschema" 17 "github.com/hashicorp/terraform/configs/configschema"
19 "github.com/zclconf/go-cty/cty" 18 "github.com/zclconf/go-cty/cty"
@@ -665,7 +664,7 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
665 old, exists := attrs[currentKey] 664 old, exists := attrs[currentKey]
666 665
667 if diff != nil && diff.NewComputed { 666 if diff != nil && diff.NewComputed {
668 result[attr] = config.UnknownVariableValue 667 result[attr] = hcl2shim.UnknownVariableValue
669 return result, nil 668 return result, nil
670 } 669 }
671 670
@@ -673,7 +672,7 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
673 // This only applied to top-level "id" fields. 672 // This only applied to top-level "id" fields.
674 if attr == "id" && len(path) == 1 { 673 if attr == "id" && len(path) == 1 {
675 if old == "" { 674 if old == "" {
676 result[attr] = config.UnknownVariableValue 675 result[attr] = hcl2shim.UnknownVariableValue
677 } else { 676 } else {
678 result[attr] = old 677 result[attr] = old
679 } 678 }
@@ -704,8 +703,8 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
704 // check for mismatched diff values 703
705 if exists && 704 if exists &&
706 old != diff.Old && 705 old != diff.Old &&
707 old != config.UnknownVariableValue && 706 old != hcl2shim.UnknownVariableValue &&
708 diff.Old != config.UnknownVariableValue { 707 diff.Old != hcl2shim.UnknownVariableValue {
709 return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) 708 return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
710 } 709 }
711 710
@@ -723,7 +722,7 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
723 } 722 }
724 723
725 if attrSchema.Computed && diff.NewComputed { 724 if attrSchema.Computed && diff.NewComputed {
726 result[attr] = config.UnknownVariableValue 725 result[attr] = hcl2shim.UnknownVariableValue
727 return result, nil 726 return result, nil
728 } 727 }
729 728
@@ -756,7 +755,7 @@ func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]strin
756 } 755 }
757 756
758 if diff.NewComputed { 757 if diff.NewComputed {
759 result[k[len(prefix):]] = config.UnknownVariableValue 758 result[k[len(prefix):]] = hcl2shim.UnknownVariableValue
760 return result, nil 759 return result, nil
761 } 760 }
762 761
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
index 09313f7..422f372 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -61,7 +61,8 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
61 configVal := cty.NullVal(cty.DynamicPseudoType) 61 configVal := cty.NullVal(cty.DynamicPseudoType)
62 if n.Config != nil { 62 if n.Config != nil {
63 var configDiags tfdiags.Diagnostics 63 var configDiags tfdiags.Diagnostics
64 keyData := EvalDataForInstanceKey(n.Addr.Key) 64 forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
65 keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
65 configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) 66 configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData)
66 diags = diags.Append(configDiags) 67 diags = diags.Append(configDiags)
67 if configDiags.HasErrors() { 68 if configDiags.HasErrors() {
@@ -548,7 +549,8 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
548 provisioner := ctx.Provisioner(prov.Type) 549 provisioner := ctx.Provisioner(prov.Type)
549 schema := ctx.ProvisionerSchema(prov.Type) 550 schema := ctx.ProvisionerSchema(prov.Type)
550 551
551 552 // TODO the for_each val is not added here, which might cause issues with provisioners
553 keyData := EvalDataForInstanceKey(instanceAddr.Key, nil)
552 554
553 // Evaluate the main provisioner configuration. 555 // Evaluate the main provisioner configuration.
554 config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) 556 config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index b7acfb0..695b5fe 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -4,7 +4,6 @@ import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6 "log" 6 "log"
7 "reflect"
8 "strings" 7 "strings"
9 8
10 "github.com/hashicorp/hcl2/hcl" 9 "github.com/hashicorp/hcl2/hcl"
@@ -134,7 +133,8 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
134 // Should be caught during validation, so we don't bother with a pretty error here 133 // Should be caught during validation, so we don't bother with a pretty error here
135 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) 134 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
136 } 135 }
137 keyData := EvalDataForInstanceKey(n.Addr.Key) 136 forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
137 keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
138 configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) 138 configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
139 diags = diags.Append(configDiags) 139 diags = diags.Append(configDiags)
140 if configDiags.HasErrors() { 140 if configDiags.HasErrors() {
@@ -174,6 +174,20 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
174 } 174 }
175 } 175 }
176 176
177 log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path()))
178 // Allow the provider to validate the final set of values.
179 // The config was statically validated early on, but there may have been
180 // unknown values which the provider could not validate at the time.
181 validateResp := provider.ValidateResourceTypeConfig(
182 providers.ValidateResourceTypeConfigRequest{
183 TypeName: n.Addr.Resource.Type,
184 Config: configVal,
185 },
186 )
187 if validateResp.Diagnostics.HasErrors() {
188 return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err()
189 }
190
177 // The provider gets an opportunity to customize the proposed new value, 191 // The provider gets an opportunity to customize the proposed new value,
178 // which in turn produces the _planned_ new value. 192 // which in turn produces the _planned_ new value.
179 resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ 193 resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
@@ -448,8 +462,9 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
448 // must _also_ record the returned change in the active plan, 462 // must _also_ record the returned change in the active plan,
449 // which the expression evaluator will use in preference to this 463 // which the expression evaluator will use in preference to this
450 // incomplete value recorded in the state. 464 // incomplete value recorded in the state.
451 Status: states.ObjectPlanned, 465 Status: states.ObjectPlanned,
452 Value: plannedNewVal, 466 Value: plannedNewVal,
467 Private: plannedPrivate,
453 } 468 }
454 } 469 }
455 470
@@ -517,7 +532,7 @@ func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []h
517 // away any deeper values we already produced at that point. 532 // away any deeper values we already produced at that point.
518 var ignoreTraversal hcl.Traversal 533 var ignoreTraversal hcl.Traversal
519 for i, candidate := range ignoreChangesPath { 534 for i, candidate := range ignoreChangesPath {
520 if reflect.DeepEqual(path, candidate) { 535 if path.Equals(candidate) {
521 ignoreTraversal = ignoreChanges[i] 536 ignoreTraversal = ignoreChanges[i]
522 } 537 }
523 } 538 }
@@ -790,6 +805,7 @@ func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
790 Before: state.Value, 805 Before: state.Value,
791 After: cty.NullVal(cty.DynamicPseudoType), 806 After: cty.NullVal(cty.DynamicPseudoType),
792 }, 807 },
808 Private: state.Private,
793 ProviderAddr: n.ProviderAddr, 809 ProviderAddr: n.ProviderAddr,
794 } 810 }
795 811
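
The added re-validation call matters because static validation has to let unknown values pass; once the config has been evaluated with concrete instance key data, the provider gets one more chance to reject it. A hedged sketch of why the second pass catches more, using go-cty directly; the validate closure is illustrative, not the provider API:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Illustrative validation rule: "name" must not be empty. Unknown
	// values must pass, because they cannot be checked until apply time.
	validate := func(config cty.Value) error {
		name := config.GetAttr("name")
		if name.IsKnown() && name.AsString() == "" {
			return fmt.Errorf(`"name" must not be empty`)
		}
		return nil
	}

	early := cty.ObjectVal(map[string]cty.Value{
		"name": cty.UnknownVal(cty.String), // as seen during static validation
	})
	final := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal(""), // as seen after evaluation, pre-plan
	})

	fmt.Println(validate(early)) // <nil>: unknown, nothing to check yet
	fmt.Println(validate(final)) // error: caught only by the re-validation
}
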
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go
new file mode 100644
index 0000000..b86bf37
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go
@@ -0,0 +1,85 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// evaluateResourceForEachExpression interprets a "for_each" argument on a resource.
12//
13// Returns a cty.Value map and diagnostics if necessary. It returns nil when
14// the expression is nil, which lets callers distinguish an unset for_each
15// from an empty map.
16func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) {
17 forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx)
18 if !known {
19 // Attach a diag as we do with count, with the same downsides
20 diags = diags.Append(&hcl.Diagnostic{
21 Severity: hcl.DiagError,
22 Summary: "Invalid for_each argument",
23 Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`,
24 })
25 }
26 return forEachMap, diags
27}
28
29// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression
30// except that it handles an unknown result by returning an empty map and
31// a known = false, rather than by reporting the unknown value as an error
32// diagnostic.
33func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) {
34 if expr == nil {
35 return nil, true, nil
36 }
37
38 forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil)
39 diags = diags.Append(forEachDiags)
40 if diags.HasErrors() {
41 return nil, true, diags
42 }
43
44 switch {
45 case forEachVal.IsNull():
46 diags = diags.Append(&hcl.Diagnostic{
47 Severity: hcl.DiagError,
48 Summary: "Invalid for_each argument",
49 Detail: `The given "for_each" argument value is unsuitable: the value must not be null. A map or a set of strings is allowed.`,
50 Subject: expr.Range().Ptr(),
51 })
52 return nil, true, diags
53 case !forEachVal.IsKnown():
54 return map[string]cty.Value{}, false, diags
55 }
56
57 if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() {
58 diags = diags.Append(&hcl.Diagnostic{
59 Severity: hcl.DiagError,
60 Summary: "Invalid for_each argument",
61 Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()),
62 Subject: expr.Range().Ptr(),
63 })
64 return nil, true, diags
65 }
66
67 if forEachVal.Type().IsSetType() {
68 if forEachVal.Type().ElementType() != cty.String {
69 diags = diags.Append(&hcl.Diagnostic{
70 Severity: hcl.DiagError,
71 Summary: "Invalid for_each set argument",
72 Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()),
73 Subject: expr.Range().Ptr(),
74 })
75 return nil, true, diags
76 }
77 }
78
79 // If the collection is empty ({}), return a non-nil empty map explicitly, because cty's AsValueMap returns nil for a zero-length value
80 if forEachVal.LengthInt() == 0 {
81 return map[string]cty.Value{}, true, diags
82 }
83
84 return forEachVal.AsValueMap(), true, nil
85}
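
The new helper encodes several subtle rules: a null for_each is an error, an unknown one is signalled through known=false rather than a diagnostic, lists are rejected even though they iterate, sets must contain strings, and an empty collection must become a non-nil empty map. A condensed, runnable restatement of those rules over an already-evaluated value (error strings abbreviated from the diagnostics above):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// checkForEach condenses evaluateResourceForEachExpressionKnown's rules into
// a plain function over a cty.Value.
func checkForEach(v cty.Value) (map[string]cty.Value, bool, error) {
	switch {
	case v.IsNull():
		return nil, true, fmt.Errorf(`"for_each" must not be null`)
	case !v.IsKnown():
		return map[string]cty.Value{}, false, nil
	}
	if !v.CanIterateElements() || v.Type().IsListType() {
		return nil, true, fmt.Errorf(`"for_each" must be a map or set of strings, not %s`, v.Type().FriendlyName())
	}
	if v.Type().IsSetType() && v.Type().ElementType() != cty.String {
		return nil, true, fmt.Errorf(`"for_each" sets must contain strings`)
	}
	if v.LengthInt() == 0 {
		return map[string]cty.Value{}, true, nil
	}
	return v.AsValueMap(), true, nil
}

func main() {
	m, known, err := checkForEach(cty.MapVal(map[string]cty.Value{
		"web": cty.StringVal("10.0.0.5"),
	}))
	fmt.Println(len(m), known, err) // 1 true <nil>

	_, known, _ = checkForEach(cty.UnknownVal(cty.Map(cty.String)))
	fmt.Println(known) // false: caller defers to the plan walk
}
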
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
index 34f2d60..4999480 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -95,7 +95,8 @@ func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
95 objTy := schema.ImpliedType() 95 objTy := schema.ImpliedType()
96 priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time 96 priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time
97 97
98 keyData := EvalDataForInstanceKey(n.Addr.Key) 98 forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
99 keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
99 100
100 var configDiags tfdiags.Diagnostics 101 var configDiags tfdiags.Diagnostics
101 configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) 102 configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData)
@@ -179,6 +180,17 @@ func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
179 ) 180 )
180 } 181 }
181 182
183 log.Printf("[TRACE] Re-validating config for %s", absAddr)
184 validateResp := provider.ValidateDataSourceConfig(
185 providers.ValidateDataSourceConfigRequest{
186 TypeName: n.Addr.Resource.Type,
187 Config: configVal,
188 },
189 )
190 if validateResp.Diagnostics.HasErrors() {
191 return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err()
192 }
193
182 // If we get down here then our configuration is complete and we're ready 194
183 // to actually call the provider to read the data. 195 // to actually call the provider to read the data.
184 log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) 196 log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
index 03bc948..4dfb5b4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -55,6 +55,7 @@ func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
55 req := providers.ReadResourceRequest{ 55 req := providers.ReadResourceRequest{
56 TypeName: n.Addr.Resource.Type, 56 TypeName: n.Addr.Resource.Type,
57 PriorState: priorVal, 57 PriorState: priorVal,
58 Private: state.Private,
58 } 59 }
59 60
60 provider := *n.Provider 61 provider := *n.Provider
@@ -87,6 +88,7 @@ func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
87 88
88 newState := state.DeepCopy() 89 newState := state.DeepCopy()
89 newState.Value = resp.NewState 90 newState.Value = resp.NewState
91 newState.Private = resp.Private
90 92
91 // Call post-refresh hook 93 // Call post-refresh hook
92 err = ctx.Hook(func(h Hook) (HookAction, error) { 94 err = ctx.Hook(func(h Hook) (HookAction, error) {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
index d506ce3..b611113 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -424,15 +424,21 @@ func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
424 return nil, diags.Err() 424 return nil, diags.Err()
425 } 425 }
426 426
427 // Currently we ony support NoEach and EachList, because for_each support
428 // is not fully wired up across Terraform. Once for_each support is added,
429 // we'll need to handle that here too, setting states.EachMap if the
430 // assigned expression is a map.
431 eachMode := states.NoEach 427 eachMode := states.NoEach
432 if count >= 0 { // -1 signals "count not set" 428 if count >= 0 { // -1 signals "count not set"
433 eachMode = states.EachList 429 eachMode = states.EachList
434 } 430 }
435 431
432 forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
433 diags = diags.Append(forEachDiags)
434 if forEachDiags.HasErrors() {
435 return nil, diags.Err()
436 }
437
438 if forEach != nil {
439 eachMode = states.EachMap
440 }
441
436 // This method takes care of all of the business logic of updating this 442 // This method takes care of all of the business logic of updating this
437 // while ensuring that any existing instances are preserved, etc. 443 // while ensuring that any existing instances are preserved, etc.
438 state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) 444 state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
index 0033e01..6b809a2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -112,11 +112,12 @@ func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
112// the configuration of a provisioner belonging to a resource. The provisioner 112// the configuration of a provisioner belonging to a resource. The provisioner
113// config is expected to contain the merged connection configurations. 113// config is expected to contain the merged connection configurations.
114type EvalValidateProvisioner struct { 114type EvalValidateProvisioner struct {
115 ResourceAddr addrs.Resource 115 ResourceAddr addrs.Resource
116 Provisioner *provisioners.Interface 116 Provisioner *provisioners.Interface
117 Schema **configschema.Block 117 Schema **configschema.Block
118 Config *configs.Provisioner 118 Config *configs.Provisioner
119 ResourceHasCount bool 119 ResourceHasCount bool
120 ResourceHasForEach bool
120} 121}
121 122
122func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { 123func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
@@ -198,6 +199,19 @@ func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body,
198 // expected type since none of these elements are known at this 199 // expected type since none of these elements are known at this
199 // point anyway. 200 // point anyway.
200 selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) 201 selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0))
202 } else if n.ResourceHasForEach {
203 // For a resource that has for_each, we allow each.value and each.key
204 // but don't know at this stage what they will return.
205 keyData = InstanceKeyEvalData{
206 EachKey: cty.UnknownVal(cty.String),
207 EachValue: cty.DynamicVal,
208 }
209
210 // "self" can't point to an unknown key, but we'll force it to be
211 // key "" here, which should return an unknown value of the
212 // expected type since none of these elements are known at
213 // this point anyway.
214 selfAddr = n.ResourceAddr.Instance(addrs.StringKey(""))
201 } 215 }
202 216
203 return ctx.EvaluateBlock(body, schema, selfAddr, keyData) 217 return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
@@ -370,10 +384,21 @@ func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
370 diags = diags.Append(countDiags) 384 diags = diags.Append(countDiags)
371 } 385 }
372 386
387 if n.Config.ForEach != nil {
388 keyData = InstanceKeyEvalData{
389 EachKey: cty.UnknownVal(cty.String),
390 EachValue: cty.UnknownVal(cty.DynamicPseudoType),
391 }
392
393 // Evaluate the for_each expression here so we can expose the diagnostics
394 forEachDiags := n.validateForEach(ctx, n.Config.ForEach)
395 diags = diags.Append(forEachDiags)
396 }
397
373 for _, traversal := range n.Config.DependsOn { 398 for _, traversal := range n.Config.DependsOn {
374 ref, refDiags := addrs.ParseRef(traversal) 399 ref, refDiags := addrs.ParseRef(traversal)
375 diags = diags.Append(refDiags) 400 diags = diags.Append(refDiags)
376 if len(ref.Remaining) != 0 { 401 if !refDiags.HasErrors() && len(ref.Remaining) != 0 {
377 diags = diags.Append(&hcl.Diagnostic{ 402 diags = diags.Append(&hcl.Diagnostic{
378 Severity: hcl.DiagError, 403 Severity: hcl.DiagError,
379 Summary: "Invalid depends_on reference", 404 Summary: "Invalid depends_on reference",
@@ -542,3 +567,18 @@ func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expressio
542 567
543 return diags 568 return diags
544} 569}
570
571func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) {
572 _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx)
573 // If the value isn't known then that's the best we can do for now, but
574 // we'll check more thoroughly during the plan walk
575 if !known {
576 return diags
577 }
578
579 if forEachDiags.HasErrors() {
580 diags = diags.Append(forEachDiags)
581 }
582
583 return diags
584}
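
During validation the for_each keys are not yet known, so both hunks above substitute placeholders: each.key becomes an unknown string, each.value a wholly unknown value, and self points at key "" purely to obtain a value of the expected type. A small sketch of those placeholder values, with InstanceKeyEvalData stood in by a local struct:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// instanceKeyEvalData is a local stand-in for terraform's InstanceKeyEvalData.
type instanceKeyEvalData struct {
	EachKey   cty.Value
	EachValue cty.Value
}

func main() {
	// What validation uses before for_each is resolved: unknowns of the
	// right type, so expressions type-check without concrete values.
	keyData := instanceKeyEvalData{
		EachKey:   cty.UnknownVal(cty.String),
		EachValue: cty.DynamicVal,
	}

	// Anything derived from the placeholders stays unknown instead of
	// failing, which is what lets validation proceed.
	derived := cty.UnknownVal(cty.String)
	if keyData.EachKey.IsKnown() {
		derived = keyData.EachKey // would be the concrete key at plan time
	}
	fmt.Println(keyData.EachValue.IsKnown(), derived.IsKnown()) // false false
}
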
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
index 68adf76..ea46973 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -12,6 +12,7 @@ import (
12 "github.com/hashicorp/terraform/addrs" 12 "github.com/hashicorp/terraform/addrs"
13 13
14 "github.com/hashicorp/terraform/config" 14 "github.com/hashicorp/terraform/config"
15 "github.com/hashicorp/terraform/config/hcl2shim"
15 "github.com/hashicorp/terraform/config/module" 16 "github.com/hashicorp/terraform/config/module"
16 "github.com/zclconf/go-cty/cty" 17 "github.com/zclconf/go-cty/cty"
17 "github.com/zclconf/go-cty/cty/convert" 18 "github.com/zclconf/go-cty/cty/convert"
@@ -60,7 +61,7 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
60 continue 61 continue
61 } 62 }
62 63
63 if proposedValue == config.UnknownVariableValue { 64 if proposedValue == hcl2shim.UnknownVariableValue {
64 continue 65 continue
65 } 66 }
66 67
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
index ab65d47..9bb6009 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
@@ -120,20 +120,24 @@ type InstanceKeyEvalData struct {
120 120
121// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for 121// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for
122// evaluating in a context that has the given instance key. 122// evaluating in a context that has the given instance key.
123func EvalDataForInstanceKey(key addrs.InstanceKey) InstanceKeyEvalData { 123func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData {
124 // At the moment we don't actually implement for_each, so we only
125 // ever populate CountIndex.
126 // (When we implement for_each later we may need to reorganize this some,
127 // so that we can resolve the ambiguity that an int key may either be
128 // a count.index or an each.key where for_each is over a list.)
129
130 var countIdx cty.Value 124 var countIdx cty.Value
125 var eachKey cty.Value
126 var eachVal cty.Value
127
131 if intKey, ok := key.(addrs.IntKey); ok { 128 if intKey, ok := key.(addrs.IntKey); ok {
132 countIdx = cty.NumberIntVal(int64(intKey)) 129 countIdx = cty.NumberIntVal(int64(intKey))
133 } 130 }
134 131
132 if stringKey, ok := key.(addrs.StringKey); ok {
133 eachKey = cty.StringVal(string(stringKey))
134 eachVal = forEachMap[string(stringKey)]
135 }
136
135 return InstanceKeyEvalData{ 137 return InstanceKeyEvalData{
136 CountIndex: countIdx, 138 CountIndex: countIdx,
139 EachKey: eachKey,
140 EachValue: eachVal,
137 } 141 }
138} 142}
139 143
@@ -173,6 +177,37 @@ func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.Sou
173 } 177 }
174} 178}
175 179
180func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
181 var diags tfdiags.Diagnostics
182 var returnVal cty.Value
183 switch addr.Name {
184
185 case "key":
186 returnVal = d.InstanceKeyData.EachKey
187 case "value":
188 returnVal = d.InstanceKeyData.EachValue
189 default:
190 diags = diags.Append(&hcl.Diagnostic{
191 Severity: hcl.DiagError,
192 Summary: `Invalid "each" attribute`,
193 Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name),
194 Subject: rng.ToHCL().Ptr(),
195 })
196 return cty.DynamicVal, diags
197 }
198
199 if returnVal == cty.NilVal {
200 diags = diags.Append(&hcl.Diagnostic{
201 Severity: hcl.DiagError,
202 Summary: `Reference to "each" in context without for_each`,
203 Detail: `The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`,
204 Subject: rng.ToHCL().Ptr(),
205 })
206 return cty.UnknownVal(cty.DynamicPseudoType), diags
207 }
208 return returnVal, diags
209}
210
176func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { 211func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
177 var diags tfdiags.Diagnostics 212 var diags tfdiags.Diagnostics
178 213
@@ -569,7 +604,7 @@ func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, r
569 } 604 }
570 case states.EachMap: 605 case states.EachMap:
571 multi = key == addrs.NoKey 606 multi = key == addrs.NoKey
572 if _, ok := addr.Key.(addrs.IntKey); !multi && !ok { 607 if _, ok := addr.Key.(addrs.StringKey); !multi && !ok {
573 diags = diags.Append(&hcl.Diagnostic{ 608 diags = diags.Append(&hcl.Diagnostic{
574 Severity: hcl.DiagError, 609 Severity: hcl.DiagError,
575 Summary: "Invalid resource index", 610 Summary: "Invalid resource index",
@@ -696,7 +731,7 @@ func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng t
696 ty := schema.ImpliedType() 731 ty := schema.ImpliedType()
697 key := addrs.IntKey(i) 732 key := addrs.IntKey(i)
698 is, exists := rs.Instances[key] 733 is, exists := rs.Instances[key]
699 if exists { 734 if exists && is.Current != nil {
700 instAddr := addr.Instance(key).Absolute(d.ModulePath) 735 instAddr := addr.Instance(key).Absolute(d.ModulePath)
701 736
702 // Prefer pending value in plan if present. See getResourceInstanceSingle 737 // Prefer pending value in plan if present. See getResourceInstanceSingle
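
The reworked EvalDataForInstanceKey shows how the two expansion mechanisms map onto instance keys: an int key populates count.index, while a string key populates each.key and looks each.value up in the resolved for_each map. A runnable sketch with local stand-ins for the addrs key types:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// Local stand-ins for addrs.InstanceKey, addrs.IntKey and addrs.StringKey so
// the sketch runs on its own.
type instanceKey interface{ instanceKey() }
type intKey int
type stringKey string

func (intKey) instanceKey()    {}
func (stringKey) instanceKey() {}

type instanceKeyEvalData struct {
	CountIndex, EachKey, EachValue cty.Value
}

// evalDataForInstanceKey mirrors the patched EvalDataForInstanceKey: an int
// key fills count.index; a string key fills each.key and looks up each.value
// in the resolved for_each map. A missing entry leaves EachValue as
// cty.NilVal, which GetForEachAttr above reports as "each" used without
// for_each.
func evalDataForInstanceKey(key instanceKey, forEach map[string]cty.Value) instanceKeyEvalData {
	var data instanceKeyEvalData
	switch k := key.(type) {
	case intKey:
		data.CountIndex = cty.NumberIntVal(int64(k))
	case stringKey:
		data.EachKey = cty.StringVal(string(k))
		data.EachValue = forEach[string(k)]
	}
	return data
}

func main() {
	forEach := map[string]cty.Value{"web": cty.StringVal("10.0.0.5")}
	d := evalDataForInstanceKey(stringKey("web"), forEach)
	fmt.Println(d.EachKey.AsString(), d.EachValue.AsString())
}
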
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
index 26c1857..97bb1f6 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -11,16 +11,11 @@ import (
11 "github.com/hashicorp/hil" 11 "github.com/hashicorp/hil"
12 "github.com/hashicorp/hil/ast" 12 "github.com/hashicorp/hil/ast"
13 "github.com/hashicorp/terraform/config" 13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/hcl2shim"
14 "github.com/hashicorp/terraform/config/module" 15 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/flatmap" 16 "github.com/hashicorp/terraform/flatmap"
16) 17)
17 18
18const (
19 // VarEnvPrefix is the prefix of variables that are read from
20 // the environment to set variables here.
21 VarEnvPrefix = "TF_VAR_"
22)
23
24// Interpolater is the structure responsible for determining the values 19// Interpolater is the structure responsible for determining the values
25// for interpolations such as `aws_instance.foo.bar`. 20// for interpolations such as `aws_instance.foo.bar`.
26type Interpolater struct { 21type Interpolater struct {
@@ -71,7 +66,7 @@ func (i *Interpolater) valueCountVar(
71func unknownVariable() ast.Variable { 66func unknownVariable() ast.Variable {
72 return ast.Variable{ 67 return ast.Variable{
73 Type: ast.TypeUnknown, 68 Type: ast.TypeUnknown,
74 Value: config.UnknownVariableValue, 69 Value: hcl2shim.UnknownVariableValue,
75 } 70 }
76} 71}
77 72
@@ -659,7 +654,7 @@ func (i *Interpolater) interpolateComplexTypeAttribute(
659 // ".#" count field is marked as unknown to indicate "this whole list is 654 // ".#" count field is marked as unknown to indicate "this whole list is
660 // unknown". We must honor that meaning here so computed references can be 655 // unknown". We must honor that meaning here so computed references can be
661 // treated properly during the plan phase. 656 // treated properly during the plan phase.
662 if lengthAttr == config.UnknownVariableValue { 657 if lengthAttr == hcl2shim.UnknownVariableValue {
663 return unknownVariable(), nil 658 return unknownVariable(), nil
664 } 659 }
665 660
@@ -675,7 +670,7 @@ func (i *Interpolater) interpolateComplexTypeAttribute(
675 // ".%" count field is marked as unknown to indicate "this whole list is 670 // ".%" count field is marked as unknown to indicate "this whole list is
676 // unknown". We must honor that meaning here so computed references can be 671 // unknown". We must honor that meaning here so computed references can be
677 // treated properly during the plan phase. 672 // treated properly during the plan phase.
678 if lengthAttr == config.UnknownVariableValue { 673 if lengthAttr == hcl2shim.UnknownVariableValue {
679 return unknownVariable(), nil 674 return unknownVariable(), nil
680 } 675 }
681 676
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
index ab82163..dd92866 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -38,6 +38,16 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
38 return nil, nil 38 return nil, nil
39 } 39 }
40 40
41 forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx)
42 if forEachDiags.HasErrors() {
43 return nil, forEachDiags.Err()
44 }
45 if !forEachKnown {
46 // If the for_each isn't known yet, we'll skip refreshing and try expansion
47 // again during the plan walk.
48 return nil, nil
49 }
50
41 // Next we need to potentially rename an instance address in the state 51 // Next we need to potentially rename an instance address in the state
42 // if we're transitioning whether "count" is set at all. 52 // if we're transitioning whether "count" is set at all.
43 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) 53 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
@@ -77,6 +87,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
77 Concrete: concreteResource, 87 Concrete: concreteResource,
78 Schema: n.Schema, 88 Schema: n.Schema,
79 Count: count, 89 Count: count,
90 ForEach: forEachMap,
80 Addr: n.ResourceAddr(), 91 Addr: n.ResourceAddr(),
81 }, 92 },
82 93
@@ -85,6 +96,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
85 &OrphanResourceCountTransformer{ 96 &OrphanResourceCountTransformer{
86 Concrete: concreteResourceDestroyable, 97 Concrete: concreteResourceDestroyable,
87 Count: count, 98 Count: count,
99 ForEach: forEachMap,
88 Addr: n.ResourceAddr(), 100 Addr: n.ResourceAddr(),
89 State: state, 101 State: state,
90 }, 102 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
index 3a0570c..d147b42 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -187,6 +187,8 @@ func (n *NodeAbstractResource) References() []*addrs.Reference {
187 187
188 refs, _ := lang.ReferencesInExpr(c.Count) 188 refs, _ := lang.ReferencesInExpr(c.Count)
189 result = append(result, refs...) 189 result = append(result, refs...)
190 refs, _ = lang.ReferencesInExpr(c.ForEach)
191 result = append(result, refs...)
190 refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) 192 refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
191 result = append(result, refs...) 193 result = append(result, refs...)
192 if c.Managed != nil { 194 if c.Managed != nil {
@@ -238,21 +240,31 @@ func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
238 // need to do a little work here to massage this to the form we now 240 // need to do a little work here to massage this to the form we now
239 // want. 241 // want.
240 var result []*addrs.Reference 242 var result []*addrs.Reference
241 for _, addr := range s.Current.Dependencies {
242 if addr == nil {
243 // Should never happen; indicates a bug in the state loader
244 panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr()))
245 }
246 243
247 // This is a little weird: we need to manufacture an addrs.Reference 244 // It is (apparently) possible for s.Current to be nil. This proved
248 // with a fake range here because the state isn't something we can 245 // difficult to reproduce, so we will fix the symptom here and hope
249 // make source references into. 246 // to find the root cause another time.
250 result = append(result, &addrs.Reference{ 247 //
251 Subject: addr, 248 // https://github.com/hashicorp/terraform/issues/21407
252 SourceRange: tfdiags.SourceRange{ 249 if s.Current == nil {
253 Filename: "(state file)", 250 log.Printf("[WARN] no current state found for %s", n.Name())
254 }, 251 } else {
255 }) 252 for _, addr := range s.Current.Dependencies {
253 if addr == nil {
254 // Should never happen; indicates a bug in the state loader
255 panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr()))
256 }
257
258 // This is a little weird: we need to manufacture an addrs.Reference
259 // with a fake range here because the state isn't something we can
260 // make source references into.
261 result = append(result, &addrs.Reference{
262 Subject: addr,
263 SourceRange: tfdiags.SourceRange{
264 Filename: "(state file)",
265 },
266 })
267 }
256 } 268 }
257 return result 269 return result
258 } 270 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
index dad7bfc..d795324 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
@@ -101,13 +101,6 @@ func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
101func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { 101func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
102 addr := n.ResourceInstanceAddr() 102 addr := n.ResourceInstanceAddr()
103 103
104 // State still uses legacy-style internal ids, so we need to shim to get
105 // a suitable key to use.
106 stateId := NewLegacyResourceInstanceAddress(addr).stateId()
107
108 // Determine the dependencies for the state.
109 stateDeps := n.StateReferences()
110
111 if n.Config == nil { 104 if n.Config == nil {
112 // This should not be possible, but we've got here in at least one 105 // This should not be possible, but we've got here in at least one
113 // case as discussed in the following issue: 106 // case as discussed in the following issue:
@@ -132,15 +125,15 @@ func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
132 // Eval info is different depending on what kind of resource this is 125 // Eval info is different depending on what kind of resource this is
133 switch n.Config.Mode { 126 switch n.Config.Mode {
134 case addrs.ManagedResourceMode: 127 case addrs.ManagedResourceMode:
135 return n.evalTreeManagedResource(addr, stateId, stateDeps) 128 return n.evalTreeManagedResource(addr)
136 case addrs.DataResourceMode: 129 case addrs.DataResourceMode:
137 return n.evalTreeDataResource(addr, stateId, stateDeps) 130 return n.evalTreeDataResource(addr)
138 default: 131 default:
139 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) 132 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
140 } 133 }
141} 134}
142 135
143func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { 136func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode {
144 var provider providers.Interface 137 var provider providers.Interface
145 var providerSchema *ProviderSchema 138 var providerSchema *ProviderSchema
146 var change *plans.ResourceInstanceChange 139 var change *plans.ResourceInstanceChange
@@ -206,7 +199,7 @@ func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResou
206 } 199 }
207} 200}
208 201
209func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { 202func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode {
210 // Declare a bunch of variables that are used for state during 203 // Declare a bunch of variables that are used for state during
211 // evaluation. Most of this are written to by-address below. 204 // evaluation. Most of this are written to by-address below.
212 var provider providers.Interface 205 var provider providers.Interface
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
index 633c1c4..ec4aa93 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -77,6 +77,11 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
77 return nil, diags.Err() 77 return nil, diags.Err()
78 } 78 }
79 79
80 forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
81 if forEachDiags.HasErrors() {
82 return nil, forEachDiags.Err()
83 }
84
80 // Next we need to potentially rename an instance address in the state 85 // Next we need to potentially rename an instance address in the state
81 // if we're transitioning whether "count" is set at all. 86 // if we're transitioning whether "count" is set at all.
82 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) 87 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
@@ -119,18 +124,20 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
119 124
120 // Start creating the steps 125 // Start creating the steps
121 steps := []GraphTransformer{ 126 steps := []GraphTransformer{
122 // Expand the count. 127 // Expand the count or for_each (if present)
123 &ResourceCountTransformer{ 128 &ResourceCountTransformer{
124 Concrete: concreteResource, 129 Concrete: concreteResource,
125 Schema: n.Schema, 130 Schema: n.Schema,
126 Count: count, 131 Count: count,
132 ForEach: forEachMap,
127 Addr: n.ResourceAddr(), 133 Addr: n.ResourceAddr(),
128 }, 134 },
129 135
130 // Add the count orphans 136 // Add the count/for_each orphans
131 &OrphanResourceCountTransformer{ 137 &OrphanResourceCountTransformer{
132 Concrete: concreteResourceOrphan, 138 Concrete: concreteResourceOrphan,
133 Count: count, 139 Count: count,
140 ForEach: forEachMap,
134 Addr: n.ResourceAddr(), 141 Addr: n.ResourceAddr(),
135 State: state, 142 State: state,
136 }, 143 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
index 75e0bcd..0f74bbe 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -34,25 +34,18 @@ var (
34func (n *NodePlannableResourceInstance) EvalTree() EvalNode { 34func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
35 addr := n.ResourceInstanceAddr() 35 addr := n.ResourceInstanceAddr()
36 36
37 // State still uses legacy-style internal ids, so we need to shim to get
38 // a suitable key to use.
39 stateId := NewLegacyResourceInstanceAddress(addr).stateId()
40
41 // Determine the dependencies for the state.
42 stateDeps := n.StateReferences()
43
44 // Eval info is different depending on what kind of resource this is 37 // Eval info is different depending on what kind of resource this is
45 switch addr.Resource.Resource.Mode { 38 switch addr.Resource.Resource.Mode {
46 case addrs.ManagedResourceMode: 39 case addrs.ManagedResourceMode:
47 return n.evalTreeManagedResource(addr, stateId, stateDeps) 40 return n.evalTreeManagedResource(addr)
48 case addrs.DataResourceMode: 41 case addrs.DataResourceMode:
49 return n.evalTreeDataResource(addr, stateId, stateDeps) 42 return n.evalTreeDataResource(addr)
50 default: 43 default:
51 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) 44 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
52 } 45 }
53} 46}
54 47
55func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { 48func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode {
56 config := n.Config 49 config := n.Config
57 var provider providers.Interface 50 var provider providers.Interface
58 var providerSchema *ProviderSchema 51 var providerSchema *ProviderSchema
@@ -147,7 +140,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResou
147 } 140 }
148} 141}
149 142
150func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { 143func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode {
151 config := n.Config 144 config := n.Config
152 var provider providers.Interface 145 var provider providers.Interface
153 var providerSchema *ProviderSchema 146 var providerSchema *ProviderSchema
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
index 9506023..9daeabf 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -39,6 +39,11 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
39 return nil, diags.Err() 39 return nil, diags.Err()
40 } 40 }
41 41
42 forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
43 if forEachDiags.HasErrors() {
44 return nil, forEachDiags.Err()
45 }
46
42 // Next we need to potentially rename an instance address in the state 47 // Next we need to potentially rename an instance address in the state
43 // if we're transitioning whether "count" is set at all. 48 // if we're transitioning whether "count" is set at all.
44 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) 49 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
@@ -66,6 +71,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
66 Concrete: concreteResource, 71 Concrete: concreteResource,
67 Schema: n.Schema, 72 Schema: n.Schema,
68 Count: count, 73 Count: count,
74 ForEach: forEachMap,
69 Addr: n.ResourceAddr(), 75 Addr: n.ResourceAddr(),
70 }, 76 },
71 77
@@ -74,6 +80,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
74 &OrphanResourceCountTransformer{ 80 &OrphanResourceCountTransformer{
75 Concrete: concreteResource, 81 Concrete: concreteResource,
76 Count: count, 82 Count: count,
83 ForEach: forEachMap,
77 Addr: n.ResourceAddr(), 84 Addr: n.ResourceAddr(),
78 State: state, 85 State: state,
79 }, 86 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
index 734ec9e..efa657b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -54,6 +54,7 @@ func (n *NodeValidatableResource) EvalTree() EvalNode {
54 54
55 if managed := n.Config.Managed; managed != nil { 55 if managed := n.Config.Managed; managed != nil {
56 hasCount := n.Config.Count != nil 56 hasCount := n.Config.Count != nil
57 hasForEach := n.Config.ForEach != nil
57 58
58 // Validate all the provisioners 59 // Validate all the provisioners
59 for _, p := range managed.Provisioners { 60 for _, p := range managed.Provisioners {
@@ -74,11 +75,12 @@ func (n *NodeValidatableResource) EvalTree() EvalNode {
74 Schema: &provisionerSchema, 75 Schema: &provisionerSchema,
75 }, 76 },
76 &EvalValidateProvisioner{ 77 &EvalValidateProvisioner{
77 ResourceAddr: addr.Resource, 78 ResourceAddr: addr.Resource,
78 Provisioner: &provisioner, 79 Provisioner: &provisioner,
79 Schema: &provisionerSchema, 80 Schema: &provisionerSchema,
80 Config: p, 81 Config: p,
81 ResourceHasCount: hasCount, 82 ResourceHasCount: hasCount,
83 ResourceHasForEach: hasForEach,
82 }, 84 },
83 ) 85 )
84 } 86 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
index 4ae346d..8eede48 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
@@ -8,7 +8,6 @@ import (
8 "github.com/zclconf/go-cty/cty" 8 "github.com/zclconf/go-cty/cty"
9 ctyjson "github.com/zclconf/go-cty/cty/json" 9 ctyjson "github.com/zclconf/go-cty/cty/json"
10 10
11 "github.com/hashicorp/terraform/config"
12 "github.com/hashicorp/terraform/config/hcl2shim" 11 "github.com/hashicorp/terraform/config/hcl2shim"
13 "github.com/hashicorp/terraform/providers" 12 "github.com/hashicorp/terraform/providers"
14 "github.com/hashicorp/terraform/tfdiags" 13 "github.com/hashicorp/terraform/tfdiags"
@@ -391,7 +390,7 @@ func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeReques
391 for k, new := range plannedMap { 390 for k, new := range plannedMap {
392 old := priorMap[k] 391 old := priorMap[k]
393 newComputed := false 392 newComputed := false
394 if new == config.UnknownVariableValue { 393 if new == hcl2shim.UnknownVariableValue {
395 new = "" 394 new = ""
396 newComputed = true 395 newComputed = true
397 } 396 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
index 156ecf5..5d8261a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -365,6 +365,8 @@ func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceA
365 ret.Index = -1 365 ret.Index = -1
366 } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { 366 } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
367 ret.Index = int(ik) 367 ret.Index = int(ik)
368 } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok {
369 ret.Index = -1
368 } else { 370 } else {
369 panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) 371 panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
370 } 372 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
index 092b690..6280fb0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -1201,7 +1201,7 @@ func (m *ModuleState) prune() {
1201 } 1201 }
1202 1202
1203 for k, v := range m.Outputs { 1203 for k, v := range m.Outputs {
1204 if v.Value == config.UnknownVariableValue { 1204 if v.Value == hcl2shim.UnknownVariableValue {
1205 delete(m.Outputs, k) 1205 delete(m.Outputs, k)
1206 } 1206 }
1207 } 1207 }
@@ -1827,7 +1827,7 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
1827 continue 1827 continue
1828 } 1828 }
1829 if diff.NewComputed { 1829 if diff.NewComputed {
1830 result.Attributes[k] = config.UnknownVariableValue 1830 result.Attributes[k] = hcl2shim.UnknownVariableValue
1831 continue 1831 continue
1832 } 1832 }
1833 1833
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
index eec762e..4f323a7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -6,6 +6,7 @@ import (
6 "github.com/hashicorp/terraform/addrs" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/states" 8 "github.com/hashicorp/terraform/states"
9 "github.com/zclconf/go-cty/cty"
9) 10)
10 11
11// OrphanResourceCountTransformer is a GraphTransformer that adds orphans 12// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
@@ -18,9 +19,10 @@ import (
18type OrphanResourceCountTransformer struct { 19type OrphanResourceCountTransformer struct {
19 Concrete ConcreteResourceInstanceNodeFunc 20 Concrete ConcreteResourceInstanceNodeFunc
20 21
21 Count int // Actual count of the resource, or -1 if count is not set at all 22 Count int // Actual count of the resource, or -1 if count is not set at all
22 Addr addrs.AbsResource // Addr of the resource to look for orphans 23 ForEach map[string]cty.Value // The ForEach map on the resource
23 State *states.State // Full global state 24 Addr addrs.AbsResource // Addr of the resource to look for orphans
25 State *states.State // Full global state
24} 26}
25 27
26func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { 28func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
@@ -34,6 +36,10 @@ func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
34 haveKeys[key] = struct{}{} 36 haveKeys[key] = struct{}{}
35 } 37 }
36 38
39 // if for_each is set, use the for_each expansion path instead of count
40 if t.ForEach != nil {
41 return t.transformForEach(haveKeys, g)
42 }
37 if t.Count < 0 { 43 if t.Count < 0 {
38 return t.transformNoCount(haveKeys, g) 44 return t.transformNoCount(haveKeys, g)
39 } 45 }
@@ -43,6 +49,25 @@ func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
43 return t.transformCount(haveKeys, g) 49 return t.transformCount(haveKeys, g)
44} 50}
45 51
52func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
53 for key := range haveKeys {
54 s, _ := key.(addrs.StringKey)
55 // If the key is present in our current for_each, carry on
56 if _, ok := t.ForEach[string(s)]; ok {
57 continue
58 }
59
60 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
61 var node dag.Vertex = abstract
62 if f := t.Concrete; f != nil {
63 node = f(abstract)
64 }
65 log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node)
66 g.Add(node)
67 }
68 return nil
69}
70
46func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { 71func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
47 // Due to the logic in Transform, we only get in here if our count is 72 // Due to the logic in Transform, we only get in here if our count is
48 // at least one. 73 // at least one.
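The new `transformForEach` branch above marks an instance as an orphan when its string key is no longer present in the resource's `for_each` map. A dependency-free sketch of that key comparison, with `struct{}` standing in for `cty.Value` and plain strings for `addrs.StringKey`:

```go
package main

import "fmt"

// orphanKeys returns the instance keys that exist in state (haveKeys) but
// are missing from the resource's current for_each map, mirroring the loop
// in transformForEach above.
func orphanKeys(haveKeys, forEach map[string]struct{}) []string {
	var orphans []string
	for key := range haveKeys {
		if _, ok := forEach[key]; ok {
			continue // still declared in configuration; not an orphan
		}
		orphans = append(orphans, key)
	}
	return orphans
}

func main() {
	state := map[string]struct{}{"a": {}, "b": {}, "c": {}}
	forEach := map[string]struct{}{"a": {}, "c": {}}
	fmt.Println(orphanKeys(state, forEach)) // [b]
}
```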
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
index 1123790..c70a3c1 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -4,6 +4,7 @@ import (
4 "github.com/hashicorp/terraform/addrs" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs/configschema" 5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/zclconf/go-cty/cty"
7) 8)
8 9
9// ResourceCountTransformer is a GraphTransformer that expands the count 10// ResourceCountTransformer is a GraphTransformer that expands the count
@@ -17,12 +18,13 @@ type ResourceCountTransformer struct {
17 // Count is either the number of indexed instances to create, or -1 to 18 // Count is either the number of indexed instances to create, or -1 to
18 // indicate that count is not set at all and thus a no-key instance should 19 // indicate that count is not set at all and thus a no-key instance should
19 // be created. 20 // be created.
20 Count int 21 Count int
21 Addr addrs.AbsResource 22 ForEach map[string]cty.Value
23 Addr addrs.AbsResource
22} 24}
23 25
24func (t *ResourceCountTransformer) Transform(g *Graph) error { 26func (t *ResourceCountTransformer) Transform(g *Graph) error {
25 if t.Count < 0 { 27 if t.Count < 0 && t.ForEach == nil {
26 // Negative count indicates that count is not set at all. 28 // Negative count indicates that count is not set at all.
27 addr := t.Addr.Instance(addrs.NoKey) 29 addr := t.Addr.Instance(addrs.NoKey)
28 30
@@ -37,6 +39,19 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error {
37 return nil 39 return nil
38 } 40 }
39 41
42 // Add nodes related to the for_each expression
43 for key := range t.ForEach {
44 addr := t.Addr.Instance(addrs.StringKey(key))
45 abstract := NewNodeAbstractResourceInstance(addr)
46 abstract.Schema = t.Schema
47 var node dag.Vertex = abstract
48 if f := t.Concrete; f != nil {
49 node = f(abstract)
50 }
51
52 g.Add(node)
53 }
54
40 // For each count, build and add the node 55 // For each count, build and add the node
41 for i := 0; i < t.Count; i++ { 56 for i := 0; i < t.Count; i++ {
42 key := addrs.IntKey(i) 57 key := addrs.IntKey(i)
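With this change the transformer expands `for_each` keys alongside `count` indices. A simplified sketch of the instance-address fan-out, with `bool` standing in for `cty.Value` and formatted strings for the instance addresses:

```go
package main

import (
	"fmt"
	"sort"
)

// expandInstances condenses the Transform logic above: with for_each set,
// one instance per map key; with count >= 0, one instance per integer
// index; otherwise a single no-key instance.
func expandInstances(addr string, count int, forEach map[string]bool) []string {
	if count < 0 && forEach == nil {
		return []string{addr}
	}
	var out []string
	for key := range forEach {
		out = append(out, fmt.Sprintf("%s[%q]", addr, key))
	}
	sort.Strings(out) // map iteration order is randomized in Go
	for i := 0; i < count; i++ {
		out = append(out, fmt.Sprintf("%s[%d]", addr, i))
	}
	return out
}

func main() {
	fmt.Println(expandInstances("aws_instance.web", -1, map[string]bool{"eu": true, "us": true}))
	// [aws_instance.web["eu"] aws_instance.web["us"]]
}
```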
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
index 752241a..5428cd5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/util.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -2,8 +2,6 @@ package terraform
2 2
3import ( 3import (
4 "sort" 4 "sort"
5
6 "github.com/hashicorp/terraform/config"
7) 5)
8 6
9// Semaphore is a wrapper around a channel to provide 7// Semaphore is a wrapper around a channel to provide
@@ -48,10 +46,6 @@ func (s Semaphore) Release() {
48 } 46 }
49} 47}
50 48
51func resourceProvider(resourceType, explicitProvider string) string {
52 return config.ResourceProviderFullName(resourceType, explicitProvider)
53}
54
55// strSliceContains checks if a given string is contained in a slice 49// strSliceContains checks if a given string is contained in a slice
56// When anybody asks why Go needs generics, here you go. 50// When anybody asks why Go needs generics, here you go.
57func strSliceContains(haystack []string, needle string) bool { 51func strSliceContains(haystack []string, needle string) bool {
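The comment kept above jokes about generics; since Go 1.18, well after this vendored snapshot, the helper really can be written once for any comparable element type. A sketch:

```go
package main

import "fmt"

// sliceContains is strSliceContains generalized with type parameters
// (Go 1.18+); the vendored code predates generics.
func sliceContains[T comparable](haystack []T, needle T) bool {
	for _, elem := range haystack {
		if elem == needle {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(sliceContains([]string{"a", "b"}, "b")) // true
	fmt.Println(sliceContains([]int{1, 2, 3}, 5))       // false
}
```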
diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go
index 30d7284..c30595c 100644
--- a/vendor/github.com/hashicorp/terraform/version/version.go
+++ b/vendor/github.com/hashicorp/terraform/version/version.go
@@ -11,7 +11,7 @@ import (
11) 11)
12 12
13// The main version number that is being run at the moment. 13// The main version number that is being run at the moment.
14var Version = "0.12.0" 14var Version = "0.12.6"
15 15
16// A pre-release marker for the version. If this is "" (empty string) 16// A pre-release marker for the version. If this is "" (empty string)
17// then it means that it is a final release. Otherwise, this is a pre-release 17// then it means that it is a final release. Otherwise, this is a pre-release
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
new file mode 100644
index 0000000..20dd53b
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/.travis.yml
@@ -0,0 +1,23 @@
1language: go
2sudo: false
3go:
4 - 1.2
5 - 1.3
6 - 1.4
7 - 1.5
8 - 1.6
9 - 1.7
10 - 1.8
11 - 1.9
12 - tip
13matrix:
14 allow_failures:
15 - go: tip
16 fast_finish: true
17before_install:
18 - go get github.com/mattn/goveralls
19 - go get golang.org/x/tools/cmd/cover
20script:
21 - $HOME/gopath/bin/goveralls -service=travis-ci
22notifications:
23 email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 0000000..926d549
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
1Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
2
3Permission is hereby granted, free of charge, to any person obtaining
4a copy of this software and associated documentation files (the
5"Software"), to deal in the Software without restriction, including
6without limitation the rights to use, copy, modify, merge, publish,
7distribute, sublicense, and/or sell copies of the Software, and to
8permit persons to whom the Software is furnished to do so, subject to
9the following conditions:
10
11The above copyright notice and this permission notice shall be
12included in all copies or substantial portions of the Software.
13
14THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644
index 0000000..7b1a722
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -0,0 +1,65 @@
1# UUID package for Go language
2
3[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
4[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
5[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
6
7This package provides a pure Go implementation of the Universally Unique Identifier (UUID). Both creation and parsing of UUIDs are supported.
8
9With 100% test coverage and benchmarks out of the box.
10
11Supported versions:
12* Version 1, based on timestamp and MAC address (RFC 4122)
13* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
14* Version 3, based on MD5 hashing (RFC 4122)
15* Version 4, based on random numbers (RFC 4122)
16* Version 5, based on SHA-1 hashing (RFC 4122)
17
18## Installation
19
20Use the `go` command:
21
22 $ go get github.com/satori/go.uuid
23
24## Requirements
25
26UUID package requires Go >= 1.2.
27
28## Example
29
30```go
31package main
32
33import (
34 "fmt"
35 "github.com/satori/go.uuid"
36)
37
38func main() {
39 // Creating UUID Version 4
40 u1 := uuid.NewV4()
41 fmt.Printf("UUIDv4: %s\n", u1)
42
43 // Parsing UUID from string input
44 u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
45 if err != nil {
46 fmt.Printf("Something gone wrong: %s", err)
47 }
48 fmt.Printf("Successfully parsed: %s", u2)
49}
50```
51
52## Documentation
53
54[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
55
56## Links
57* [RFC 4122](http://tools.ietf.org/html/rfc4122)
58* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
59
60## Copyright
61
62Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>.
63
64UUID package released under MIT License.
65See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
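The README example covers V4 and parsing only; a companion sketch for the name-based versions, using the namespace constants this vendored copy exports (its NewV3/NewV5 return a bare UUID, with no error):

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// V5 UUIDs are deterministic: the same namespace and name always
	// produce the same UUID, which makes them useful as stable IDs.
	u1 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	u2 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Println(uuid.Equal(u1, u2)) // true

	// V3 is the MD5-based equivalent; V5 (SHA-1) is preferred for new code.
	u3 := uuid.NewV3(uuid.NamespaceURL, "https://example.com/")
	fmt.Println(u3.Version()) // 3
}
```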
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go
new file mode 100644
index 0000000..656892c
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/codec.go
@@ -0,0 +1,206 @@
1// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
2//
3// Permission is hereby granted, free of charge, to any person obtaining
4// a copy of this software and associated documentation files (the
5// "Software"), to deal in the Software without restriction, including
6// without limitation the rights to use, copy, modify, merge, publish,
7// distribute, sublicense, and/or sell copies of the Software, and to
8// permit persons to whom the Software is furnished to do so, subject to
9// the following conditions:
10//
11// The above copyright notice and this permission notice shall be
12// included in all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22package uuid
23
24import (
25 "bytes"
26 "encoding/hex"
27 "fmt"
28)
29
30// FromBytes returns a UUID converted from raw byte slice input.
31// It will return an error if the slice isn't 16 bytes long.
32func FromBytes(input []byte) (u UUID, err error) {
33 err = u.UnmarshalBinary(input)
34 return
35}
36
37// FromBytesOrNil returns UUID converted from raw byte slice input.
38// Same behavior as FromBytes, but returns a Nil UUID on error.
39func FromBytesOrNil(input []byte) UUID {
40 uuid, err := FromBytes(input)
41 if err != nil {
42 return Nil
43 }
44 return uuid
45}
46
47// FromString returns UUID parsed from string input.
48// Input is expected in a form accepted by UnmarshalText.
49func FromString(input string) (u UUID, err error) {
50 err = u.UnmarshalText([]byte(input))
51 return
52}
53
54// FromStringOrNil returns UUID parsed from string input.
55// Same behavior as FromString, but returns a Nil UUID on error.
56func FromStringOrNil(input string) UUID {
57 uuid, err := FromString(input)
58 if err != nil {
59 return Nil
60 }
61 return uuid
62}
63
64// MarshalText implements the encoding.TextMarshaler interface.
65// The encoding is the same as returned by String.
66func (u UUID) MarshalText() (text []byte, err error) {
67 text = []byte(u.String())
68 return
69}
70
71// UnmarshalText implements the encoding.TextUnmarshaler interface.
72// Following formats are supported:
73// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
74// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
75// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
76// "6ba7b8109dad11d180b400c04fd430c8"
77// ABNF for supported UUID text representation follows:
78// uuid := canonical | hashlike | braced | urn
79// plain := canonical | hashlike
80// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
81// hashlike := 12hexoct
82// braced := '{' plain '}'
83// urn := URN ':' UUID-NID ':' plain
84// URN := 'urn'
85// UUID-NID := 'uuid'
86// 12hexoct := 6hexoct 6hexoct
87// 6hexoct := 4hexoct 2hexoct
88// 4hexoct := 2hexoct 2hexoct
89// 2hexoct := hexoct hexoct
90// hexoct := hexdig hexdig
91// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
92// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
93// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
94func (u *UUID) UnmarshalText(text []byte) (err error) {
95 switch len(text) {
96 case 32:
97 return u.decodeHashLike(text)
98 case 36:
99 return u.decodeCanonical(text)
100 case 38:
101 return u.decodeBraced(text)
102 case 41:
103 fallthrough
104 case 45:
105 return u.decodeURN(text)
106 default:
107 return fmt.Errorf("uuid: incorrect UUID length: %s", text)
108 }
109}
110
111// decodeCanonical decodes UUID string in format
112// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
113func (u *UUID) decodeCanonical(t []byte) (err error) {
114 if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
115 return fmt.Errorf("uuid: incorrect UUID format %s", t)
116 }
117
118 src := t[:]
119 dst := u[:]
120
121 for i, byteGroup := range byteGroups {
122 if i > 0 {
123 src = src[1:] // skip dash
124 }
125 _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
126 if err != nil {
127 return
128 }
129 src = src[byteGroup:]
130 dst = dst[byteGroup/2:]
131 }
132
133 return
134}
135
136// decodeHashLike decodes UUID string in format
137// "6ba7b8109dad11d180b400c04fd430c8".
138func (u *UUID) decodeHashLike(t []byte) (err error) {
139 src := t[:]
140 dst := u[:]
141
142 if _, err = hex.Decode(dst, src); err != nil {
143 return err
144 }
145 return
146}
147
148// decodeBraced decodes UUID string in format
149// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
150// "{6ba7b8109dad11d180b400c04fd430c8}".
151func (u *UUID) decodeBraced(t []byte) (err error) {
152 l := len(t)
153
154 if t[0] != '{' || t[l-1] != '}' {
155 return fmt.Errorf("uuid: incorrect UUID format %s", t)
156 }
157
158 return u.decodePlain(t[1 : l-1])
159}
160
161// decodeURN decodes UUID string in format
162// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
163// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
164func (u *UUID) decodeURN(t []byte) (err error) {
165 total := len(t)
166
167 urnUUIDPrefix := t[:9]
168
169 if !bytes.Equal(urnUUIDPrefix, urnPrefix) {
170 return fmt.Errorf("uuid: incorrect UUID format: %s", t)
171 }
172
173 return u.decodePlain(t[9:total])
174}
175
176// decodePlain decodes UUID string in canonical format
177// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
178// "6ba7b8109dad11d180b400c04fd430c8".
179func (u *UUID) decodePlain(t []byte) (err error) {
180 switch len(t) {
181 case 32:
182 return u.decodeHashLike(t)
183 case 36:
184 return u.decodeCanonical(t)
185 default:
186 return fmt.Errorf("uuid: incorrrect UUID length: %s", t)
187 }
188}
189
190// MarshalBinary implements the encoding.BinaryMarshaler interface.
191func (u UUID) MarshalBinary() (data []byte, err error) {
192 data = u.Bytes()
193 return
194}
195
196// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
197// It will return an error if the slice isn't 16 bytes long.
198func (u *UUID) UnmarshalBinary(data []byte) (err error) {
199 if len(data) != Size {
200 err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
201 return
202 }
203 copy(u[:], data)
204
205 return
206}
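`UnmarshalText` above dispatches purely on input length (32, 36, 38, and 41 or 45 bytes). A sketch exercising all four accepted forms through `FromString`:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	inputs := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical, 36 bytes
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // braced, 38 bytes
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN, 45 bytes
		"6ba7b8109dad11d180b400c04fd430c8",              // hash-like, 32 bytes
	}
	for _, in := range inputs {
		u, err := uuid.FromString(in)
		if err != nil {
			fmt.Println("parse failed:", err)
			continue
		}
		fmt.Println(u) // every form prints the same canonical text
	}
}
```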
diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go
new file mode 100644
index 0000000..3f2f1da
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/generator.go
@@ -0,0 +1,239 @@
1// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
2//
3// Permission is hereby granted, free of charge, to any person obtaining
4// a copy of this software and associated documentation files (the
5// "Software"), to deal in the Software without restriction, including
6// without limitation the rights to use, copy, modify, merge, publish,
7// distribute, sublicense, and/or sell copies of the Software, and to
8// permit persons to whom the Software is furnished to do so, subject to
9// the following conditions:
10//
11// The above copyright notice and this permission notice shall be
12// included in all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22package uuid
23
24import (
25 "crypto/md5"
26 "crypto/rand"
27 "crypto/sha1"
28 "encoding/binary"
29 "hash"
30 "net"
31 "os"
32 "sync"
33 "time"
34)
35
36// Difference in 100-nanosecond intervals between
37// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
38const epochStart = 122192928000000000
39
40var (
41 global = newDefaultGenerator()
42
43 epochFunc = unixTimeFunc
44 posixUID = uint32(os.Getuid())
45 posixGID = uint32(os.Getgid())
46)
47
48// NewV1 returns UUID based on current timestamp and MAC address.
49func NewV1() UUID {
50 return global.NewV1()
51}
52
53// NewV2 returns DCE Security UUID based on POSIX UID/GID.
54func NewV2(domain byte) UUID {
55 return global.NewV2(domain)
56}
57
58// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
59func NewV3(ns UUID, name string) UUID {
60 return global.NewV3(ns, name)
61}
62
63// NewV4 returns a randomly generated UUID.
64func NewV4() UUID {
65 return global.NewV4()
66}
67
68// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
69func NewV5(ns UUID, name string) UUID {
70 return global.NewV5(ns, name)
71}
72
73// Generator provides interface for generating UUIDs.
74type Generator interface {
75 NewV1() UUID
76 NewV2(domain byte) UUID
77 NewV3(ns UUID, name string) UUID
78 NewV4() UUID
79 NewV5(ns UUID, name string) UUID
80}
81
82// Default generator implementation.
83type generator struct {
84 storageOnce sync.Once
85 storageMutex sync.Mutex
86
87 lastTime uint64
88 clockSequence uint16
89 hardwareAddr [6]byte
90}
91
92func newDefaultGenerator() Generator {
93 return &generator{}
94}
95
96// NewV1 returns UUID based on current timestamp and MAC address.
97func (g *generator) NewV1() UUID {
98 u := UUID{}
99
100 timeNow, clockSeq, hardwareAddr := g.getStorage()
101
102 binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
103 binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
104 binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
105 binary.BigEndian.PutUint16(u[8:], clockSeq)
106
107 copy(u[10:], hardwareAddr)
108
109 u.SetVersion(V1)
110 u.SetVariant(VariantRFC4122)
111
112 return u
113}
114
115// NewV2 returns DCE Security UUID based on POSIX UID/GID.
116func (g *generator) NewV2(domain byte) UUID {
117 u := UUID{}
118
119 timeNow, clockSeq, hardwareAddr := g.getStorage()
120
121 switch domain {
122 case DomainPerson:
123 binary.BigEndian.PutUint32(u[0:], posixUID)
124 case DomainGroup:
125 binary.BigEndian.PutUint32(u[0:], posixGID)
126 }
127
128 binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
129 binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
130 binary.BigEndian.PutUint16(u[8:], clockSeq)
131 u[9] = domain
132
133 copy(u[10:], hardwareAddr)
134
135 u.SetVersion(V2)
136 u.SetVariant(VariantRFC4122)
137
138 return u
139}
140
141// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
142func (g *generator) NewV3(ns UUID, name string) UUID {
143 u := newFromHash(md5.New(), ns, name)
144 u.SetVersion(V3)
145 u.SetVariant(VariantRFC4122)
146
147 return u
148}
149
150// NewV4 returns a randomly generated UUID.
151func (g *generator) NewV4() UUID {
152 u := UUID{}
153 g.safeRandom(u[:])
154 u.SetVersion(V4)
155 u.SetVariant(VariantRFC4122)
156
157 return u
158}
159
160// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
161func (g *generator) NewV5(ns UUID, name string) UUID {
162 u := newFromHash(sha1.New(), ns, name)
163 u.SetVersion(V5)
164 u.SetVariant(VariantRFC4122)
165
166 return u
167}
168
169func (g *generator) initStorage() {
170 g.initClockSequence()
171 g.initHardwareAddr()
172}
173
174func (g *generator) initClockSequence() {
175 buf := make([]byte, 2)
176 g.safeRandom(buf)
177 g.clockSequence = binary.BigEndian.Uint16(buf)
178}
179
180func (g *generator) initHardwareAddr() {
181 interfaces, err := net.Interfaces()
182 if err == nil {
183 for _, iface := range interfaces {
184 if len(iface.HardwareAddr) >= 6 {
185 copy(g.hardwareAddr[:], iface.HardwareAddr)
186 return
187 }
188 }
189 }
190
191 // Initialize hardwareAddr randomly when no real
192 // network interface could be found
193 g.safeRandom(g.hardwareAddr[:])
194
195 // Set multicast bit as recommended in RFC 4122
196 g.hardwareAddr[0] |= 0x01
197}
198
199func (g *generator) safeRandom(dest []byte) {
200 if _, err := rand.Read(dest); err != nil {
201 panic(err)
202 }
203}
204
205// Returns UUID v1/v2 storage state.
206// Returns epoch timestamp, clock sequence, and hardware address.
207func (g *generator) getStorage() (uint64, uint16, []byte) {
208 g.storageOnce.Do(g.initStorage)
209
210 g.storageMutex.Lock()
211 defer g.storageMutex.Unlock()
212
213 timeNow := epochFunc()
214 // Clock changed backwards since last UUID generation.
215 // Should increase clock sequence.
216 if timeNow <= g.lastTime {
217 g.clockSequence++
218 }
219 g.lastTime = timeNow
220
221 return timeNow, g.clockSequence, g.hardwareAddr[:]
222}
223
224// Returns difference in 100-nanosecond intervals between
225// UUID epoch (October 15, 1582) and current time.
226// This is default epoch calculation function.
227func unixTimeFunc() uint64 {
228 return epochStart + uint64(time.Now().UnixNano()/100)
229}
230
231// Returns UUID based on hashing of namespace UUID and name.
232func newFromHash(h hash.Hash, ns UUID, name string) UUID {
233 u := UUID{}
234 h.Write(ns[:])
235 h.Write([]byte(name))
236 copy(u[:], h.Sum(nil))
237
238 return u
239}
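`NewV1` above packs a 60-bit count of 100-nanosecond intervals since 1582-10-15 into bytes 0 through 7, then overwrites the top nibble of byte 6 with the version. A sketch that reverses that layout to recover the embedded timestamp; the field offsets follow the `PutUint` calls above, and `v1Time` is a hypothetical helper, not part of the package:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"time"

	uuid "github.com/satori/go.uuid"
)

// epochStart mirrors the constant above: 100-ns intervals between the
// UUID epoch (1582-10-15) and the Unix epoch (1970-01-01).
const epochStart = 122192928000000000

// v1Time reverses the layout NewV1 writes: time_low in bytes 0-3,
// time_mid in 4-5, time_hi in 6-7 with the version nibble masked off.
func v1Time(u uuid.UUID) time.Time {
	low := uint64(binary.BigEndian.Uint32(u[0:4]))
	mid := uint64(binary.BigEndian.Uint16(u[4:6]))
	hi := uint64(binary.BigEndian.Uint16(u[6:8]) & 0x0fff)
	ts := low | mid<<32 | hi<<48 // 100-ns intervals since the UUID epoch
	return time.Unix(0, int64(ts-epochStart)*100)
}

func main() {
	u := uuid.NewV1()
	fmt.Println(v1Time(u)) // approximately time.Now()
}
```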
diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go
new file mode 100644
index 0000000..56759d3
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/sql.go
@@ -0,0 +1,78 @@
1// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
2//
3// Permission is hereby granted, free of charge, to any person obtaining
4// a copy of this software and associated documentation files (the
5// "Software"), to deal in the Software without restriction, including
6// without limitation the rights to use, copy, modify, merge, publish,
7// distribute, sublicense, and/or sell copies of the Software, and to
8// permit persons to whom the Software is furnished to do so, subject to
9// the following conditions:
10//
11// The above copyright notice and this permission notice shall be
12// included in all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22package uuid
23
24import (
25 "database/sql/driver"
26 "fmt"
27)
28
29// Value implements the driver.Valuer interface.
30func (u UUID) Value() (driver.Value, error) {
31 return u.String(), nil
32}
33
34// Scan implements the sql.Scanner interface.
35// A 16-byte slice is handled by UnmarshalBinary, while
36// a longer byte slice or a string is handled by UnmarshalText.
37func (u *UUID) Scan(src interface{}) error {
38 switch src := src.(type) {
39 case []byte:
40 if len(src) == Size {
41 return u.UnmarshalBinary(src)
42 }
43 return u.UnmarshalText(src)
44
45 case string:
46 return u.UnmarshalText([]byte(src))
47 }
48
49 return fmt.Errorf("uuid: cannot convert %T to UUID", src)
50}
51
52// NullUUID can be used with the standard sql package to represent a
53// UUID value that can be NULL in the database
54type NullUUID struct {
55 UUID UUID
56 Valid bool
57}
58
59// Value implements the driver.Valuer interface.
60func (u NullUUID) Value() (driver.Value, error) {
61 if !u.Valid {
62 return nil, nil
63 }
64 // Delegate to UUID Value function
65 return u.UUID.Value()
66}
67
68// Scan implements the sql.Scanner interface.
69func (u *NullUUID) Scan(src interface{}) error {
70 if src == nil {
71 u.UUID, u.Valid = Nil, false
72 return nil
73 }
74
75 // Delegate to UUID Scan function
76 u.Valid = true
77 return u.UUID.Scan(src)
78}
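`Scan` above accepts `nil`, `[]byte`, and `string`, which is exactly what `database/sql` hands a `Scanner`. A sketch of `NullUUID` at both ends of that contract; in real use `Rows.Scan` makes these calls, not application code:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	var n uuid.NullUUID

	// database/sql passes nil for a NULL column; Valid stays false.
	if err := n.Scan(nil); err != nil {
		panic(err)
	}
	fmt.Println(n.Valid) // false

	// A string (or 16-byte slice) scans into a valid UUID.
	if err := n.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	fmt.Println(n.Valid, n.UUID) // true 6ba7b810-9dad-11d1-80b4-00c04fd430c8
}
```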
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 0000000..a2b8e2c
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,161 @@
1// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
2//
3// Permission is hereby granted, free of charge, to any person obtaining
4// a copy of this software and associated documentation files (the
5// "Software"), to deal in the Software without restriction, including
6// without limitation the rights to use, copy, modify, merge, publish,
7// distribute, sublicense, and/or sell copies of the Software, and to
8// permit persons to whom the Software is furnished to do so, subject to
9// the following conditions:
10//
11// The above copyright notice and this permission notice shall be
12// included in all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22// Package uuid provides an implementation of the Universally Unique Identifier (UUID).
23// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
24// version 2 (as specified in DCE 1.1).
25package uuid
26
27import (
28 "bytes"
29 "encoding/hex"
30)
31
32// Size of a UUID in bytes.
33const Size = 16
34
35// UUID representation compliant with specification
36// described in RFC 4122.
37type UUID [Size]byte
38
39// UUID versions
40const (
41 _ byte = iota
42 V1
43 V2
44 V3
45 V4
46 V5
47)
48
49// UUID layout variants.
50const (
51 VariantNCS byte = iota
52 VariantRFC4122
53 VariantMicrosoft
54 VariantFuture
55)
56
57// UUID DCE domains.
58const (
59 DomainPerson = iota
60 DomainGroup
61 DomainOrg
62)
63
64// String parse helpers.
65var (
66 urnPrefix = []byte("urn:uuid:")
67 byteGroups = []int{8, 4, 4, 4, 12}
68)
69
70// Nil is a special form of UUID that is specified to have all
71// 128 bits set to zero.
72var Nil = UUID{}
73
74// Predefined namespace UUIDs.
75var (
76 NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
77 NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
78 NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
79 NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
80)
81
82// Equal returns true if u1 and u2 are equal, otherwise returns false.
83func Equal(u1 UUID, u2 UUID) bool {
84 return bytes.Equal(u1[:], u2[:])
85}
86
87// Version returns the algorithm version used to generate UUID.
88func (u UUID) Version() byte {
89 return u[6] >> 4
90}
91
92// Variant returns UUID layout variant.
93func (u UUID) Variant() byte {
94 switch {
95 case (u[8] >> 7) == 0x00:
96 return VariantNCS
97 case (u[8] >> 6) == 0x02:
98 return VariantRFC4122
99 case (u[8] >> 5) == 0x06:
100 return VariantMicrosoft
101 case (u[8] >> 5) == 0x07:
102 fallthrough
103 default:
104 return VariantFuture
105 }
106}
107
108// Bytes returns a byte slice representation of UUID.
109func (u UUID) Bytes() []byte {
110 return u[:]
111}
112
113// String returns the canonical string representation of UUID:
114// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
115func (u UUID) String() string {
116 buf := make([]byte, 36)
117
118 hex.Encode(buf[0:8], u[0:4])
119 buf[8] = '-'
120 hex.Encode(buf[9:13], u[4:6])
121 buf[13] = '-'
122 hex.Encode(buf[14:18], u[6:8])
123 buf[18] = '-'
124 hex.Encode(buf[19:23], u[8:10])
125 buf[23] = '-'
126 hex.Encode(buf[24:], u[10:])
127
128 return string(buf)
129}
130
131// SetVersion sets version bits.
132func (u *UUID) SetVersion(v byte) {
133 u[6] = (u[6] & 0x0f) | (v << 4)
134}
135
136// SetVariant sets variant bits.
137func (u *UUID) SetVariant(v byte) {
138 switch v {
139 case VariantNCS:
140 u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
141 case VariantRFC4122:
142 u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
143 case VariantMicrosoft:
144 u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
145 case VariantFuture:
146 fallthrough
147 default:
148 u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
149 }
150}
151
152// Must is a helper that wraps a call to a function returning (UUID, error)
153// and panics if the error is non-nil. It is intended for use in variable
154// initializations such as
155// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
156func Must(u UUID, err error) UUID {
157 if err != nil {
158 panic(err)
159 }
160 return u
161}
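Tying the accessors above together: `Version` reads the top nibble of byte 6 and `Variant` the top bits of byte 8, so a fresh V4 value reports exactly the bits `SetVersion` and `SetVariant` wrote. A short sketch:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	u := uuid.NewV4()

	fmt.Println(u.Version() == uuid.V4)             // true
	fmt.Println(u.Variant() == uuid.VariantRFC4122) // true

	// Nil is the all-zero UUID; a generated UUID can never equal it,
	// since the version bits are always set.
	fmt.Println(uuid.Equal(u, uuid.Nil)) // false
}
```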
diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml
new file mode 100644
index 0000000..13ff998
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml
@@ -0,0 +1,5 @@
1language: go
2
3go:
4 - 1.12
5
diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md
new file mode 100644
index 0000000..b3bc3b6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md
@@ -0,0 +1,10 @@
1# 1.0.1 (July 30, 2019)
2
3* The YAML decoder now correctly treats quoted scalars as verbatim literal
4 strings rather than applying the fuzzy type selection rules to them. Fuzzy
5 type selection rules still apply to unquoted scalars.
6 ([#4](https://github.com/zclconf/go-cty-yaml/pull/4))
7
8# 1.0.0 (May 26, 2019)
9
10Initial release.
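To make the 1.0.1 entry concrete, a sketch of the quoted-versus-unquoted distinction. It assumes the package's `Standard` converter and its `ImpliedType` method (the entry point Terraform drives this library through); treat that API shape as an assumption of the sketch, not something the changelog states:

```go
package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	// Unquoted scalar: fuzzy type selection applies, so `true` implies bool.
	t1, err := yaml.Standard.ImpliedType([]byte(`true`))
	if err != nil {
		panic(err)
	}
	fmt.Println(t1.FriendlyName()) // bool

	// Quoted scalar: since 1.0.1 it is taken verbatim, so it implies string.
	t2, err := yaml.Standard.ImpliedType([]byte(`"true"`))
	if err != nil {
		panic(err)
	}
	fmt.Println(t2.FriendlyName()) // string
}
```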
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/LICENSE
@@ -0,0 +1,201 @@
1 Apache License
2 Version 2.0, January 2004
3 http://www.apache.org/licenses/
4
5 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
7 1. Definitions.
8
9 "License" shall mean the terms and conditions for use, reproduction,
10 and distribution as defined by Sections 1 through 9 of this document.
11
12 "Licensor" shall mean the copyright owner or entity authorized by
13 the copyright owner that is granting the License.
14
15 "Legal Entity" shall mean the union of the acting entity and all
16 other entities that control, are controlled by, or are under common
17 control with that entity. For the purposes of this definition,
18 "control" means (i) the power, direct or indirect, to cause the
19 direction or management of such entity, whether by contract or
20 otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 outstanding shares, or (iii) beneficial ownership of such entity.
22
23 "You" (or "Your") shall mean an individual or Legal Entity
24 exercising permissions granted by this License.
25
26 "Source" form shall mean the preferred form for making modifications,
27 including but not limited to software source code, documentation
28 source, and configuration files.
29
30 "Object" form shall mean any form resulting from mechanical
31 transformation or translation of a Source form, including but
32 not limited to compiled object code, generated documentation,
33 and conversions to other media types.
34
35 "Work" shall mean the work of authorship, whether in Source or
36 Object form, made available under the License, as indicated by a
37 copyright notice that is included in or attached to the work
38 (an example is provided in the Appendix below).
39
40 "Derivative Works" shall mean any work, whether in Source or Object
41 form, that is based on (or derived from) the Work and for which the
42 editorial revisions, annotations, elaborations, or other modifications
43 represent, as a whole, an original work of authorship. For the purposes
44 of this License, Derivative Works shall not include works that remain
45 separable from, or merely link (or bind by name) to the interfaces of,
46 the Work and Derivative Works thereof.
47
48 "Contribution" shall mean any work of authorship, including
49 the original version of the Work and any modifications or additions
50 to that Work or Derivative Works thereof, that is intentionally
51 submitted to Licensor for inclusion in the Work by the copyright owner
52 or by an individual or Legal Entity authorized to submit on behalf of
53 the copyright owner. For the purposes of this definition, "submitted"
54 means any form of electronic, verbal, or written communication sent
55 to the Licensor or its representatives, including but not limited to
56 communication on electronic mailing lists, source code control systems,
57 and issue tracking systems that are managed by, or on behalf of, the
58 Licensor for the purpose of discussing and improving the Work, but
59 excluding communication that is conspicuously marked or otherwise
60 designated in writing by the copyright owner as "Not a Contribution."
61
62 "Contributor" shall mean Licensor and any individual or Legal Entity
63 on behalf of whom a Contribution has been received by Licensor and
64 subsequently incorporated within the Work.
65
66 2. Grant of Copyright License. Subject to the terms and conditions of
67 this License, each Contributor hereby grants to You a perpetual,
68 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 copyright license to reproduce, prepare Derivative Works of,
70 publicly display, publicly perform, sublicense, and distribute the
71 Work and such Derivative Works in Source or Object form.
72
73 3. Grant of Patent License. Subject to the terms and conditions of
74 this License, each Contributor hereby grants to You a perpetual,
75 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 (except as stated in this section) patent license to make, have made,
77 use, offer to sell, sell, import, and otherwise transfer the Work,
78 where such license applies only to those patent claims licensable
79 by such Contributor that are necessarily infringed by their
80 Contribution(s) alone or by combination of their Contribution(s)
81 with the Work to which such Contribution(s) was submitted. If You
82 institute patent litigation against any entity (including a
83 cross-claim or counterclaim in a lawsuit) alleging that the Work
84 or a Contribution incorporated within the Work constitutes direct
85 or contributory patent infringement, then any patent licenses
86 granted to You under this License for that Work shall terminate
87 as of the date such litigation is filed.
88
89 4. Redistribution. You may reproduce and distribute copies of the
90 Work or Derivative Works thereof in any medium, with or without
91 modifications, and in Source or Object form, provided that You
92 meet the following conditions:
93
94 (a) You must give any other recipients of the Work or
95 Derivative Works a copy of this License; and
96
97 (b) You must cause any modified files to carry prominent notices
98 stating that You changed the files; and
99
100 (c) You must retain, in the Source form of any Derivative Works
101 that You distribute, all copyright, patent, trademark, and
102 attribution notices from the Source form of the Work,
103 excluding those notices that do not pertain to any part of
104 the Derivative Works; and
105
106 (d) If the Work includes a "NOTICE" text file as part of its
107 distribution, then any Derivative Works that You distribute must
108 include a readable copy of the attribution notices contained
109 within such NOTICE file, excluding those notices that do not
110 pertain to any part of the Derivative Works, in at least one
111 of the following places: within a NOTICE text file distributed
112 as part of the Derivative Works; within the Source form or
113 documentation, if provided along with the Derivative Works; or,
114 within a display generated by the Derivative Works, if and
115 wherever such third-party notices normally appear. The contents
116 of the NOTICE file are for informational purposes only and
117 do not modify the License. You may add Your own attribution
118 notices within Derivative Works that You distribute, alongside
119 or as an addendum to the NOTICE text from the Work, provided
120 that such additional attribution notices cannot be construed
121 as modifying the License.
122
123 You may add Your own copyright statement to Your modifications and
124 may provide additional or different license terms and conditions
125 for use, reproduction, or distribution of Your modifications, or
126 for any such Derivative Works as a whole, provided Your use,
127 reproduction, and distribution of the Work otherwise complies with
128 the conditions stated in this License.
129
130 5. Submission of Contributions. Unless You explicitly state otherwise,
131 any Contribution intentionally submitted for inclusion in the Work
132 by You to the Licensor shall be under the terms and conditions of
133 this License, without any additional terms or conditions.
134 Notwithstanding the above, nothing herein shall supersede or modify
135 the terms of any separate license agreement you may have executed
136 with Licensor regarding such Contributions.
137
138 6. Trademarks. This License does not grant permission to use the trade
139 names, trademarks, service marks, or product names of the Licensor,
140 except as required for reasonable and customary use in describing the
141 origin of the Work and reproducing the content of the NOTICE file.
142
143 7. Disclaimer of Warranty. Unless required by applicable law or
144 agreed to in writing, Licensor provides the Work (and each
145 Contributor provides its Contributions) on an "AS IS" BASIS,
146 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 implied, including, without limitation, any warranties or conditions
148 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 PARTICULAR PURPOSE. You are solely responsible for determining the
150 appropriateness of using or redistributing the Work and assume any
151 risks associated with Your exercise of permissions under this License.
152
153 8. Limitation of Liability. In no event and under no legal theory,
154 whether in tort (including negligence), contract, or otherwise,
155 unless required by applicable law (such as deliberate and grossly
156 negligent acts) or agreed to in writing, shall any Contributor be
157 liable to You for damages, including any direct, indirect, special,
158 incidental, or consequential damages of any character arising as a
159 result of this License or out of the use or inability to use the
160 Work (including but not limited to damages for loss of goodwill,
161 work stoppage, computer failure or malfunction, or any and all
162 other commercial damages or losses), even if such Contributor
163 has been advised of the possibility of such damages.
164
165 9. Accepting Warranty or Additional Liability. While redistributing
166 the Work or Derivative Works thereof, You may choose to offer,
167 and charge a fee for, acceptance of support, warranty, indemnity,
168 or other liability obligations and/or rights consistent with this
169 License. However, in accepting such obligations, You may act only
170 on Your own behalf and on Your sole responsibility, not on behalf
171 of any other Contributor, and only if You agree to indemnify,
172 defend, and hold each Contributor harmless for any liability
173 incurred by, or claims asserted against, such Contributor by reason
174 of your accepting any such warranty or additional liability.
175
176 END OF TERMS AND CONDITIONS
177
178 APPENDIX: How to apply the Apache License to your work.
179
180 To apply the Apache License to your work, attach the following
181 boilerplate notice, with the fields enclosed by brackets "{}"
182 replaced with your own identifying information. (Don't include
183 the brackets!) The text should be enclosed in the appropriate
184 comment syntax for the file format. We also recommend that a
185 file or class name and description of purpose be included on the
186 same "printed page" as the copyright notice for easier
187 identification within third-party archives.
188
189 Copyright {yyyy} {name of copyright owner}
190
191 Licensed under the Apache License, Version 2.0 (the "License");
192 you may not use this file except in compliance with the License.
193 You may obtain a copy of the License at
194
195 http://www.apache.org/licenses/LICENSE-2.0
196
197 Unless required by applicable law or agreed to in writing, software
198 distributed under the License is distributed on an "AS IS" BASIS,
199 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 See the License for the specific language governing permissions and
201 limitations under the License.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml
@@ -0,0 +1,31 @@
1The following files were ported to Go from C files of libyaml, and thus
2are still covered by their original copyright and license:
3
4 apic.go
5 emitterc.go
6 parserc.go
7 readerc.go
8 scannerc.go
9 writerc.go
10 yamlh.go
11 yamlprivateh.go
12
13Copyright (c) 2006 Kirill Simonov
14
15Permission is hereby granted, free of charge, to any person obtaining a copy of
16this software and associated documentation files (the "Software"), to deal in
17the Software without restriction, including without limitation the rights to
18use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
19of the Software, and to permit persons to whom the Software is furnished to do
20so, subject to the following conditions:
21
22The above copyright notice and this permission notice shall be included in all
23copies or substantial portions of the Software.
24
25THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31SOFTWARE.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE
new file mode 100644
index 0000000..4e6c00a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/NOTICE
@@ -0,0 +1,20 @@
1This package is derived from gopkg.in/yaml.v2, which is copyright
22011-2016 Canonical Ltd.
3
4Licensed under the Apache License, Version 2.0 (the "License");
5you may not use this file except in compliance with the License.
6You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10Unless required by applicable law or agreed to in writing, software
11distributed under the License is distributed on an "AS IS" BASIS,
12WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13See the License for the specific language governing permissions and
14limitations under the License.
15
16Includes mechanical ports of code from libyaml, distributed under its original
17license. See LICENSE.libyaml for more information.
18
19Modifications for cty interfacing copyright 2019 Martin Atkins, and
20distributed under the same license terms.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go
new file mode 100644
index 0000000..1f7e87e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/apic.go
@@ -0,0 +1,739 @@
1package yaml
2
3import (
4 "io"
5)
6
7func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
8 //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
9
10 // Check if we can move the queue at the beginning of the buffer.
11 if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
12 if parser.tokens_head != len(parser.tokens) {
13 copy(parser.tokens, parser.tokens[parser.tokens_head:])
14 }
15 parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
16 parser.tokens_head = 0
17 }
18 parser.tokens = append(parser.tokens, *token)
19 if pos < 0 {
20 return
21 }
22 copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
23 parser.tokens[parser.tokens_head+pos] = *token
24}
25
26// Create a new parser object.
27func yaml_parser_initialize(parser *yaml_parser_t) bool {
28 *parser = yaml_parser_t{
29 raw_buffer: make([]byte, 0, input_raw_buffer_size),
30 buffer: make([]byte, 0, input_buffer_size),
31 }
32 return true
33}
34
35// Destroy a parser object.
36func yaml_parser_delete(parser *yaml_parser_t) {
37 *parser = yaml_parser_t{}
38}
39
40// String read handler.
41func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
42 if parser.input_pos == len(parser.input) {
43 return 0, io.EOF
44 }
45 n = copy(buffer, parser.input[parser.input_pos:])
46 parser.input_pos += n
47 return n, nil
48}
49
50// Reader read handler.
51func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
52 return parser.input_reader.Read(buffer)
53}
54
55// Set a string input.
56func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
57 if parser.read_handler != nil {
58 panic("must set the input source only once")
59 }
60 parser.read_handler = yaml_string_read_handler
61 parser.input = input
62 parser.input_pos = 0
63}
64
65// Set a file input.
66func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
67 if parser.read_handler != nil {
68 panic("must set the input source only once")
69 }
70 parser.read_handler = yaml_reader_read_handler
71 parser.input_reader = r
72}
73
74// Set the source encoding.
75func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
76 if parser.encoding != yaml_ANY_ENCODING {
77 panic("must set the encoding only once")
78 }
79 parser.encoding = encoding
80}
81
82// Create a new emitter object.
83func yaml_emitter_initialize(emitter *yaml_emitter_t) {
84 *emitter = yaml_emitter_t{
85 buffer: make([]byte, output_buffer_size),
86 raw_buffer: make([]byte, 0, output_raw_buffer_size),
87 states: make([]yaml_emitter_state_t, 0, initial_stack_size),
88 events: make([]yaml_event_t, 0, initial_queue_size),
89 }
90}
91
92// Destroy an emitter object.
93func yaml_emitter_delete(emitter *yaml_emitter_t) {
94 *emitter = yaml_emitter_t{}
95}
96
97// String write handler.
98func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
99 *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
100 return nil
101}
102
103// yaml_writer_write_handler uses emitter.output_writer to write the
104// emitted text.
105func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
106 _, err := emitter.output_writer.Write(buffer)
107 return err
108}
109
110// Set a string output.
111func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
112 if emitter.write_handler != nil {
113 panic("must set the output target only once")
114 }
115 emitter.write_handler = yaml_string_write_handler
116 emitter.output_buffer = output_buffer
117}
118
119// Set a file output.
120func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
121 if emitter.write_handler != nil {
122 panic("must set the output target only once")
123 }
124 emitter.write_handler = yaml_writer_write_handler
125 emitter.output_writer = w
126}
127
128// Set the output encoding.
129func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
130 if emitter.encoding != yaml_ANY_ENCODING {
131 panic("must set the output encoding only once")
132 }
133 emitter.encoding = encoding
134}
135
136// Set the canonical output style.
137func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
138 emitter.canonical = canonical
139}
140
141// Set the indentation increment.
142func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
143 if indent < 2 || indent > 9 {
144 indent = 2
145 }
146 emitter.best_indent = indent
147}
148
149// Set the preferred line width.
150func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
151 if width < 0 {
152 width = -1
153 }
154 emitter.best_width = width
155}
156
157// Set if unescaped non-ASCII characters are allowed.
158func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
159 emitter.unicode = unicode
160}
161
162// Set the preferred line break character.
163func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
164 emitter.line_break = line_break
165}
166
167///*
168// * Destroy a token object.
169// */
170//
171//YAML_DECLARE(void)
172//yaml_token_delete(yaml_token_t *token)
173//{
174// assert(token); // Non-NULL token object expected.
175//
176// switch (token.type)
177// {
178// case YAML_TAG_DIRECTIVE_TOKEN:
179// yaml_free(token.data.tag_directive.handle);
180// yaml_free(token.data.tag_directive.prefix);
181// break;
182//
183// case YAML_ALIAS_TOKEN:
184// yaml_free(token.data.alias.value);
185// break;
186//
187// case YAML_ANCHOR_TOKEN:
188// yaml_free(token.data.anchor.value);
189// break;
190//
191// case YAML_TAG_TOKEN:
192// yaml_free(token.data.tag.handle);
193// yaml_free(token.data.tag.suffix);
194// break;
195//
196// case YAML_SCALAR_TOKEN:
197// yaml_free(token.data.scalar.value);
198// break;
199//
200// default:
201// break;
202// }
203//
204// memset(token, 0, sizeof(yaml_token_t));
205//}
206//
207///*
208// * Check if a string is a valid UTF-8 sequence.
209// *
210// * Check 'reader.c' for more details on UTF-8 encoding.
211// */
212//
213//static int
214//yaml_check_utf8(yaml_char_t *start, size_t length)
215//{
216// yaml_char_t *end = start+length;
217// yaml_char_t *pointer = start;
218//
219// while (pointer < end) {
220// unsigned char octet;
221// unsigned int width;
222// unsigned int value;
223// size_t k;
224//
225// octet = pointer[0];
226// width = (octet & 0x80) == 0x00 ? 1 :
227// (octet & 0xE0) == 0xC0 ? 2 :
228// (octet & 0xF0) == 0xE0 ? 3 :
229// (octet & 0xF8) == 0xF0 ? 4 : 0;
230// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
231// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
232// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
233// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
234// if (!width) return 0;
235// if (pointer+width > end) return 0;
236// for (k = 1; k < width; k ++) {
237// octet = pointer[k];
238// if ((octet & 0xC0) != 0x80) return 0;
239// value = (value << 6) + (octet & 0x3F);
240// }
241// if (!((width == 1) ||
242// (width == 2 && value >= 0x80) ||
243// (width == 3 && value >= 0x800) ||
244// (width == 4 && value >= 0x10000))) return 0;
245//
246// pointer += width;
247// }
248//
249// return 1;
250//}
251//
252
253// Create STREAM-START.
254func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
255 *event = yaml_event_t{
256 typ: yaml_STREAM_START_EVENT,
257 encoding: encoding,
258 }
259}
260
261// Create STREAM-END.
262func yaml_stream_end_event_initialize(event *yaml_event_t) {
263 *event = yaml_event_t{
264 typ: yaml_STREAM_END_EVENT,
265 }
266}
267
268// Create DOCUMENT-START.
269func yaml_document_start_event_initialize(
270 event *yaml_event_t,
271 version_directive *yaml_version_directive_t,
272 tag_directives []yaml_tag_directive_t,
273 implicit bool,
274) {
275 *event = yaml_event_t{
276 typ: yaml_DOCUMENT_START_EVENT,
277 version_directive: version_directive,
278 tag_directives: tag_directives,
279 implicit: implicit,
280 }
281}
282
283// Create DOCUMENT-END.
284func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
285 *event = yaml_event_t{
286 typ: yaml_DOCUMENT_END_EVENT,
287 implicit: implicit,
288 }
289}
290
291///*
292// * Create ALIAS.
293// */
294//
295//YAML_DECLARE(int)
296//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
297//{
298// mark yaml_mark_t = { 0, 0, 0 }
299// anchor_copy *yaml_char_t = NULL
300//
301// assert(event) // Non-NULL event object is expected.
302// assert(anchor) // Non-NULL anchor is expected.
303//
304// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
305//
306// anchor_copy = yaml_strdup(anchor)
307// if (!anchor_copy)
308// return 0
309//
310// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
311//
312// return 1
313//}
314
315// Create SCALAR.
316func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
317 *event = yaml_event_t{
318 typ: yaml_SCALAR_EVENT,
319 anchor: anchor,
320 tag: tag,
321 value: value,
322 implicit: plain_implicit,
323 quoted_implicit: quoted_implicit,
324 style: yaml_style_t(style),
325 }
326 return true
327}
328
329// Create SEQUENCE-START.
330func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
331 *event = yaml_event_t{
332 typ: yaml_SEQUENCE_START_EVENT,
333 anchor: anchor,
334 tag: tag,
335 implicit: implicit,
336 style: yaml_style_t(style),
337 }
338 return true
339}
340
341// Create SEQUENCE-END.
342func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
343 *event = yaml_event_t{
344 typ: yaml_SEQUENCE_END_EVENT,
345 }
346 return true
347}
348
349// Create MAPPING-START.
350func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
351 *event = yaml_event_t{
352 typ: yaml_MAPPING_START_EVENT,
353 anchor: anchor,
354 tag: tag,
355 implicit: implicit,
356 style: yaml_style_t(style),
357 }
358}
359
360// Create MAPPING-END.
361func yaml_mapping_end_event_initialize(event *yaml_event_t) {
362 *event = yaml_event_t{
363 typ: yaml_MAPPING_END_EVENT,
364 }
365}
366
367// Destroy an event object.
368func yaml_event_delete(event *yaml_event_t) {
369 *event = yaml_event_t{}
370}
371
372///*
373// * Create a document object.
374// */
375//
376//YAML_DECLARE(int)
377//yaml_document_initialize(document *yaml_document_t,
378// version_directive *yaml_version_directive_t,
379// tag_directives_start *yaml_tag_directive_t,
380// tag_directives_end *yaml_tag_directive_t,
381// start_implicit int, end_implicit int)
382//{
383// struct {
384// error yaml_error_type_t
385// } context
386// struct {
387// start *yaml_node_t
388// end *yaml_node_t
389// top *yaml_node_t
390// } nodes = { NULL, NULL, NULL }
391// version_directive_copy *yaml_version_directive_t = NULL
392// struct {
393// start *yaml_tag_directive_t
394// end *yaml_tag_directive_t
395// top *yaml_tag_directive_t
396// } tag_directives_copy = { NULL, NULL, NULL }
397// value yaml_tag_directive_t = { NULL, NULL }
398// mark yaml_mark_t = { 0, 0, 0 }
399//
400// assert(document) // Non-NULL document object is expected.
401// assert((tag_directives_start && tag_directives_end) ||
402// (tag_directives_start == tag_directives_end))
403// // Valid tag directives are expected.
404//
405// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
406//
407// if (version_directive) {
408// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
409// if (!version_directive_copy) goto error
410// version_directive_copy.major = version_directive.major
411// version_directive_copy.minor = version_directive.minor
412// }
413//
414// if (tag_directives_start != tag_directives_end) {
415// tag_directive *yaml_tag_directive_t
416// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
417// goto error
418// for (tag_directive = tag_directives_start
419// tag_directive != tag_directives_end; tag_directive ++) {
420// assert(tag_directive.handle)
421// assert(tag_directive.prefix)
422// if (!yaml_check_utf8(tag_directive.handle,
423// strlen((char *)tag_directive.handle)))
424// goto error
425// if (!yaml_check_utf8(tag_directive.prefix,
426// strlen((char *)tag_directive.prefix)))
427// goto error
428// value.handle = yaml_strdup(tag_directive.handle)
429// value.prefix = yaml_strdup(tag_directive.prefix)
430// if (!value.handle || !value.prefix) goto error
431// if (!PUSH(&context, tag_directives_copy, value))
432// goto error
433// value.handle = NULL
434// value.prefix = NULL
435// }
436// }
437//
438// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
439// tag_directives_copy.start, tag_directives_copy.top,
440// start_implicit, end_implicit, mark, mark)
441//
442// return 1
443//
444//error:
445// STACK_DEL(&context, nodes)
446// yaml_free(version_directive_copy)
447// while (!STACK_EMPTY(&context, tag_directives_copy)) {
448// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
449// yaml_free(value.handle)
450// yaml_free(value.prefix)
451// }
452// STACK_DEL(&context, tag_directives_copy)
453// yaml_free(value.handle)
454// yaml_free(value.prefix)
455//
456// return 0
457//}
458//
459///*
460// * Destroy a document object.
461// */
462//
463//YAML_DECLARE(void)
464//yaml_document_delete(document *yaml_document_t)
465//{
466// struct {
467// error yaml_error_type_t
468// } context
469// tag_directive *yaml_tag_directive_t
470//
471// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
472//
473// assert(document) // Non-NULL document object is expected.
474//
475// while (!STACK_EMPTY(&context, document.nodes)) {
476// node yaml_node_t = POP(&context, document.nodes)
477// yaml_free(node.tag)
478// switch (node.type) {
479// case YAML_SCALAR_NODE:
480// yaml_free(node.data.scalar.value)
481// break
482// case YAML_SEQUENCE_NODE:
483// STACK_DEL(&context, node.data.sequence.items)
484// break
485// case YAML_MAPPING_NODE:
486// STACK_DEL(&context, node.data.mapping.pairs)
487// break
488// default:
489// assert(0) // Should not happen.
490// }
491// }
492// STACK_DEL(&context, document.nodes)
493//
494// yaml_free(document.version_directive)
495// for (tag_directive = document.tag_directives.start
496// tag_directive != document.tag_directives.end
497// tag_directive++) {
498// yaml_free(tag_directive.handle)
499// yaml_free(tag_directive.prefix)
500// }
501// yaml_free(document.tag_directives.start)
502//
503// memset(document, 0, sizeof(yaml_document_t))
504//}
505//
506///**
507// * Get a document node.
508// */
509//
510//YAML_DECLARE(yaml_node_t *)
511//yaml_document_get_node(document *yaml_document_t, index int)
512//{
513// assert(document) // Non-NULL document object is expected.
514//
515// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
516// return document.nodes.start + index - 1
517// }
518// return NULL
519//}
520//
521///**
522// * Get the root object.
523// */
524//
525//YAML_DECLARE(yaml_node_t *)
526//yaml_document_get_root_node(document *yaml_document_t)
527//{
528// assert(document) // Non-NULL document object is expected.
529//
530// if (document.nodes.top != document.nodes.start) {
531// return document.nodes.start
532// }
533// return NULL
534//}
535//
536///*
537// * Add a scalar node to a document.
538// */
539//
540//YAML_DECLARE(int)
541//yaml_document_add_scalar(document *yaml_document_t,
542// tag *yaml_char_t, value *yaml_char_t, length int,
543// style yaml_scalar_style_t)
544//{
545// struct {
546// error yaml_error_type_t
547// } context
548// mark yaml_mark_t = { 0, 0, 0 }
549// tag_copy *yaml_char_t = NULL
550// value_copy *yaml_char_t = NULL
551// node yaml_node_t
552//
553// assert(document) // Non-NULL document object is expected.
554// assert(value) // Non-NULL value is expected.
555//
556// if (!tag) {
557// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
558// }
559//
560// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
561// tag_copy = yaml_strdup(tag)
562// if (!tag_copy) goto error
563//
564// if (length < 0) {
565// length = strlen((char *)value)
566// }
567//
568// if (!yaml_check_utf8(value, length)) goto error
569// value_copy = yaml_malloc(length+1)
570// if (!value_copy) goto error
571// memcpy(value_copy, value, length)
572// value_copy[length] = '\0'
573//
574// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
575// if (!PUSH(&context, document.nodes, node)) goto error
576//
577// return document.nodes.top - document.nodes.start
578//
579//error:
580// yaml_free(tag_copy)
581// yaml_free(value_copy)
582//
583// return 0
584//}
585//
586///*
587// * Add a sequence node to a document.
588// */
589//
590//YAML_DECLARE(int)
591//yaml_document_add_sequence(document *yaml_document_t,
592// tag *yaml_char_t, style yaml_sequence_style_t)
593//{
594// struct {
595// error yaml_error_type_t
596// } context
597// mark yaml_mark_t = { 0, 0, 0 }
598// tag_copy *yaml_char_t = NULL
599// struct {
600// start *yaml_node_item_t
601// end *yaml_node_item_t
602// top *yaml_node_item_t
603// } items = { NULL, NULL, NULL }
604// node yaml_node_t
605//
606// assert(document) // Non-NULL document object is expected.
607//
608// if (!tag) {
609// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
610// }
611//
612// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
613// tag_copy = yaml_strdup(tag)
614// if (!tag_copy) goto error
615//
616// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
617//
618// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
619// style, mark, mark)
620// if (!PUSH(&context, document.nodes, node)) goto error
621//
622// return document.nodes.top - document.nodes.start
623//
624//error:
625// STACK_DEL(&context, items)
626// yaml_free(tag_copy)
627//
628// return 0
629//}
630//
631///*
632// * Add a mapping node to a document.
633// */
634//
635//YAML_DECLARE(int)
636//yaml_document_add_mapping(document *yaml_document_t,
637// tag *yaml_char_t, style yaml_mapping_style_t)
638//{
639// struct {
640// error yaml_error_type_t
641// } context
642// mark yaml_mark_t = { 0, 0, 0 }
643// tag_copy *yaml_char_t = NULL
644// struct {
645// start *yaml_node_pair_t
646// end *yaml_node_pair_t
647// top *yaml_node_pair_t
648// } pairs = { NULL, NULL, NULL }
649// node yaml_node_t
650//
651// assert(document) // Non-NULL document object is expected.
652//
653// if (!tag) {
654// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
655// }
656//
657// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
658// tag_copy = yaml_strdup(tag)
659// if (!tag_copy) goto error
660//
661// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
662//
663// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
664// style, mark, mark)
665// if (!PUSH(&context, document.nodes, node)) goto error
666//
667// return document.nodes.top - document.nodes.start
668//
669//error:
670// STACK_DEL(&context, pairs)
671// yaml_free(tag_copy)
672//
673// return 0
674//}
675//
676///*
677// * Append an item to a sequence node.
678// */
679//
680//YAML_DECLARE(int)
681//yaml_document_append_sequence_item(document *yaml_document_t,
682// sequence int, item int)
683//{
684// struct {
685// error yaml_error_type_t
686// } context
687//
688// assert(document) // Non-NULL document is required.
689// assert(sequence > 0
690// && document.nodes.start + sequence <= document.nodes.top)
691// // Valid sequence id is required.
692// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
693// // A sequence node is required.
694// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
695// // Valid item id is required.
696//
697// if (!PUSH(&context,
698// document.nodes.start[sequence-1].data.sequence.items, item))
699// return 0
700//
701// return 1
702//}
703//
704///*
705// * Append a pair of a key and a value to a mapping node.
706// */
707//
708//YAML_DECLARE(int)
709//yaml_document_append_mapping_pair(document *yaml_document_t,
710// mapping int, key int, value int)
711//{
712// struct {
713// error yaml_error_type_t
714// } context
715//
716// pair yaml_node_pair_t
717//
718// assert(document) // Non-NULL document is required.
719// assert(mapping > 0
720// && document.nodes.start + mapping <= document.nodes.top)
721// // Valid mapping id is required.
722// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
723// // A mapping node is required.
724// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
725// // Valid key id is required.
726// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
727// // Valid value id is required.
728//
729// pair.key = key
730// pair.value = value
731//
732// if (!PUSH(&context,
733// document.nodes.start[mapping-1].data.mapping.pairs, pair))
734// return 0
735//
736// return 1
737//}
738//
739//
diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go
new file mode 100644
index 0000000..a73b34a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/converter.go
@@ -0,0 +1,69 @@
1package yaml
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// ConverterConfig is used to configure a new converter, using NewConverter.
8type ConverterConfig struct {
9 // EncodeAsFlow, when set to true, causes Marshal to produce flow-style
10 // mapping and sequence serializations.
11 EncodeAsFlow bool
12}
13
14// A Converter can marshal and unmarshal between cty values and YAML bytes.
15//
16// Because there are many different ways to map cty to YAML and vice-versa,
17// a converter is configurable using the settings in ConverterConfig, which
18// allow for a few different variations of the mapping.
19//
20// If you are just trying to work with generic, standard YAML, the predefined
21// converter in Standard should be good enough.
22type Converter struct {
23 encodeAsFlow bool
24}
25
26// NewConverter creates a new Converter with the given configuration.
27func NewConverter(config *ConverterConfig) *Converter {
28 return &Converter{
29 encodeAsFlow: config.EncodeAsFlow,
30 }
31}
32
33// Standard is a predefined Converter that produces and consumes generic YAML
34// using only built-in constructs that any other YAML implementation ought to
35// understand.
36var Standard *Converter = NewConverter(&ConverterConfig{})
37
38// ImpliedType analyzes the given source code and returns a suitable type that
39// it could be decoded into.
40//
41// For a converter that is using standard YAML rather than cty-specific custom
42// tags, only a subset of cty types can be produced: strings, numbers, bools,
43// tuple types, and object types.
44func (c *Converter) ImpliedType(src []byte) (cty.Type, error) {
45 return c.impliedType(src)
46}
47
48// Marshal serializes the given value into a YAML document, using a fixed
49// mapping from cty types to YAML constructs.
50//
51// Note that unlike the function of the same name in the cty JSON package,
52// this does not take a type constraint, so the serialization cannot
53// preserve late-bound type information for Unmarshal to recover. Instead,
54// any cty.DynamicPseudoType in the type constraint given to Unmarshal is
55// decoded as if the corresponding portion of the input were processed with
56// ImpliedType to find a target type.
57func (c *Converter) Marshal(v cty.Value) ([]byte, error) {
58 return c.marshal(v)
59}
60
61// Unmarshal reads the document found within the given source buffer
62// and attempts to convert it into a value conforming to the given type
63// constraint.
64//
65// An error is returned if the given source contains any YAML document
66// delimiters.
67func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
68 return c.unmarshal(src, ty)
69}
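// A minimal usage sketch (illustrative, not part of the vendored file):
// a caller uses ImpliedType to pick a decode type, Unmarshal to decode,
// and Marshal to round-trip back to YAML.
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "github.com/zclconf/go-cty-yaml"
//	)
//
//	func main() {
//		src := []byte("name: example\ncount: 2\n")
//		ty, err := yaml.Standard.ImpliedType(src)
//		if err != nil {
//			panic(err)
//		}
//		v, err := yaml.Standard.Unmarshal(src, ty)
//		if err != nil {
//			panic(err)
//		}
//		out, err := yaml.Standard.Marshal(v)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Printf("%s", out)
//	}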
diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go
new file mode 100644
index 0000000..b91141c
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go
@@ -0,0 +1,57 @@
1package yaml
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 "github.com/zclconf/go-cty/cty/function"
6)
7
8// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code
9// into a cty Value, using the ImpliedType and Unmarshal methods of the
10// Standard pre-defined converter.
11var YAMLDecodeFunc = function.New(&function.Spec{
12 Params: []function.Parameter{
13 {
14 Name: "src",
15 Type: cty.String,
16 },
17 },
18 Type: func(args []cty.Value) (cty.Type, error) {
19 if !args[0].IsKnown() {
20 return cty.DynamicPseudoType, nil
21 }
22 if args[0].IsNull() {
23 return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null")
24 }
25 return Standard.ImpliedType([]byte(args[0].AsString()))
26 },
27 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
28 if retType == cty.DynamicPseudoType {
29 return cty.DynamicVal, nil
30 }
31 return Standard.Unmarshal([]byte(args[0].AsString()), retType)
32 },
33})
34
35// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value
36// into YAML.
37var YAMLEncodeFunc = function.New(&function.Spec{
38 Params: []function.Parameter{
39 {
40 Name: "value",
41 Type: cty.DynamicPseudoType,
42 AllowNull: true,
43 AllowDynamicType: true,
44 },
45 },
46 Type: function.StaticReturnType(cty.String),
47 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
48 if !args[0].IsWhollyKnown() {
49 return cty.UnknownVal(retType), nil
50 }
51 raw, err := Standard.Marshal(args[0])
52 if err != nil {
53 return cty.NilVal, err
54 }
55 return cty.StringVal(string(raw)), nil
56 },
57})
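// A minimal usage sketch (illustrative, not part of the vendored file):
// both values are ordinary cty functions, so they can be invoked directly
// with Call or exposed in an HCL evaluation context.
//
//	v, err := YAMLDecodeFunc.Call([]cty.Value{
//		cty.StringVal("a: 1\nb: [true, false]\n"),
//	})
//	// v is an object value with attributes "a" and "b".
//
//	s, err := YAMLEncodeFunc.Call([]cty.Value{v})
//	// s is a cty.String holding the YAML serialization of v.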
diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go
new file mode 100644
index 0000000..e369ff2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/decode.go
@@ -0,0 +1,261 @@
1package yaml
2
3import (
4 "errors"
5 "fmt"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9)
10
11func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
12 p := &yaml_parser_t{}
13 if !yaml_parser_initialize(p) {
14 return cty.NilVal, errors.New("failed to initialize YAML parser")
15 }
16 if len(src) == 0 {
17 src = []byte{'\n'}
18 }
19
20 an := &valueAnalysis{
21 anchorsPending: map[string]int{},
22 anchorVals: map[string]cty.Value{},
23 }
24
25 yaml_parser_set_input_string(p, src)
26
27 var evt yaml_event_t
28 if !yaml_parser_parse(p, &evt) {
29 return cty.NilVal, parserError(p)
30 }
31 if evt.typ != yaml_STREAM_START_EVENT {
32 return cty.NilVal, parseEventErrorf(&evt, "missing stream start token")
33 }
34 if !yaml_parser_parse(p, &evt) {
35 return cty.NilVal, parserError(p)
36 }
37 if evt.typ != yaml_DOCUMENT_START_EVENT {
38 return cty.NilVal, parseEventErrorf(&evt, "missing start of document")
39 }
40
41 v, err := c.unmarshalParse(an, p)
42 if err != nil {
43 return cty.NilVal, err
44 }
45
46 if !yaml_parser_parse(p, &evt) {
47 return cty.NilVal, parserError(p)
48 }
49 if evt.typ == yaml_DOCUMENT_START_EVENT {
50 return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed")
51 }
52 if evt.typ != yaml_DOCUMENT_END_EVENT {
53 return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String())
54 }
55 if !yaml_parser_parse(p, &evt) {
56 return cty.NilVal, parserError(p)
57 }
58 if evt.typ != yaml_STREAM_END_EVENT {
59 return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value")
60 }
61
62 return convert.Convert(v, ty)
63}
64
65func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) {
66 var evt yaml_event_t
67 if !yaml_parser_parse(p, &evt) {
68 return cty.NilVal, parserError(p)
69 }
70 return c.unmarshalParseRemainder(an, &evt, p)
71}
72
73func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
74 switch evt.typ {
75 case yaml_SCALAR_EVENT:
76 return c.unmarshalScalar(an, evt, p)
77 case yaml_ALIAS_EVENT:
78 return c.unmarshalAlias(an, evt, p)
79 case yaml_MAPPING_START_EVENT:
80 return c.unmarshalMapping(an, evt, p)
81 case yaml_SEQUENCE_START_EVENT:
82 return c.unmarshalSequence(an, evt, p)
83 case yaml_DOCUMENT_START_EVENT:
84 return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed")
85 case yaml_STREAM_END_EVENT:
86 // Decoding an empty buffer, probably
87 return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream")
88 default:
89 // Should never happen; the above should be comprehensive
90 return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String())
91 }
92}
93
94func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
95 src := evt.value
96 tag := string(evt.tag)
97 anchor := string(evt.anchor)
98
99 if len(anchor) > 0 {
100 an.beginAnchor(anchor)
101 }
102
103 val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style))
104 if err != nil {
105 return cty.NilVal, parseEventErrorWrap(evt, err)
106 }
107
108 if val.RawEquals(mergeMappingVal) {
109 // In any context other than a mapping key, this is just a plain string
110 val = cty.StringVal("<<")
111 }
112
113 if len(anchor) > 0 {
114 an.completeAnchor(anchor, val)
115 }
116 return val, nil
117}
118
119func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
120 tag := string(evt.tag)
121 anchor := string(evt.anchor)
122
123 if tag != "" && tag != yaml_MAP_TAG {
124 return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag)
125 }
126
127 if anchor != "" {
128 an.beginAnchor(anchor)
129 }
130
131 vals := make(map[string]cty.Value)
132 for {
133 var nextEvt yaml_event_t
134 if !yaml_parser_parse(p, &nextEvt) {
135 return cty.NilVal, parserError(p)
136 }
137 if nextEvt.typ == yaml_MAPPING_END_EVENT {
138 v := cty.ObjectVal(vals)
139 if anchor != "" {
140 an.completeAnchor(anchor, v)
141 }
142 return v, nil
143 }
144
145 if nextEvt.typ != yaml_SCALAR_EVENT {
146 return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
147 }
148 keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style))
149 if err != nil {
150 return cty.NilVal, err
151 }
152 if keyVal.RawEquals(mergeMappingVal) {
153			// This is a merge key ("<<"): merge the following value,
154			// which must itself be a mapping, into this mapping.
155 val, err := c.unmarshalParse(an, p)
156 if err != nil {
157 return cty.NilVal, err
158 }
159 ty := val.Type()
160 if !(ty.IsObjectType() || ty.IsMapType()) {
161 return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName())
162 }
163 for it := val.ElementIterator(); it.Next(); {
164 k, v := it.Element()
165 vals[k.AsString()] = v
166 }
167 continue
168 }
169 if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil {
170 keyVal = keyValStr
171 } else {
172 return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
173 }
174 if keyVal.IsNull() {
175 return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null")
176 }
177 if !keyVal.IsKnown() {
178 return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known")
179 }
180 val, err := c.unmarshalParse(an, p)
181 if err != nil {
182 return cty.NilVal, err
183 }
184
185 vals[keyVal.AsString()] = val
186 }
187}
188
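// Illustrative note (a sketch, not part of the vendored file): the
// mergeMappingVal branch above implements YAML merge keys, so a document
// such as
//
//	base: &base
//	  a: 1
//	merged:
//	  <<: *base
//	  b: 2
//
// decodes "merged" to an object carrying both "a" and "b".
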
189func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
190 tag := string(evt.tag)
191 anchor := string(evt.anchor)
192
193 if tag != "" && tag != yaml_SEQ_TAG {
194 return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag)
195 }
196
197 if anchor != "" {
198 an.beginAnchor(anchor)
199 }
200
201 var vals []cty.Value
202 for {
203 var nextEvt yaml_event_t
204 if !yaml_parser_parse(p, &nextEvt) {
205 return cty.NilVal, parserError(p)
206 }
207 if nextEvt.typ == yaml_SEQUENCE_END_EVENT {
208			v := cty.TupleVal(vals)
209			if anchor != "" {
210				an.completeAnchor(anchor, v)
211			}
212			return v, nil
213 }
214
215 val, err := c.unmarshalParseRemainder(an, &nextEvt, p)
216 if err != nil {
217 return cty.NilVal, err
218 }
219
220 vals = append(vals, val)
221 }
222}
223
224func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
225 v, err := an.anchorVal(string(evt.anchor))
226 if err != nil {
227 err = parseEventErrorWrap(evt, err)
228 }
229 return v, err
230}
231
232type valueAnalysis struct {
233 anchorsPending map[string]int
234 anchorVals map[string]cty.Value
235}
236
237func (an *valueAnalysis) beginAnchor(name string) {
238 an.anchorsPending[name]++
239}
240
241func (an *valueAnalysis) completeAnchor(name string, v cty.Value) {
242 an.anchorsPending[name]--
243 if an.anchorsPending[name] == 0 {
244 delete(an.anchorsPending, name)
245 }
246 an.anchorVals[name] = v
247}
248
249func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) {
250 if _, pending := an.anchorsPending[name]; pending {
251 // YAML normally allows self-referencing structures, but cty cannot
252 // represent them (it requires all structures to be finite) so we
253 // must fail here.
254 return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name)
255 }
256	v, ok := an.anchorVals[name]
257	if !ok {
258		return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name)
259	}
260	return v, nil
261}
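// Illustrative note (a sketch, not part of the vendored file): aliases
// resolve to the anchored value itself, while self-reference is rejected
// because cty values must be finite. For example:
//
//	ok: &nums [1, 2]
//	ref: *nums        # decodes to the same tuple value as "ok"
//
// whereas `self: &x [*x]` fails with "cannot refer to anchor ... from
// inside its own definition".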
diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go
new file mode 100644
index 0000000..a1c2cc5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go
@@ -0,0 +1,1685 @@
1package yaml
2
3import (
4 "bytes"
5 "fmt"
6)
7
8// Flush the buffer if needed.
9func flush(emitter *yaml_emitter_t) bool {
10 if emitter.buffer_pos+5 >= len(emitter.buffer) {
11 return yaml_emitter_flush(emitter)
12 }
13 return true
14}
15
16// Put a character to the output buffer.
17func put(emitter *yaml_emitter_t, value byte) bool {
18 if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
19 return false
20 }
21 emitter.buffer[emitter.buffer_pos] = value
22 emitter.buffer_pos++
23 emitter.column++
24 return true
25}
26
27// Put a line break to the output buffer.
28func put_break(emitter *yaml_emitter_t) bool {
29 if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
30 return false
31 }
32 switch emitter.line_break {
33 case yaml_CR_BREAK:
34 emitter.buffer[emitter.buffer_pos] = '\r'
35 emitter.buffer_pos += 1
36 case yaml_LN_BREAK:
37 emitter.buffer[emitter.buffer_pos] = '\n'
38 emitter.buffer_pos += 1
39 case yaml_CRLN_BREAK:
40 emitter.buffer[emitter.buffer_pos+0] = '\r'
41 emitter.buffer[emitter.buffer_pos+1] = '\n'
42 emitter.buffer_pos += 2
43 default:
44 panic("unknown line break setting")
45 }
46 emitter.column = 0
47 emitter.line++
48 return true
49}
50
51// Copy a character from a string into buffer.
52func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
53 if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
54 return false
55 }
56 p := emitter.buffer_pos
57 w := width(s[*i])
58 switch w {
59 case 4:
60 emitter.buffer[p+3] = s[*i+3]
61 fallthrough
62 case 3:
63 emitter.buffer[p+2] = s[*i+2]
64 fallthrough
65 case 2:
66 emitter.buffer[p+1] = s[*i+1]
67 fallthrough
68 case 1:
69 emitter.buffer[p+0] = s[*i+0]
70 default:
71 panic("unknown character width")
72 }
73 emitter.column++
74 emitter.buffer_pos += w
75 *i += w
76 return true
77}
78
79// Write a whole string into buffer.
80func write_all(emitter *yaml_emitter_t, s []byte) bool {
81 for i := 0; i < len(s); {
82 if !write(emitter, s, &i) {
83 return false
84 }
85 }
86 return true
87}
88
89// Copy a line break character from a string into buffer.
90func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
91 if s[*i] == '\n' {
92 if !put_break(emitter) {
93 return false
94 }
95 *i++
96 } else {
97 if !write(emitter, s, i) {
98 return false
99 }
100 emitter.column = 0
101 emitter.line++
102 }
103 return true
104}
105
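// Illustrative note (a sketch, not part of the vendored file): the
// recurring `buffer_pos+5 >= len(buffer)` guard in the helpers above
// reserves headroom for the largest single write — a 4-byte UTF-8
// sequence, a 2-byte CRLF break, or the 3-byte BOM — so each helper
// flushes at most once per call.
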
106// Set an emitter error and return false.
107func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
108 emitter.error = yaml_EMITTER_ERROR
109 emitter.problem = problem
110 return false
111}
112
113// Emit an event.
114func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
115 emitter.events = append(emitter.events, *event)
116 for !yaml_emitter_need_more_events(emitter) {
117 event := &emitter.events[emitter.events_head]
118 if !yaml_emitter_analyze_event(emitter, event) {
119 return false
120 }
121 if !yaml_emitter_state_machine(emitter, event) {
122 return false
123 }
124 yaml_event_delete(event)
125 emitter.events_head++
126 }
127 return true
128}
129
130// Check if we need to accumulate more events before emitting.
131//
132// We accumulate extra:
133// - 1 event for DOCUMENT-START
134// - 2 events for SEQUENCE-START
135// - 3 events for MAPPING-START
136//
137func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
138 if emitter.events_head == len(emitter.events) {
139 return true
140 }
141 var accumulate int
142 switch emitter.events[emitter.events_head].typ {
143 case yaml_DOCUMENT_START_EVENT:
144 accumulate = 1
145 break
146 case yaml_SEQUENCE_START_EVENT:
147 accumulate = 2
148 break
149 case yaml_MAPPING_START_EVENT:
150 accumulate = 3
151 break
152 default:
153 return false
154 }
155 if len(emitter.events)-emitter.events_head > accumulate {
156 return false
157 }
158 var level int
159 for i := emitter.events_head; i < len(emitter.events); i++ {
160 switch emitter.events[i].typ {
161 case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
162 level++
163 case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
164 level--
165 }
166 if level == 0 {
167 return false
168 }
169 }
170 return true
171}
172
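// Illustrative note (a sketch, not part of the vendored file): buffering a
// few events past MAPPING-START (or SEQUENCE-START) lets look-ahead checks
// such as yaml_emitter_check_empty_mapping and yaml_emitter_check_simple_key
// inspect the first key or the immediate *-END event before the emitter
// commits to a flow or block style.
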
173// Append a directive to the directives stack.
174func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
175 for i := 0; i < len(emitter.tag_directives); i++ {
176 if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
177 if allow_duplicates {
178 return true
179 }
180 return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
181 }
182 }
183
184 // [Go] Do we actually need to copy this given garbage collection
185 // and the lack of deallocating destructors?
186 tag_copy := yaml_tag_directive_t{
187 handle: make([]byte, len(value.handle)),
188 prefix: make([]byte, len(value.prefix)),
189 }
190 copy(tag_copy.handle, value.handle)
191 copy(tag_copy.prefix, value.prefix)
192 emitter.tag_directives = append(emitter.tag_directives, tag_copy)
193 return true
194}
195
196// Increase the indentation level.
197func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
198 emitter.indents = append(emitter.indents, emitter.indent)
199 if emitter.indent < 0 {
200 if flow {
201 emitter.indent = emitter.best_indent
202 } else {
203 emitter.indent = 0
204 }
205 } else if !indentless {
206 emitter.indent += emitter.best_indent
207 }
208 return true
209}
210
211// State dispatcher.
212func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
213 switch emitter.state {
214 default:
215 case yaml_EMIT_STREAM_START_STATE:
216 return yaml_emitter_emit_stream_start(emitter, event)
217
218 case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
219 return yaml_emitter_emit_document_start(emitter, event, true)
220
221 case yaml_EMIT_DOCUMENT_START_STATE:
222 return yaml_emitter_emit_document_start(emitter, event, false)
223
224 case yaml_EMIT_DOCUMENT_CONTENT_STATE:
225 return yaml_emitter_emit_document_content(emitter, event)
226
227 case yaml_EMIT_DOCUMENT_END_STATE:
228 return yaml_emitter_emit_document_end(emitter, event)
229
230 case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
231 return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
232
233 case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
234 return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
235
236 case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
237 return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
238
239 case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
240 return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
241
242 case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
243 return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
244
245 case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
246 return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
247
248 case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
249 return yaml_emitter_emit_block_sequence_item(emitter, event, true)
250
251 case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
252 return yaml_emitter_emit_block_sequence_item(emitter, event, false)
253
254 case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
255 return yaml_emitter_emit_block_mapping_key(emitter, event, true)
256
257 case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
258 return yaml_emitter_emit_block_mapping_key(emitter, event, false)
259
260 case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
261 return yaml_emitter_emit_block_mapping_value(emitter, event, true)
262
263 case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
264 return yaml_emitter_emit_block_mapping_value(emitter, event, false)
265
266 case yaml_EMIT_END_STATE:
267 return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
268 }
269 panic("invalid emitter state")
270}
271
272// Expect STREAM-START.
273func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
274 if event.typ != yaml_STREAM_START_EVENT {
275 return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
276 }
277 if emitter.encoding == yaml_ANY_ENCODING {
278 emitter.encoding = event.encoding
279 if emitter.encoding == yaml_ANY_ENCODING {
280 emitter.encoding = yaml_UTF8_ENCODING
281 }
282 }
283 if emitter.best_indent < 2 || emitter.best_indent > 9 {
284 emitter.best_indent = 2
285 }
286 if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
287 emitter.best_width = 80
288 }
289 if emitter.best_width < 0 {
290 emitter.best_width = 1<<31 - 1
291 }
292 if emitter.line_break == yaml_ANY_BREAK {
293 emitter.line_break = yaml_LN_BREAK
294 }
295
296 emitter.indent = -1
297 emitter.line = 0
298 emitter.column = 0
299 emitter.whitespace = true
300 emitter.indention = true
301
302 if emitter.encoding != yaml_UTF8_ENCODING {
303 if !yaml_emitter_write_bom(emitter) {
304 return false
305 }
306 }
307 emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
308 return true
309}
310
311// Expect DOCUMENT-START or STREAM-END.
312func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
313
314 if event.typ == yaml_DOCUMENT_START_EVENT {
315
316 if event.version_directive != nil {
317 if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
318 return false
319 }
320 }
321
322 for i := 0; i < len(event.tag_directives); i++ {
323 tag_directive := &event.tag_directives[i]
324 if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
325 return false
326 }
327 if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
328 return false
329 }
330 }
331
332 for i := 0; i < len(default_tag_directives); i++ {
333 tag_directive := &default_tag_directives[i]
334 if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
335 return false
336 }
337 }
338
339 implicit := event.implicit
340 if !first || emitter.canonical {
341 implicit = false
342 }
343
344 if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
345 if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
346 return false
347 }
348 if !yaml_emitter_write_indent(emitter) {
349 return false
350 }
351 }
352
353 if event.version_directive != nil {
354 implicit = false
355 if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
356 return false
357 }
358 if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
359 return false
360 }
361 if !yaml_emitter_write_indent(emitter) {
362 return false
363 }
364 }
365
366 if len(event.tag_directives) > 0 {
367 implicit = false
368 for i := 0; i < len(event.tag_directives); i++ {
369 tag_directive := &event.tag_directives[i]
370 if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
371 return false
372 }
373 if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
374 return false
375 }
376 if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
377 return false
378 }
379 if !yaml_emitter_write_indent(emitter) {
380 return false
381 }
382 }
383 }
384
385 if yaml_emitter_check_empty_document(emitter) {
386 implicit = false
387 }
388 if !implicit {
389 if !yaml_emitter_write_indent(emitter) {
390 return false
391 }
392 if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
393 return false
394 }
395 if emitter.canonical {
396 if !yaml_emitter_write_indent(emitter) {
397 return false
398 }
399 }
400 }
401
402 emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
403 return true
404 }
405
406 if event.typ == yaml_STREAM_END_EVENT {
407 if emitter.open_ended {
408 if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
409 return false
410 }
411 if !yaml_emitter_write_indent(emitter) {
412 return false
413 }
414 }
415 if !yaml_emitter_flush(emitter) {
416 return false
417 }
418 emitter.state = yaml_EMIT_END_STATE
419 return true
420 }
421
422 return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
423}
424
425// Expect the root node.
426func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
427 emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
428 return yaml_emitter_emit_node(emitter, event, true, false, false, false)
429}
430
431// Expect DOCUMENT-END.
432func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
433 if event.typ != yaml_DOCUMENT_END_EVENT {
434 return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
435 }
436 if !yaml_emitter_write_indent(emitter) {
437 return false
438 }
439 if !event.implicit {
440 // [Go] Allocate the slice elsewhere.
441 if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
442 return false
443 }
444 if !yaml_emitter_write_indent(emitter) {
445 return false
446 }
447 }
448 if !yaml_emitter_flush(emitter) {
449 return false
450 }
451 emitter.state = yaml_EMIT_DOCUMENT_START_STATE
452 emitter.tag_directives = emitter.tag_directives[:0]
453 return true
454}
455
456// Expect a flow item node.
457func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
458 if first {
459 if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
460 return false
461 }
462 if !yaml_emitter_increase_indent(emitter, true, false) {
463 return false
464 }
465 emitter.flow_level++
466 }
467
468 if event.typ == yaml_SEQUENCE_END_EVENT {
469 emitter.flow_level--
470 emitter.indent = emitter.indents[len(emitter.indents)-1]
471 emitter.indents = emitter.indents[:len(emitter.indents)-1]
472 if emitter.canonical && !first {
473 if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
474 return false
475 }
476 if !yaml_emitter_write_indent(emitter) {
477 return false
478 }
479 }
480 if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
481 return false
482 }
483 emitter.state = emitter.states[len(emitter.states)-1]
484 emitter.states = emitter.states[:len(emitter.states)-1]
485
486 return true
487 }
488
489 if !first {
490 if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
491 return false
492 }
493 }
494
495 if emitter.canonical || emitter.column > emitter.best_width {
496 if !yaml_emitter_write_indent(emitter) {
497 return false
498 }
499 }
500 emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
501 return yaml_emitter_emit_node(emitter, event, false, true, false, false)
502}
503
504// Expect a flow key node.
505func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
506 if first {
507 if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
508 return false
509 }
510 if !yaml_emitter_increase_indent(emitter, true, false) {
511 return false
512 }
513 emitter.flow_level++
514 }
515
516 if event.typ == yaml_MAPPING_END_EVENT {
517 emitter.flow_level--
518 emitter.indent = emitter.indents[len(emitter.indents)-1]
519 emitter.indents = emitter.indents[:len(emitter.indents)-1]
520 if emitter.canonical && !first {
521 if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
522 return false
523 }
524 if !yaml_emitter_write_indent(emitter) {
525 return false
526 }
527 }
528 if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
529 return false
530 }
531 emitter.state = emitter.states[len(emitter.states)-1]
532 emitter.states = emitter.states[:len(emitter.states)-1]
533 return true
534 }
535
536 if !first {
537 if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
538 return false
539 }
540 }
541 if emitter.canonical || emitter.column > emitter.best_width {
542 if !yaml_emitter_write_indent(emitter) {
543 return false
544 }
545 }
546
547 if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
548 emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
549 return yaml_emitter_emit_node(emitter, event, false, false, true, true)
550 }
551 if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
552 return false
553 }
554 emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
555 return yaml_emitter_emit_node(emitter, event, false, false, true, false)
556}
557
558// Expect a flow value node.
559func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
560 if simple {
561 if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
562 return false
563 }
564 } else {
565 if emitter.canonical || emitter.column > emitter.best_width {
566 if !yaml_emitter_write_indent(emitter) {
567 return false
568 }
569 }
570 if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
571 return false
572 }
573 }
574 emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
575 return yaml_emitter_emit_node(emitter, event, false, false, true, false)
576}
577
578// Expect a block item node.
579func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
580 if first {
581 if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
582 return false
583 }
584 }
585 if event.typ == yaml_SEQUENCE_END_EVENT {
586 emitter.indent = emitter.indents[len(emitter.indents)-1]
587 emitter.indents = emitter.indents[:len(emitter.indents)-1]
588 emitter.state = emitter.states[len(emitter.states)-1]
589 emitter.states = emitter.states[:len(emitter.states)-1]
590 return true
591 }
592 if !yaml_emitter_write_indent(emitter) {
593 return false
594 }
595 if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
596 return false
597 }
598 emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
599 return yaml_emitter_emit_node(emitter, event, false, true, false, false)
600}
601
602// Expect a block key node.
603func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
604 if first {
605 if !yaml_emitter_increase_indent(emitter, false, false) {
606 return false
607 }
608 }
609 if event.typ == yaml_MAPPING_END_EVENT {
610 emitter.indent = emitter.indents[len(emitter.indents)-1]
611 emitter.indents = emitter.indents[:len(emitter.indents)-1]
612 emitter.state = emitter.states[len(emitter.states)-1]
613 emitter.states = emitter.states[:len(emitter.states)-1]
614 return true
615 }
616 if !yaml_emitter_write_indent(emitter) {
617 return false
618 }
619 if yaml_emitter_check_simple_key(emitter) {
620 emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
621 return yaml_emitter_emit_node(emitter, event, false, false, true, true)
622 }
623 if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
624 return false
625 }
626 emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
627 return yaml_emitter_emit_node(emitter, event, false, false, true, false)
628}
629
630// Expect a block value node.
631func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
632 if simple {
633 if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
634 return false
635 }
636 } else {
637 if !yaml_emitter_write_indent(emitter) {
638 return false
639 }
640 if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
641 return false
642 }
643 }
644 emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
645 return yaml_emitter_emit_node(emitter, event, false, false, true, false)
646}
647
648// Expect a node.
649func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
650 root bool, sequence bool, mapping bool, simple_key bool) bool {
651
652 emitter.root_context = root
653 emitter.sequence_context = sequence
654 emitter.mapping_context = mapping
655 emitter.simple_key_context = simple_key
656
657 switch event.typ {
658 case yaml_ALIAS_EVENT:
659 return yaml_emitter_emit_alias(emitter, event)
660 case yaml_SCALAR_EVENT:
661 return yaml_emitter_emit_scalar(emitter, event)
662 case yaml_SEQUENCE_START_EVENT:
663 return yaml_emitter_emit_sequence_start(emitter, event)
664 case yaml_MAPPING_START_EVENT:
665 return yaml_emitter_emit_mapping_start(emitter, event)
666 default:
667 return yaml_emitter_set_emitter_error(emitter,
668 fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
669 }
670}
671
672// Expect ALIAS.
673func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
674 if !yaml_emitter_process_anchor(emitter) {
675 return false
676 }
677 emitter.state = emitter.states[len(emitter.states)-1]
678 emitter.states = emitter.states[:len(emitter.states)-1]
679 return true
680}
681
682// Expect SCALAR.
683func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
684 if !yaml_emitter_select_scalar_style(emitter, event) {
685 return false
686 }
687 if !yaml_emitter_process_anchor(emitter) {
688 return false
689 }
690 if !yaml_emitter_process_tag(emitter) {
691 return false
692 }
693 if !yaml_emitter_increase_indent(emitter, true, false) {
694 return false
695 }
696 if !yaml_emitter_process_scalar(emitter) {
697 return false
698 }
699 emitter.indent = emitter.indents[len(emitter.indents)-1]
700 emitter.indents = emitter.indents[:len(emitter.indents)-1]
701 emitter.state = emitter.states[len(emitter.states)-1]
702 emitter.states = emitter.states[:len(emitter.states)-1]
703 return true
704}
705
706// Expect SEQUENCE-START.
707func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
708 if !yaml_emitter_process_anchor(emitter) {
709 return false
710 }
711 if !yaml_emitter_process_tag(emitter) {
712 return false
713 }
714 if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
715 yaml_emitter_check_empty_sequence(emitter) {
716 emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
717 } else {
718 emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
719 }
720 return true
721}
722
723// Expect MAPPING-START.
724func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
725 if !yaml_emitter_process_anchor(emitter) {
726 return false
727 }
728 if !yaml_emitter_process_tag(emitter) {
729 return false
730 }
731 if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
732 yaml_emitter_check_empty_mapping(emitter) {
733 emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
734 } else {
735 emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
736 }
737 return true
738}
739
740// Check if the document content is an empty scalar.
741func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
742 return false // [Go] Huh?
743}
744
745// Check if the next events represent an empty sequence.
746func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
747 if len(emitter.events)-emitter.events_head < 2 {
748 return false
749 }
750 return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
751 emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
752}
753
754// Check if the next events represent an empty mapping.
755func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
756 if len(emitter.events)-emitter.events_head < 2 {
757 return false
758 }
759 return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
760 emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
761}
762
763// Check if the next node can be expressed as a simple key.
764func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
765 length := 0
766 switch emitter.events[emitter.events_head].typ {
767 case yaml_ALIAS_EVENT:
768 length += len(emitter.anchor_data.anchor)
769 case yaml_SCALAR_EVENT:
770 if emitter.scalar_data.multiline {
771 return false
772 }
773 length += len(emitter.anchor_data.anchor) +
774 len(emitter.tag_data.handle) +
775 len(emitter.tag_data.suffix) +
776 len(emitter.scalar_data.value)
777 case yaml_SEQUENCE_START_EVENT:
778 if !yaml_emitter_check_empty_sequence(emitter) {
779 return false
780 }
781 length += len(emitter.anchor_data.anchor) +
782 len(emitter.tag_data.handle) +
783 len(emitter.tag_data.suffix)
784 case yaml_MAPPING_START_EVENT:
785 if !yaml_emitter_check_empty_mapping(emitter) {
786 return false
787 }
788 length += len(emitter.anchor_data.anchor) +
789 len(emitter.tag_data.handle) +
790 len(emitter.tag_data.suffix)
791 default:
792 return false
793 }
794 return length <= 128
795}
796
797// Determine an acceptable scalar style.
798func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
799
800 no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
801 if no_tag && !event.implicit && !event.quoted_implicit {
802 return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
803 }
804
805 style := event.scalar_style()
806 if style == yaml_ANY_SCALAR_STYLE {
807 style = yaml_PLAIN_SCALAR_STYLE
808 }
809 if emitter.canonical {
810 style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
811 }
812 if emitter.simple_key_context && emitter.scalar_data.multiline {
813 style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
814 }
815
816 if style == yaml_PLAIN_SCALAR_STYLE {
817 if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
818 emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
819 style = yaml_SINGLE_QUOTED_SCALAR_STYLE
820 }
821 if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
822 style = yaml_SINGLE_QUOTED_SCALAR_STYLE
823 }
824 if no_tag && !event.implicit {
825 style = yaml_SINGLE_QUOTED_SCALAR_STYLE
826 }
827 }
828 if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
829 if !emitter.scalar_data.single_quoted_allowed {
830 style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
831 }
832 }
833 if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
834 if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
835 style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
836 }
837 }
838
839 if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
840 emitter.tag_data.handle = []byte{'!'}
841 }
842 emitter.scalar_data.style = style
843 return true
844}
845
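// Illustrative note (a sketch, not part of the vendored file): the logic
// above forms a fallback chain. A requested plain style degrades to
// single-quoted when the analyzed scalar disallows plain (or the value is
// empty in a flow or simple-key context), and single-quoted or block
// styles degrade to double-quoted, which can represent any value through
// escaping.
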
846// Write an anchor.
847func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
848 if emitter.anchor_data.anchor == nil {
849 return true
850 }
851 c := []byte{'&'}
852 if emitter.anchor_data.alias {
853 c[0] = '*'
854 }
855 if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
856 return false
857 }
858 return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
859}
860
861// Write a tag.
862func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
863 if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
864 return true
865 }
866 if len(emitter.tag_data.handle) > 0 {
867 if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
868 return false
869 }
870 if len(emitter.tag_data.suffix) > 0 {
871 if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
872 return false
873 }
874 }
875 } else {
876 // [Go] Allocate these slices elsewhere.
877 if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
878 return false
879 }
880 if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
881 return false
882 }
883 if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
884 return false
885 }
886 }
887 return true
888}
889
890// Write a scalar.
891func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
892 switch emitter.scalar_data.style {
893 case yaml_PLAIN_SCALAR_STYLE:
894 return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
895
896 case yaml_SINGLE_QUOTED_SCALAR_STYLE:
897 return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
898
899 case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
900 return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
901
902 case yaml_LITERAL_SCALAR_STYLE:
903 return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
904
905 case yaml_FOLDED_SCALAR_STYLE:
906 return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
907 }
908 panic("unknown scalar style")
909}
910
911// Check if a %YAML directive is valid.
912func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
913 if version_directive.major != 1 || version_directive.minor != 1 {
914 return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
915 }
916 return true
917}
918
919// Check if a %TAG directive is valid.
920func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
921 handle := tag_directive.handle
922 prefix := tag_directive.prefix
923 if len(handle) == 0 {
924 return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
925 }
926 if handle[0] != '!' {
927 return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
928 }
929 if handle[len(handle)-1] != '!' {
930 return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
931 }
932 for i := 1; i < len(handle)-1; i += width(handle[i]) {
933 if !is_alpha(handle, i) {
934 return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
935 }
936 }
937 if len(prefix) == 0 {
938 return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
939 }
940 return true
941}
942
943// Check if an anchor is valid.
944func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
945 if len(anchor) == 0 {
946 problem := "anchor value must not be empty"
947 if alias {
948 problem = "alias value must not be empty"
949 }
950 return yaml_emitter_set_emitter_error(emitter, problem)
951 }
952 for i := 0; i < len(anchor); i += width(anchor[i]) {
953 if !is_alpha(anchor, i) {
954 problem := "anchor value must contain alphanumerical characters only"
955 if alias {
956 problem = "alias value must contain alphanumerical characters only"
957 }
958 return yaml_emitter_set_emitter_error(emitter, problem)
959 }
960 }
961 emitter.anchor_data.anchor = anchor
962 emitter.anchor_data.alias = alias
963 return true
964}
965
966// Check if a tag is valid.
967func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
968 if len(tag) == 0 {
969 return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
970 }
971 for i := 0; i < len(emitter.tag_directives); i++ {
972 tag_directive := &emitter.tag_directives[i]
973 if bytes.HasPrefix(tag, tag_directive.prefix) {
974 emitter.tag_data.handle = tag_directive.handle
975 emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
976 return true
977 }
978 }
979 emitter.tag_data.suffix = tag
980 return true
981}
982
983// Check if a scalar is valid.
984func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
985 var (
986 block_indicators = false
987 flow_indicators = false
988 line_breaks = false
989 special_characters = false
990
991 leading_space = false
992 leading_break = false
993 trailing_space = false
994 trailing_break = false
995 break_space = false
996 space_break = false
997
998 preceded_by_whitespace = false
999 followed_by_whitespace = false
1000 previous_space = false
1001 previous_break = false
1002 )
1003
1004 emitter.scalar_data.value = value
1005
1006 if len(value) == 0 {
1007 emitter.scalar_data.multiline = false
1008 emitter.scalar_data.flow_plain_allowed = false
1009 emitter.scalar_data.block_plain_allowed = true
1010 emitter.scalar_data.single_quoted_allowed = true
1011 emitter.scalar_data.block_allowed = false
1012 return true
1013 }
1014
1015 if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
1016 block_indicators = true
1017 flow_indicators = true
1018 }
1019
1020 preceded_by_whitespace = true
1021 for i, w := 0, 0; i < len(value); i += w {
1022 w = width(value[i])
1023 followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
1024
1025 if i == 0 {
1026 switch value[i] {
1027 case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
1028 flow_indicators = true
1029 block_indicators = true
1030 case '?', ':':
1031 flow_indicators = true
1032 if followed_by_whitespace {
1033 block_indicators = true
1034 }
1035 case '-':
1036 if followed_by_whitespace {
1037 flow_indicators = true
1038 block_indicators = true
1039 }
1040 }
1041 } else {
1042 switch value[i] {
1043 case ',', '?', '[', ']', '{', '}':
1044 flow_indicators = true
1045 case ':':
1046 flow_indicators = true
1047 if followed_by_whitespace {
1048 block_indicators = true
1049 }
1050 case '#':
1051 if preceded_by_whitespace {
1052 flow_indicators = true
1053 block_indicators = true
1054 }
1055 }
1056 }
1057
1058 if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
1059 special_characters = true
1060 }
1061 if is_space(value, i) {
1062 if i == 0 {
1063 leading_space = true
1064 }
1065 if i+width(value[i]) == len(value) {
1066 trailing_space = true
1067 }
1068 if previous_break {
1069 break_space = true
1070 }
1071 previous_space = true
1072 previous_break = false
1073 } else if is_break(value, i) {
1074 line_breaks = true
1075 if i == 0 {
1076 leading_break = true
1077 }
1078 if i+width(value[i]) == len(value) {
1079 trailing_break = true
1080 }
1081 if previous_space {
1082 space_break = true
1083 }
1084 previous_space = false
1085 previous_break = true
1086 } else {
1087 previous_space = false
1088 previous_break = false
1089 }
1090
1091		// [Go]: Why 'z' (is_blankz)? The index can't be at the end of the string here, as that's the loop's exit condition.
1092 preceded_by_whitespace = is_blankz(value, i)
1093 }
1094
1095 emitter.scalar_data.multiline = line_breaks
1096 emitter.scalar_data.flow_plain_allowed = true
1097 emitter.scalar_data.block_plain_allowed = true
1098 emitter.scalar_data.single_quoted_allowed = true
1099 emitter.scalar_data.block_allowed = true
1100
1101 if leading_space || leading_break || trailing_space || trailing_break {
1102 emitter.scalar_data.flow_plain_allowed = false
1103 emitter.scalar_data.block_plain_allowed = false
1104 }
1105 if trailing_space {
1106 emitter.scalar_data.block_allowed = false
1107 }
1108 if break_space {
1109 emitter.scalar_data.flow_plain_allowed = false
1110 emitter.scalar_data.block_plain_allowed = false
1111 emitter.scalar_data.single_quoted_allowed = false
1112 }
1113 if space_break || special_characters {
1114 emitter.scalar_data.flow_plain_allowed = false
1115 emitter.scalar_data.block_plain_allowed = false
1116 emitter.scalar_data.single_quoted_allowed = false
1117 emitter.scalar_data.block_allowed = false
1118 }
1119 if line_breaks {
1120 emitter.scalar_data.flow_plain_allowed = false
1121 emitter.scalar_data.block_plain_allowed = false
1122 }
1123 if flow_indicators {
1124 emitter.scalar_data.flow_plain_allowed = false
1125 }
1126 if block_indicators {
1127 emitter.scalar_data.block_plain_allowed = false
1128 }
1129 return true
1130}
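The flags computed above are consumed later when the emitter picks a concrete scalar style. As a rough illustration only (the vendored selection logic elsewhere in this file also honors the style requested on the event), the decision order could be sketched in-package like this:

// pickStyle is an illustrative helper, not part of the vendored code: it
// shows one way the *_allowed flags collapse into a concrete scalar style.
func pickStyle(e *yaml_emitter_t, flowContext bool) yaml_scalar_style_t {
	d := &e.scalar_data
	switch {
	case flowContext && d.flow_plain_allowed, !flowContext && d.block_plain_allowed:
		return yaml_PLAIN_SCALAR_STYLE
	case d.single_quoted_allowed:
		return yaml_SINGLE_QUOTED_SCALAR_STYLE
	case d.block_allowed:
		return yaml_LITERAL_SCALAR_STYLE
	default:
		return yaml_DOUBLE_QUOTED_SCALAR_STYLE // double quoting can represent anything
	}
}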
1131
1132// Check if the event data is valid.
1133func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
1134
1135 emitter.anchor_data.anchor = nil
1136 emitter.tag_data.handle = nil
1137 emitter.tag_data.suffix = nil
1138 emitter.scalar_data.value = nil
1139
1140 switch event.typ {
1141 case yaml_ALIAS_EVENT:
1142 if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
1143 return false
1144 }
1145
1146 case yaml_SCALAR_EVENT:
1147 if len(event.anchor) > 0 {
1148 if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
1149 return false
1150 }
1151 }
1152 if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
1153 if !yaml_emitter_analyze_tag(emitter, event.tag) {
1154 return false
1155 }
1156 }
1157 if !yaml_emitter_analyze_scalar(emitter, event.value) {
1158 return false
1159 }
1160
1161 case yaml_SEQUENCE_START_EVENT:
1162 if len(event.anchor) > 0 {
1163 if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
1164 return false
1165 }
1166 }
1167 if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
1168 if !yaml_emitter_analyze_tag(emitter, event.tag) {
1169 return false
1170 }
1171 }
1172
1173 case yaml_MAPPING_START_EVENT:
1174 if len(event.anchor) > 0 {
1175 if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
1176 return false
1177 }
1178 }
1179 if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
1180 if !yaml_emitter_analyze_tag(emitter, event.tag) {
1181 return false
1182 }
1183 }
1184 }
1185 return true
1186}
1187
1188// Write the BOM character.
1189func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
1190 if !flush(emitter) {
1191 return false
1192 }
1193 pos := emitter.buffer_pos
1194 emitter.buffer[pos+0] = '\xEF'
1195 emitter.buffer[pos+1] = '\xBB'
1196 emitter.buffer[pos+2] = '\xBF'
1197 emitter.buffer_pos += 3
1198 return true
1199}
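The three bytes written here are simply the UTF-8 encoding of U+FEFF; a self-contained check:

package main

import "fmt"

func main() {
	fmt.Printf("% X\n", []byte("\uFEFF")) // prints: EF BB BF
}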
1200
1201func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
1202 indent := emitter.indent
1203 if indent < 0 {
1204 indent = 0
1205 }
1206 if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
1207 if !put_break(emitter) {
1208 return false
1209 }
1210 }
1211 for emitter.column < indent {
1212 if !put(emitter, ' ') {
1213 return false
1214 }
1215 }
1216 emitter.whitespace = true
1217 emitter.indention = true
1218 return true
1219}
1220
1221func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
1222 if need_whitespace && !emitter.whitespace {
1223 if !put(emitter, ' ') {
1224 return false
1225 }
1226 }
1227 if !write_all(emitter, indicator) {
1228 return false
1229 }
1230 emitter.whitespace = is_whitespace
1231 emitter.indention = (emitter.indention && is_indention)
1232 emitter.open_ended = false
1233 return true
1234}
1235
1236func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
1237 if !write_all(emitter, value) {
1238 return false
1239 }
1240 emitter.whitespace = false
1241 emitter.indention = false
1242 return true
1243}
1244
1245func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
1246 if !emitter.whitespace {
1247 if !put(emitter, ' ') {
1248 return false
1249 }
1250 }
1251 if !write_all(emitter, value) {
1252 return false
1253 }
1254 emitter.whitespace = false
1255 emitter.indention = false
1256 return true
1257}
1258
1259func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
1260 if need_whitespace && !emitter.whitespace {
1261 if !put(emitter, ' ') {
1262 return false
1263 }
1264 }
1265 for i := 0; i < len(value); {
1266 var must_write bool
1267 switch value[i] {
1268 case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
1269 must_write = true
1270 default:
1271 must_write = is_alpha(value, i)
1272 }
1273 if must_write {
1274 if !write(emitter, value, &i) {
1275 return false
1276 }
1277 } else {
1278 w := width(value[i])
1279 for k := 0; k < w; k++ {
1280 octet := value[i]
1281 i++
1282 if !put(emitter, '%') {
1283 return false
1284 }
1285
1286 c := octet >> 4
1287 if c < 10 {
1288 c += '0'
1289 } else {
1290 c += 'A' - 10
1291 }
1292 if !put(emitter, c) {
1293 return false
1294 }
1295
1296 c = octet & 0x0f
1297 if c < 10 {
1298 c += '0'
1299 } else {
1300 c += 'A' - 10
1301 }
1302 if !put(emitter, c) {
1303 return false
1304 }
1305 }
1306 }
1307 }
1308 emitter.whitespace = false
1309 emitter.indention = false
1310 return true
1311}
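The else branch percent-encodes every byte that is not URI-safe, using uppercase hex exactly as above. A standalone restatement of that per-byte transformation:

package main

import "fmt"

// percentEncode mirrors the escaping arithmetic in
// yaml_emitter_write_tag_content for a single byte.
func percentEncode(octet byte) string {
	const hexDigits = "0123456789ABCDEF"
	return string([]byte{'%', hexDigits[octet>>4], hexDigits[octet&0x0f]})
}

func main() {
	for _, b := range []byte("é") { // UTF-8 bytes 0xC3 0xA9
		fmt.Print(percentEncode(b))
	}
	fmt.Println() // prints: %C3%A9
}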
1312
1313func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
1314 if !emitter.whitespace {
1315 if !put(emitter, ' ') {
1316 return false
1317 }
1318 }
1319
1320 spaces := false
1321 breaks := false
1322 for i := 0; i < len(value); {
1323 if is_space(value, i) {
1324 if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
1325 if !yaml_emitter_write_indent(emitter) {
1326 return false
1327 }
1328 i += width(value[i])
1329 } else {
1330 if !write(emitter, value, &i) {
1331 return false
1332 }
1333 }
1334 spaces = true
1335 } else if is_break(value, i) {
1336 if !breaks && value[i] == '\n' {
1337 if !put_break(emitter) {
1338 return false
1339 }
1340 }
1341 if !write_break(emitter, value, &i) {
1342 return false
1343 }
1344 emitter.indention = true
1345 breaks = true
1346 } else {
1347 if breaks {
1348 if !yaml_emitter_write_indent(emitter) {
1349 return false
1350 }
1351 }
1352 if !write(emitter, value, &i) {
1353 return false
1354 }
1355 emitter.indention = false
1356 spaces = false
1357 breaks = false
1358 }
1359 }
1360
1361 emitter.whitespace = false
1362 emitter.indention = false
1363 if emitter.root_context {
1364 emitter.open_ended = true
1365 }
1366
1367 return true
1368}
1369
1370func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
1371
1372 if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
1373 return false
1374 }
1375
1376 spaces := false
1377 breaks := false
1378 for i := 0; i < len(value); {
1379 if is_space(value, i) {
1380 if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
1381 if !yaml_emitter_write_indent(emitter) {
1382 return false
1383 }
1384 i += width(value[i])
1385 } else {
1386 if !write(emitter, value, &i) {
1387 return false
1388 }
1389 }
1390 spaces = true
1391 } else if is_break(value, i) {
1392 if !breaks && value[i] == '\n' {
1393 if !put_break(emitter) {
1394 return false
1395 }
1396 }
1397 if !write_break(emitter, value, &i) {
1398 return false
1399 }
1400 emitter.indention = true
1401 breaks = true
1402 } else {
1403 if breaks {
1404 if !yaml_emitter_write_indent(emitter) {
1405 return false
1406 }
1407 }
1408 if value[i] == '\'' {
1409 if !put(emitter, '\'') {
1410 return false
1411 }
1412 }
1413 if !write(emitter, value, &i) {
1414 return false
1415 }
1416 emitter.indention = false
1417 spaces = false
1418 breaks = false
1419 }
1420 }
1421 if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
1422 return false
1423 }
1424 emitter.whitespace = false
1425 emitter.indention = false
1426 return true
1427}
1428
1429func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
1430 spaces := false
1431 if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
1432 return false
1433 }
1434
1435 for i := 0; i < len(value); {
1436 if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
1437 is_bom(value, i) || is_break(value, i) ||
1438 value[i] == '"' || value[i] == '\\' {
1439
1440 octet := value[i]
1441
1442 var w int
1443 var v rune
1444 switch {
1445 case octet&0x80 == 0x00:
1446 w, v = 1, rune(octet&0x7F)
1447 case octet&0xE0 == 0xC0:
1448 w, v = 2, rune(octet&0x1F)
1449 case octet&0xF0 == 0xE0:
1450 w, v = 3, rune(octet&0x0F)
1451 case octet&0xF8 == 0xF0:
1452 w, v = 4, rune(octet&0x07)
1453 }
1454 for k := 1; k < w; k++ {
1455 octet = value[i+k]
1456 v = (v << 6) + (rune(octet) & 0x3F)
1457 }
1458 i += w
1459
1460 if !put(emitter, '\\') {
1461 return false
1462 }
1463
1464 var ok bool
1465 switch v {
1466 case 0x00:
1467 ok = put(emitter, '0')
1468 case 0x07:
1469 ok = put(emitter, 'a')
1470 case 0x08:
1471 ok = put(emitter, 'b')
1472 case 0x09:
1473 ok = put(emitter, 't')
1474 case 0x0A:
1475 ok = put(emitter, 'n')
1476 case 0x0b:
1477 ok = put(emitter, 'v')
1478 case 0x0c:
1479 ok = put(emitter, 'f')
1480 case 0x0d:
1481 ok = put(emitter, 'r')
1482 case 0x1b:
1483 ok = put(emitter, 'e')
1484 case 0x22:
1485 ok = put(emitter, '"')
1486 case 0x5c:
1487 ok = put(emitter, '\\')
1488 case 0x85:
1489 ok = put(emitter, 'N')
1490 case 0xA0:
1491 ok = put(emitter, '_')
1492 case 0x2028:
1493 ok = put(emitter, 'L')
1494 case 0x2029:
1495 ok = put(emitter, 'P')
1496 default:
1497 if v <= 0xFF {
1498 ok = put(emitter, 'x')
1499 w = 2
1500 } else if v <= 0xFFFF {
1501 ok = put(emitter, 'u')
1502 w = 4
1503 } else {
1504 ok = put(emitter, 'U')
1505 w = 8
1506 }
1507 for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
1508 digit := byte((v >> uint(k)) & 0x0F)
1509 if digit < 10 {
1510 ok = put(emitter, digit+'0')
1511 } else {
1512 ok = put(emitter, digit+'A'-10)
1513 }
1514 }
1515 }
1516 if !ok {
1517 return false
1518 }
1519 spaces = false
1520 } else if is_space(value, i) {
1521 if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
1522 if !yaml_emitter_write_indent(emitter) {
1523 return false
1524 }
1525 if is_space(value, i+1) {
1526 if !put(emitter, '\\') {
1527 return false
1528 }
1529 }
1530 i += width(value[i])
1531 } else if !write(emitter, value, &i) {
1532 return false
1533 }
1534 spaces = true
1535 } else {
1536 if !write(emitter, value, &i) {
1537 return false
1538 }
1539 spaces = false
1540 }
1541 }
1542 if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
1543 return false
1544 }
1545 emitter.whitespace = false
1546 emitter.indention = false
1547 return true
1548}
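The escape branch above decodes one UTF-8 sequence by hand (the 0x80/0xE0/0xF0/0xF8 mask tests) before choosing an escape. That arithmetic agrees with the standard library decoder, which makes for an easy sanity check:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	b := []byte("\u2028") // LINE SEPARATOR, escaped above as \L
	r, w := utf8.DecodeRune(b)
	fmt.Printf("U+%04X width=%d\n", r, w) // prints: U+2028 width=3
}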
1549
1550func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
1551 if is_space(value, 0) || is_break(value, 0) {
1552 indent_hint := []byte{'0' + byte(emitter.best_indent)}
1553 if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
1554 return false
1555 }
1556 }
1557
1558 emitter.open_ended = false
1559
1560 var chomp_hint [1]byte
1561 if len(value) == 0 {
1562 chomp_hint[0] = '-'
1563 } else {
1564 i := len(value) - 1
1565 for value[i]&0xC0 == 0x80 {
1566 i--
1567 }
1568 if !is_break(value, i) {
1569 chomp_hint[0] = '-'
1570 } else if i == 0 {
1571 chomp_hint[0] = '+'
1572 emitter.open_ended = true
1573 } else {
1574 i--
1575 for value[i]&0xC0 == 0x80 {
1576 i--
1577 }
1578 if is_break(value, i) {
1579 chomp_hint[0] = '+'
1580 emitter.open_ended = true
1581 }
1582 }
1583 }
1584 if chomp_hint[0] != 0 {
1585 if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
1586 return false
1587 }
1588 }
1589 return true
1590}
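In YAML terms this function chooses the block-scalar chomping indicator: strip ('-') when the value has no trailing line break, keep ('+') when the value is a single break or ends in two breaks, and the default clip (no indicator) otherwise. A rough standalone restatement for the common case where '\n' is the only break character (the vendored code also handles other break characters and multi-byte encodings):

package main

import (
	"fmt"
	"strings"
)

// chompHint approximates yaml_emitter_write_block_scalar_hints for
// values whose only break character is '\n'.
func chompHint(s string) string {
	switch {
	case !strings.HasSuffix(s, "\n"):
		return "-" // strip: no trailing break to preserve
	case s == "\n" || strings.HasSuffix(s, "\n\n"):
		return "+" // keep: preserve all trailing breaks
	default:
		return "" // clip (default): keep exactly one break
	}
}

func main() {
	fmt.Printf("%q %q %q\n", chompHint("a"), chompHint("a\n"), chompHint("a\n\n"))
	// prints: "-" "" "+"
}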
1591
1592func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
1593 if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
1594 return false
1595 }
1596 if !yaml_emitter_write_block_scalar_hints(emitter, value) {
1597 return false
1598 }
1599 if !put_break(emitter) {
1600 return false
1601 }
1602 emitter.indention = true
1603 emitter.whitespace = true
1604 breaks := true
1605 for i := 0; i < len(value); {
1606 if is_break(value, i) {
1607 if !write_break(emitter, value, &i) {
1608 return false
1609 }
1610 emitter.indention = true
1611 breaks = true
1612 } else {
1613 if breaks {
1614 if !yaml_emitter_write_indent(emitter) {
1615 return false
1616 }
1617 }
1618 if !write(emitter, value, &i) {
1619 return false
1620 }
1621 emitter.indention = false
1622 breaks = false
1623 }
1624 }
1625
1626 return true
1627}
1628
1629func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
1630 if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
1631 return false
1632 }
1633 if !yaml_emitter_write_block_scalar_hints(emitter, value) {
1634 return false
1635 }
1636
1637 if !put_break(emitter) {
1638 return false
1639 }
1640 emitter.indention = true
1641 emitter.whitespace = true
1642
1643 breaks := true
1644 leading_spaces := true
1645 for i := 0; i < len(value); {
1646 if is_break(value, i) {
1647 if !breaks && !leading_spaces && value[i] == '\n' {
1648 k := 0
1649 for is_break(value, k) {
1650 k += width(value[k])
1651 }
1652 if !is_blankz(value, k) {
1653 if !put_break(emitter) {
1654 return false
1655 }
1656 }
1657 }
1658 if !write_break(emitter, value, &i) {
1659 return false
1660 }
1661 emitter.indention = true
1662 breaks = true
1663 } else {
1664 if breaks {
1665 if !yaml_emitter_write_indent(emitter) {
1666 return false
1667 }
1668 leading_spaces = is_blank(value, i)
1669 }
1670 if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
1671 if !yaml_emitter_write_indent(emitter) {
1672 return false
1673 }
1674 i += width(value[i])
1675 } else {
1676 if !write(emitter, value, &i) {
1677 return false
1678 }
1679 }
1680 emitter.indention = false
1681 breaks = false
1682 }
1683 }
1684 return true
1685}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go
new file mode 100644
index 0000000..daa1478
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/encode.go
@@ -0,0 +1,189 @@
1package yaml
2
3import (
4 "bytes"
5 "fmt"
6 "strings"
7
8 "github.com/zclconf/go-cty/cty"
9)
10
11func (c *Converter) marshal(v cty.Value) ([]byte, error) {
12 var buf bytes.Buffer
13
14 e := &yaml_emitter_t{}
15 yaml_emitter_initialize(e)
16 yaml_emitter_set_output_writer(e, &buf)
17 yaml_emitter_set_unicode(e, true)
18
19 var evt yaml_event_t
20 yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING)
21 if !yaml_emitter_emit(e, &evt) {
22 return nil, emitterError(e)
23 }
24 yaml_document_start_event_initialize(&evt, nil, nil, true)
25 if !yaml_emitter_emit(e, &evt) {
26 return nil, emitterError(e)
27 }
28
29 if err := c.marshalEmit(v, e); err != nil {
30 return nil, err
31 }
32
33 yaml_document_end_event_initialize(&evt, true)
34 if !yaml_emitter_emit(e, &evt) {
35 return nil, emitterError(e)
36 }
37 yaml_stream_end_event_initialize(&evt)
38 if !yaml_emitter_emit(e, &evt) {
39 return nil, emitterError(e)
40 }
41
42 return buf.Bytes(), nil
43}
44
45func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error {
46 ty := v.Type()
47 switch {
48 case v.IsNull():
49 return c.marshalPrimitive(v, e)
50 case !v.IsKnown():
51 return fmt.Errorf("cannot serialize unknown value as YAML")
52 case ty.IsPrimitiveType():
53 return c.marshalPrimitive(v, e)
54 case ty.IsTupleType(), ty.IsListType(), ty.IsSetType():
55 return c.marshalSequence(v, e)
56 case ty.IsObjectType(), ty.IsMapType():
57 return c.marshalMapping(v, e)
58 default:
59 return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName())
60 }
61}
62
63func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error {
64 var evt yaml_event_t
65
66 if v.IsNull() {
67 yaml_scalar_event_initialize(
68 &evt,
69 nil,
70 nil,
71 []byte("null"),
72 true,
73 true,
74 yaml_PLAIN_SCALAR_STYLE,
75 )
76 if !yaml_emitter_emit(e, &evt) {
77 return emitterError(e)
78 }
79 return nil
80 }
81
82 switch v.Type() {
83 case cty.String:
84 str := v.AsString()
85 style := yaml_DOUBLE_QUOTED_SCALAR_STYLE
86 if strings.Contains(str, "\n") {
87 style = yaml_LITERAL_SCALAR_STYLE
88 }
89 yaml_scalar_event_initialize(
90 &evt,
91 nil,
92 nil,
93 []byte(str),
94 true,
95 true,
96 style,
97 )
98 case cty.Number:
99 str := v.AsBigFloat().Text('f', -1)
100 yaml_scalar_event_initialize(
101 &evt,
102 nil,
103 nil,
104 []byte(str),
105 true,
106 true,
107 yaml_PLAIN_SCALAR_STYLE,
108 )
109 case cty.Bool:
110 var str string
111 switch v {
112 case cty.True:
113 str = "true"
114 case cty.False:
115 str = "false"
116 }
117 yaml_scalar_event_initialize(
118 &evt,
119 nil,
120 nil,
121 []byte(str),
122 true,
123 true,
124 yaml_PLAIN_SCALAR_STYLE,
125 )
126 }
127 if !yaml_emitter_emit(e, &evt) {
128 return emitterError(e)
129 }
130 return nil
131}
132
133func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error {
134 style := yaml_BLOCK_SEQUENCE_STYLE
135 if c.encodeAsFlow {
136 style = yaml_FLOW_SEQUENCE_STYLE
137 }
138
139 var evt yaml_event_t
140 yaml_sequence_start_event_initialize(&evt, nil, nil, true, style)
141 if !yaml_emitter_emit(e, &evt) {
142 return emitterError(e)
143 }
144
145 for it := v.ElementIterator(); it.Next(); {
146 _, v := it.Element()
147 err := c.marshalEmit(v, e)
148 if err != nil {
149 return err
150 }
151 }
152
153 yaml_sequence_end_event_initialize(&evt)
154 if !yaml_emitter_emit(e, &evt) {
155 return emitterError(e)
156 }
157 return nil
158}
159
160func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error {
161 style := yaml_BLOCK_MAPPING_STYLE
162 if c.encodeAsFlow {
163 style = yaml_FLOW_MAPPING_STYLE
164 }
165
166 var evt yaml_event_t
167 yaml_mapping_start_event_initialize(&evt, nil, nil, true, style)
168 if !yaml_emitter_emit(e, &evt) {
169 return emitterError(e)
170 }
171
172 for it := v.ElementIterator(); it.Next(); {
173 k, v := it.Element()
174 err := c.marshalEmit(k, e)
175 if err != nil {
176 return err
177 }
178 err = c.marshalEmit(v, e)
179 if err != nil {
180 return err
181 }
182 }
183
184 yaml_mapping_end_event_initialize(&evt)
185 if !yaml_emitter_emit(e, &evt) {
186 return emitterError(e)
187 }
188 return nil
189}
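Taken together, marshal brackets the value between stream/document start and end events while marshalEmit recurses structurally. A hedged end-to-end sketch, assuming the package's exported Standard converter (defined outside this diff) routes its Marshal method through the code above:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	v := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("example"),
		"enabled": cty.True,
		"count":   cty.NumberIntVal(3),
	})
	out, err := yaml.Standard.Marshal(v) // Standard is assumed here; its definition is not part of this diff
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Strings render double-quoted unless they contain '\n', in which
	// case marshalPrimitive switches to a literal block scalar.
}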
diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go
new file mode 100644
index 0000000..ae41c48
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/error.go
@@ -0,0 +1,97 @@
1package yaml
2
3import (
4 "errors"
5 "fmt"
6)
7
8// Error is an error implementation used to report errors that correspond to
9// a particular position in an input buffer.
10type Error struct {
11 cause error
12 Line, Column int
13}
14
15func (e Error) Error() string {
16 return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error())
17}
18
19// Cause is an implementation of the interface used by
20// github.com/pkg/errors.Cause, returning the underlying error without the
21// position information.
22func (e Error) Cause() error {
23 return e.cause
24}
25
26// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper
27// returning the underlying error without the position information.
28func (e Error) WrappedErrors() []error {
29 return []error{e.cause}
30}
31
32func parserError(p *yaml_parser_t) error {
33 var cause error
34 if len(p.problem) > 0 {
35 cause = errors.New(p.problem)
36 } else {
37		cause = errors.New("invalid YAML syntax") // fall back to a generic error
38 }
39
40 return parserErrorWrap(p, cause)
41}
42
43func parserErrorWrap(p *yaml_parser_t, cause error) error {
44 switch {
45 case p.problem_mark.line != 0:
46 line := p.problem_mark.line
47 column := p.problem_mark.column
48		// Scanner errors don't advance the line number before returning an error
49 if p.error == yaml_SCANNER_ERROR {
50 line++
51 column = 0
52 }
53 return Error{
54 cause: cause,
55 Line: line,
56 Column: column + 1,
57 }
58 case p.context_mark.line != 0:
59 return Error{
60 cause: cause,
61 Line: p.context_mark.line,
62 Column: p.context_mark.column + 1,
63 }
64 default:
65 return cause
66 }
67}
68
69func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error {
70 return parserErrorWrap(p, fmt.Errorf(f, vals...))
71}
72
73func parseEventErrorWrap(evt *yaml_event_t, cause error) error {
74 if evt.start_mark.line == 0 {
75 // Event does not have a start mark, so we won't wrap the error at all
76 return cause
77 }
78 return Error{
79 cause: cause,
80 Line: evt.start_mark.line,
81 Column: evt.start_mark.column + 1,
82 }
83}
84
85func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error {
86 return parseEventErrorWrap(evt, fmt.Errorf(f, vals...))
87}
88
89func emitterError(e *yaml_emitter_t) error {
90 var cause error
91 if len(e.problem) > 0 {
92 cause = errors.New(e.problem)
93 } else {
94		cause = errors.New("failed to write YAML token") // fall back to a generic error
95 }
96 return cause
97}
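Since Line and Column are exported, callers can recover the position with a plain type assertion. An illustrative in-package helper (not part of this diff; fmt is already imported above):

// describe extracts the position annotation when one is present.
func describe(err error) string {
	if pe, ok := err.(Error); ok {
		return fmt.Sprintf("problem at line %d, column %d: %v", pe.Line, pe.Column, pe.Cause())
	}
	return err.Error()
}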
diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.mod b/vendor/github.com/zclconf/go-cty-yaml/go.mod
new file mode 100644
index 0000000..3d52268
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/go.mod
@@ -0,0 +1,3 @@
1module github.com/zclconf/go-cty-yaml
2
3require github.com/zclconf/go-cty v1.0.0
diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.sum b/vendor/github.com/zclconf/go-cty-yaml/go.sum
new file mode 100644
index 0000000..841f7fc
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/go.sum
@@ -0,0 +1,18 @@
1github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
2github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
3github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
4github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
5github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
6github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
7github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
8github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
9github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
10github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk=
11github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
12golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
13golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
14golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
15golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
16google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
17gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
18gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go
new file mode 100644
index 0000000..5b7b068
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go
@@ -0,0 +1,268 @@
1package yaml
2
3import (
4 "errors"
5 "fmt"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9)
10
11func (c *Converter) impliedType(src []byte) (cty.Type, error) {
12 p := &yaml_parser_t{}
13 if !yaml_parser_initialize(p) {
14 return cty.NilType, errors.New("failed to initialize YAML parser")
15 }
16 if len(src) == 0 {
17 src = []byte{'\n'}
18 }
19
20 an := &typeAnalysis{
21 anchorsPending: map[string]int{},
22 anchorTypes: map[string]cty.Type{},
23 }
24
25 yaml_parser_set_input_string(p, src)
26
27 var evt yaml_event_t
28 if !yaml_parser_parse(p, &evt) {
29 return cty.NilType, parserError(p)
30 }
31 if evt.typ != yaml_STREAM_START_EVENT {
32 return cty.NilType, parseEventErrorf(&evt, "missing stream start token")
33 }
34 if !yaml_parser_parse(p, &evt) {
35 return cty.NilType, parserError(p)
36 }
37 if evt.typ != yaml_DOCUMENT_START_EVENT {
38 return cty.NilType, parseEventErrorf(&evt, "missing start of document")
39 }
40
41 ty, err := c.impliedTypeParse(an, p)
42 if err != nil {
43 return cty.NilType, err
44 }
45
46 if !yaml_parser_parse(p, &evt) {
47 return cty.NilType, parserError(p)
48 }
49 if evt.typ == yaml_DOCUMENT_START_EVENT {
50 return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed")
51 }
52 if evt.typ != yaml_DOCUMENT_END_EVENT {
53 return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String())
54 }
55 if !yaml_parser_parse(p, &evt) {
56 return cty.NilType, parserError(p)
57 }
58 if evt.typ != yaml_STREAM_END_EVENT {
59 return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value")
60 }
61
62 return ty, err
63}
64
65func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) {
66 var evt yaml_event_t
67 if !yaml_parser_parse(p, &evt) {
68 return cty.NilType, parserError(p)
69 }
70 return c.impliedTypeParseRemainder(an, &evt, p)
71}
72
73func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
74 switch evt.typ {
75 case yaml_SCALAR_EVENT:
76 return c.impliedTypeScalar(an, evt, p)
77 case yaml_ALIAS_EVENT:
78 return c.impliedTypeAlias(an, evt, p)
79 case yaml_MAPPING_START_EVENT:
80 return c.impliedTypeMapping(an, evt, p)
81 case yaml_SEQUENCE_START_EVENT:
82 return c.impliedTypeSequence(an, evt, p)
83 case yaml_DOCUMENT_START_EVENT:
84 return cty.NilType, parseEventErrorf(evt, "only a single document is allowed")
85 case yaml_STREAM_END_EVENT:
86		// Probably decoding an empty buffer.
87 return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream")
88 default:
89 // Should never happen; the above should be comprehensive
90 return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String())
91 }
92}
93
94func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
95 src := evt.value
96 tag := string(evt.tag)
97 anchor := string(evt.anchor)
98 implicit := evt.implicit
99
100 if len(anchor) > 0 {
101 an.beginAnchor(anchor)
102 }
103
104 var ty cty.Type
105 switch {
106 case tag == "" && !implicit:
107 // Untagged explicit string
108 ty = cty.String
109 default:
110 v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style))
111 if err != nil {
112 return cty.NilType, parseEventErrorWrap(evt, err)
113 }
114 if v.RawEquals(mergeMappingVal) {
115 // In any context other than a mapping key, this is just a plain string
116 ty = cty.String
117 } else {
118 ty = v.Type()
119 }
120 }
121
122 if len(anchor) > 0 {
123 an.completeAnchor(anchor, ty)
124 }
125 return ty, nil
126}
127
128func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
129 tag := string(evt.tag)
130 anchor := string(evt.anchor)
131
132 if tag != "" && tag != yaml_MAP_TAG {
133 return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag)
134 }
135
136 if anchor != "" {
137 an.beginAnchor(anchor)
138 }
139
140 atys := make(map[string]cty.Type)
141 for {
142 var nextEvt yaml_event_t
143 if !yaml_parser_parse(p, &nextEvt) {
144 return cty.NilType, parserError(p)
145 }
146 if nextEvt.typ == yaml_MAPPING_END_EVENT {
147 ty := cty.Object(atys)
148 if anchor != "" {
149 an.completeAnchor(anchor, ty)
150 }
151 return ty, nil
152 }
153
154 if nextEvt.typ != yaml_SCALAR_EVENT {
155 return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
156 }
157 keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style))
158 if err != nil {
159 return cty.NilType, err
160 }
161 if keyVal.RawEquals(mergeMappingVal) {
162			// This key requests a merge: fold the value (which must
163			// itself be a mapping) into the mapping being built.
164 ty, err := c.impliedTypeParse(an, p)
165 if err != nil {
166 return cty.NilType, err
167 }
168 if !ty.IsObjectType() {
169 return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName())
170 }
171 for name, aty := range ty.AttributeTypes() {
172 atys[name] = aty
173 }
174 continue
175 }
176 if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil {
177 keyVal = keyValStr
178 } else {
179 return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
180 }
181 if keyVal.IsNull() {
182 return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null")
183 }
184 if !keyVal.IsKnown() {
185 return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known")
186 }
187 valTy, err := c.impliedTypeParse(an, p)
188 if err != nil {
189 return cty.NilType, err
190 }
191
192 atys[keyVal.AsString()] = valTy
193 }
194}
195
196func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
197 tag := string(evt.tag)
198 anchor := string(evt.anchor)
199
200 if tag != "" && tag != yaml_SEQ_TAG {
201 return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag)
202 }
203
204 if anchor != "" {
205 an.beginAnchor(anchor)
206 }
207
208 var atys []cty.Type
209 for {
210 var nextEvt yaml_event_t
211 if !yaml_parser_parse(p, &nextEvt) {
212 return cty.NilType, parserError(p)
213 }
214 if nextEvt.typ == yaml_SEQUENCE_END_EVENT {
215 ty := cty.Tuple(atys)
216 if anchor != "" {
217 an.completeAnchor(anchor, ty)
218 }
219 return ty, nil
220 }
221
222 valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p)
223 if err != nil {
224 return cty.NilType, err
225 }
226
227 atys = append(atys, valTy)
228 }
229}
230
231func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
232 ty, err := an.anchorType(string(evt.anchor))
233 if err != nil {
234 err = parseEventErrorWrap(evt, err)
235 }
236 return ty, err
237}
238
239type typeAnalysis struct {
240 anchorsPending map[string]int
241 anchorTypes map[string]cty.Type
242}
243
244func (an *typeAnalysis) beginAnchor(name string) {
245 an.anchorsPending[name]++
246}
247
248func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) {
249 an.anchorsPending[name]--
250 if an.anchorsPending[name] == 0 {
251 delete(an.anchorsPending, name)
252 }
253 an.anchorTypes[name] = ty
254}
255
256func (an *typeAnalysis) anchorType(name string) (cty.Type, error) {
257 if _, pending := an.anchorsPending[name]; pending {
258 // YAML normally allows self-referencing structures, but cty cannot
259 // represent them (it requires all structures to be finite) so we
260 // must fail here.
261 return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name)
262 }
263 ty, ok := an.anchorTypes[name]
264 if !ok {
265 return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name)
266 }
267 return ty, nil
268}
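The net effect is that mappings infer to object types and sequences to tuple types, so every element position keeps its own inferred type. A hedged usage sketch, assuming the exported ImpliedType on the package's Standard converter (defined outside this diff) wraps impliedType:

package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: example\nports: [80, \"http\"]\n")
	ty, err := yaml.Standard.ImpliedType(src) // Standard/ImpliedType are assumed exports
	if err != nil {
		panic(err)
	}
	// Expect an object type whose "ports" attribute is tuple([number, string]).
	fmt.Println(ty.FriendlyName())
}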
diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/parserc.go
@@ -0,0 +1,1095 @@
1package yaml
2
3import (
4 "bytes"
5)
6
7// The parser implements the following grammar:
8//
9// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
10// implicit_document ::= block_node DOCUMENT-END*
11// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
12// block_node_or_indentless_sequence ::=
13// ALIAS
14// | properties (block_content | indentless_block_sequence)?
15// | block_content
16// | indentless_block_sequence
17// block_node ::= ALIAS
18// | properties block_content?
19// | block_content
20// flow_node ::= ALIAS
21// | properties flow_content?
22// | flow_content
23// properties ::= TAG ANCHOR? | ANCHOR TAG?
24// block_content ::= block_collection | flow_collection | SCALAR
25// flow_content ::= flow_collection | SCALAR
26// block_collection ::= block_sequence | block_mapping
27// flow_collection ::= flow_sequence | flow_mapping
28// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
29// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
30// block_mapping ::= BLOCK-MAPPING_START
31// ((KEY block_node_or_indentless_sequence?)?
32// (VALUE block_node_or_indentless_sequence?)?)*
33// BLOCK-END
34// flow_sequence ::= FLOW-SEQUENCE-START
35// (flow_sequence_entry FLOW-ENTRY)*
36// flow_sequence_entry?
37// FLOW-SEQUENCE-END
38// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
39// flow_mapping ::= FLOW-MAPPING-START
40// (flow_mapping_entry FLOW-ENTRY)*
41// flow_mapping_entry?
42// FLOW-MAPPING-END
43// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
44
45// Peek the next token in the token queue.
46func peek_token(parser *yaml_parser_t) *yaml_token_t {
47 if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
48 return &parser.tokens[parser.tokens_head]
49 }
50 return nil
51}
52
53// Remove the next token from the queue (must be called after peek_token).
54func skip_token(parser *yaml_parser_t) {
55 parser.token_available = false
56 parser.tokens_parsed++
57 parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
58 parser.tokens_head++
59}
60
61// Get the next event.
62func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
63 // Erase the event object.
64 *event = yaml_event_t{}
65
66 // No events after the end of the stream or error.
67 if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
68 return true
69 }
70
71 // Generate the next event.
72 return yaml_parser_state_machine(parser, event)
73}
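This is a pull parser: callers keep requesting events until a stream-end event (or an error) arrives, exactly as implied_type.go does above. A minimal in-package sketch of that consumption loop (illustrative only):

// drainEvents shows the canonical consumption pattern for
// yaml_parser_parse on an already-initialized parser.
func drainEvents(p *yaml_parser_t) error {
	for {
		var evt yaml_event_t
		if !yaml_parser_parse(p, &evt) {
			return parserError(p)
		}
		if evt.typ == yaml_STREAM_END_EVENT {
			return nil
		}
		// ...handle evt here...
	}
}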
74
75// Set parser error.
76func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
77 parser.error = yaml_PARSER_ERROR
78 parser.problem = problem
79 parser.problem_mark = problem_mark
80 return false
81}
82
83func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
84 parser.error = yaml_PARSER_ERROR
85 parser.context = context
86 parser.context_mark = context_mark
87 parser.problem = problem
88 parser.problem_mark = problem_mark
89 return false
90}
91
92// State dispatcher.
93func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
94 //trace("yaml_parser_state_machine", "state:", parser.state.String())
95
96 switch parser.state {
97 case yaml_PARSE_STREAM_START_STATE:
98 return yaml_parser_parse_stream_start(parser, event)
99
100 case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
101 return yaml_parser_parse_document_start(parser, event, true)
102
103 case yaml_PARSE_DOCUMENT_START_STATE:
104 return yaml_parser_parse_document_start(parser, event, false)
105
106 case yaml_PARSE_DOCUMENT_CONTENT_STATE:
107 return yaml_parser_parse_document_content(parser, event)
108
109 case yaml_PARSE_DOCUMENT_END_STATE:
110 return yaml_parser_parse_document_end(parser, event)
111
112 case yaml_PARSE_BLOCK_NODE_STATE:
113 return yaml_parser_parse_node(parser, event, true, false)
114
115 case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
116 return yaml_parser_parse_node(parser, event, true, true)
117
118 case yaml_PARSE_FLOW_NODE_STATE:
119 return yaml_parser_parse_node(parser, event, false, false)
120
121 case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
122 return yaml_parser_parse_block_sequence_entry(parser, event, true)
123
124 case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
125 return yaml_parser_parse_block_sequence_entry(parser, event, false)
126
127 case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
128 return yaml_parser_parse_indentless_sequence_entry(parser, event)
129
130 case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
131 return yaml_parser_parse_block_mapping_key(parser, event, true)
132
133 case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
134 return yaml_parser_parse_block_mapping_key(parser, event, false)
135
136 case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
137 return yaml_parser_parse_block_mapping_value(parser, event)
138
139 case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
140 return yaml_parser_parse_flow_sequence_entry(parser, event, true)
141
142 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
143 return yaml_parser_parse_flow_sequence_entry(parser, event, false)
144
145 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
146 return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
147
148 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
149 return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
150
151 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
152 return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
153
154 case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
155 return yaml_parser_parse_flow_mapping_key(parser, event, true)
156
157 case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
158 return yaml_parser_parse_flow_mapping_key(parser, event, false)
159
160 case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
161 return yaml_parser_parse_flow_mapping_value(parser, event, false)
162
163 case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
164 return yaml_parser_parse_flow_mapping_value(parser, event, true)
165
166 default:
167 panic("invalid parser state")
168 }
169}
170
171// Parse the production:
172// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
173// ************
174func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
175 token := peek_token(parser)
176 if token == nil {
177 return false
178 }
179 if token.typ != yaml_STREAM_START_TOKEN {
180 return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
181 }
182 parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
183 *event = yaml_event_t{
184 typ: yaml_STREAM_START_EVENT,
185 start_mark: token.start_mark,
186 end_mark: token.end_mark,
187 encoding: token.encoding,
188 }
189 skip_token(parser)
190 return true
191}
192
193// Parse the productions:
194// implicit_document ::= block_node DOCUMENT-END*
195// *
196// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
197// *************************
198func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
199
200 token := peek_token(parser)
201 if token == nil {
202 return false
203 }
204
205 // Parse extra document end indicators.
206 if !implicit {
207 for token.typ == yaml_DOCUMENT_END_TOKEN {
208 skip_token(parser)
209 token = peek_token(parser)
210 if token == nil {
211 return false
212 }
213 }
214 }
215
216 if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
217 token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
218 token.typ != yaml_DOCUMENT_START_TOKEN &&
219 token.typ != yaml_STREAM_END_TOKEN {
220 // Parse an implicit document.
221 if !yaml_parser_process_directives(parser, nil, nil) {
222 return false
223 }
224 parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
225 parser.state = yaml_PARSE_BLOCK_NODE_STATE
226
227 *event = yaml_event_t{
228 typ: yaml_DOCUMENT_START_EVENT,
229 start_mark: token.start_mark,
230 end_mark: token.end_mark,
231 }
232
233 } else if token.typ != yaml_STREAM_END_TOKEN {
234 // Parse an explicit document.
235 var version_directive *yaml_version_directive_t
236 var tag_directives []yaml_tag_directive_t
237 start_mark := token.start_mark
238 if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
239 return false
240 }
241 token = peek_token(parser)
242 if token == nil {
243 return false
244 }
245 if token.typ != yaml_DOCUMENT_START_TOKEN {
246 yaml_parser_set_parser_error(parser,
247 "did not find expected <document start>", token.start_mark)
248 return false
249 }
250 parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
251 parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
252 end_mark := token.end_mark
253
254 *event = yaml_event_t{
255 typ: yaml_DOCUMENT_START_EVENT,
256 start_mark: start_mark,
257 end_mark: end_mark,
258 version_directive: version_directive,
259 tag_directives: tag_directives,
260 implicit: false,
261 }
262 skip_token(parser)
263
264 } else {
265 // Parse the stream end.
266 parser.state = yaml_PARSE_END_STATE
267 *event = yaml_event_t{
268 typ: yaml_STREAM_END_EVENT,
269 start_mark: token.start_mark,
270 end_mark: token.end_mark,
271 }
272 skip_token(parser)
273 }
274
275 return true
276}
277
278// Parse the productions:
279// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
280// ***********
281//
282func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
283 token := peek_token(parser)
284 if token == nil {
285 return false
286 }
287 if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
288 token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
289 token.typ == yaml_DOCUMENT_START_TOKEN ||
290 token.typ == yaml_DOCUMENT_END_TOKEN ||
291 token.typ == yaml_STREAM_END_TOKEN {
292 parser.state = parser.states[len(parser.states)-1]
293 parser.states = parser.states[:len(parser.states)-1]
294 return yaml_parser_process_empty_scalar(parser, event,
295 token.start_mark)
296 }
297 return yaml_parser_parse_node(parser, event, true, false)
298}
299
300// Parse the productions:
301// implicit_document ::= block_node DOCUMENT-END*
302// *************
303// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
304//
305func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
306 token := peek_token(parser)
307 if token == nil {
308 return false
309 }
310
311 start_mark := token.start_mark
312 end_mark := token.start_mark
313
314 implicit := true
315 if token.typ == yaml_DOCUMENT_END_TOKEN {
316 end_mark = token.end_mark
317 skip_token(parser)
318 implicit = false
319 }
320
321 parser.tag_directives = parser.tag_directives[:0]
322
323 parser.state = yaml_PARSE_DOCUMENT_START_STATE
324 *event = yaml_event_t{
325 typ: yaml_DOCUMENT_END_EVENT,
326 start_mark: start_mark,
327 end_mark: end_mark,
328 implicit: implicit,
329 }
330 return true
331}
332
333// Parse the productions:
334// block_node_or_indentless_sequence ::=
335// ALIAS
336// *****
337// | properties (block_content | indentless_block_sequence)?
338// ********** *
339// | block_content | indentless_block_sequence
340// *
341// block_node ::= ALIAS
342// *****
343// | properties block_content?
344// ********** *
345// | block_content
346// *
347// flow_node ::= ALIAS
348// *****
349// | properties flow_content?
350// ********** *
351// | flow_content
352// *
353// properties ::= TAG ANCHOR? | ANCHOR TAG?
354// *************************
355// block_content ::= block_collection | flow_collection | SCALAR
356// ******
357// flow_content ::= flow_collection | SCALAR
358// ******
359func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
360 //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
361
362 token := peek_token(parser)
363 if token == nil {
364 return false
365 }
366
367 if token.typ == yaml_ALIAS_TOKEN {
368 parser.state = parser.states[len(parser.states)-1]
369 parser.states = parser.states[:len(parser.states)-1]
370 *event = yaml_event_t{
371 typ: yaml_ALIAS_EVENT,
372 start_mark: token.start_mark,
373 end_mark: token.end_mark,
374 anchor: token.value,
375 }
376 skip_token(parser)
377 return true
378 }
379
380 start_mark := token.start_mark
381 end_mark := token.start_mark
382
383 var tag_token bool
384 var tag_handle, tag_suffix, anchor []byte
385 var tag_mark yaml_mark_t
386 if token.typ == yaml_ANCHOR_TOKEN {
387 anchor = token.value
388 start_mark = token.start_mark
389 end_mark = token.end_mark
390 skip_token(parser)
391 token = peek_token(parser)
392 if token == nil {
393 return false
394 }
395 if token.typ == yaml_TAG_TOKEN {
396 tag_token = true
397 tag_handle = token.value
398 tag_suffix = token.suffix
399 tag_mark = token.start_mark
400 end_mark = token.end_mark
401 skip_token(parser)
402 token = peek_token(parser)
403 if token == nil {
404 return false
405 }
406 }
407 } else if token.typ == yaml_TAG_TOKEN {
408 tag_token = true
409 tag_handle = token.value
410 tag_suffix = token.suffix
411 start_mark = token.start_mark
412 tag_mark = token.start_mark
413 end_mark = token.end_mark
414 skip_token(parser)
415 token = peek_token(parser)
416 if token == nil {
417 return false
418 }
419 if token.typ == yaml_ANCHOR_TOKEN {
420 anchor = token.value
421 end_mark = token.end_mark
422 skip_token(parser)
423 token = peek_token(parser)
424 if token == nil {
425 return false
426 }
427 }
428 }
429
430 var tag []byte
431 if tag_token {
432 if len(tag_handle) == 0 {
433 tag = tag_suffix
434 tag_suffix = nil
435 } else {
436 for i := range parser.tag_directives {
437 if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
438 tag = append([]byte(nil), parser.tag_directives[i].prefix...)
439 tag = append(tag, tag_suffix...)
440 break
441 }
442 }
443 if len(tag) == 0 {
444 yaml_parser_set_parser_error_context(parser,
445 "while parsing a node", start_mark,
446 "found undefined tag handle", tag_mark)
447 return false
448 }
449 }
450 }
451
452 implicit := len(tag) == 0
453 if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
454 end_mark = token.end_mark
455 parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
456 *event = yaml_event_t{
457 typ: yaml_SEQUENCE_START_EVENT,
458 start_mark: start_mark,
459 end_mark: end_mark,
460 anchor: anchor,
461 tag: tag,
462 implicit: implicit,
463 style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
464 }
465 return true
466 }
467 if token.typ == yaml_SCALAR_TOKEN {
468 var plain_implicit, quoted_implicit bool
469 end_mark = token.end_mark
470 if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
471 plain_implicit = true
472 } else if len(tag) == 0 {
473 quoted_implicit = true
474 }
475 parser.state = parser.states[len(parser.states)-1]
476 parser.states = parser.states[:len(parser.states)-1]
477
478 *event = yaml_event_t{
479 typ: yaml_SCALAR_EVENT,
480 start_mark: start_mark,
481 end_mark: end_mark,
482 anchor: anchor,
483 tag: tag,
484 value: token.value,
485 implicit: plain_implicit,
486 quoted_implicit: quoted_implicit,
487 style: yaml_style_t(token.style),
488 }
489 skip_token(parser)
490 return true
491 }
492 if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
493 // [Go] Some of the events below can be merged as they differ only on style.
494 end_mark = token.end_mark
495 parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
496 *event = yaml_event_t{
497 typ: yaml_SEQUENCE_START_EVENT,
498 start_mark: start_mark,
499 end_mark: end_mark,
500 anchor: anchor,
501 tag: tag,
502 implicit: implicit,
503 style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
504 }
505 return true
506 }
507 if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
508 end_mark = token.end_mark
509 parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
510 *event = yaml_event_t{
511 typ: yaml_MAPPING_START_EVENT,
512 start_mark: start_mark,
513 end_mark: end_mark,
514 anchor: anchor,
515 tag: tag,
516 implicit: implicit,
517 style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
518 }
519 return true
520 }
521 if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
522 end_mark = token.end_mark
523 parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
524 *event = yaml_event_t{
525 typ: yaml_SEQUENCE_START_EVENT,
526 start_mark: start_mark,
527 end_mark: end_mark,
528 anchor: anchor,
529 tag: tag,
530 implicit: implicit,
531 style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
532 }
533 return true
534 }
535 if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
536 end_mark = token.end_mark
537 parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
538 *event = yaml_event_t{
539 typ: yaml_MAPPING_START_EVENT,
540 start_mark: start_mark,
541 end_mark: end_mark,
542 anchor: anchor,
543 tag: tag,
544 implicit: implicit,
545 style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
546 }
547 return true
548 }
549 if len(anchor) > 0 || len(tag) > 0 {
550 parser.state = parser.states[len(parser.states)-1]
551 parser.states = parser.states[:len(parser.states)-1]
552
553 *event = yaml_event_t{
554 typ: yaml_SCALAR_EVENT,
555 start_mark: start_mark,
556 end_mark: end_mark,
557 anchor: anchor,
558 tag: tag,
559 implicit: implicit,
560 quoted_implicit: false,
561 style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
562 }
563 return true
564 }
565
566 context := "while parsing a flow node"
567 if block {
568 context = "while parsing a block node"
569 }
570 yaml_parser_set_parser_error_context(parser, context, start_mark,
571 "did not find expected node content", token.start_mark)
572 return false
573}
574
575// Parse the productions:
576// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
577// ******************** *********** * *********
578//
579func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
580 if first {
581 token := peek_token(parser)
582 parser.marks = append(parser.marks, token.start_mark)
583 skip_token(parser)
584 }
585
586 token := peek_token(parser)
587 if token == nil {
588 return false
589 }
590
591 if token.typ == yaml_BLOCK_ENTRY_TOKEN {
592 mark := token.end_mark
593 skip_token(parser)
594 token = peek_token(parser)
595 if token == nil {
596 return false
597 }
598 if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
599 parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
600 return yaml_parser_parse_node(parser, event, true, false)
601 } else {
602 parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
603 return yaml_parser_process_empty_scalar(parser, event, mark)
604 }
605 }
606 if token.typ == yaml_BLOCK_END_TOKEN {
607 parser.state = parser.states[len(parser.states)-1]
608 parser.states = parser.states[:len(parser.states)-1]
609 parser.marks = parser.marks[:len(parser.marks)-1]
610
611 *event = yaml_event_t{
612 typ: yaml_SEQUENCE_END_EVENT,
613 start_mark: token.start_mark,
614 end_mark: token.end_mark,
615 }
616
617 skip_token(parser)
618 return true
619 }
620
621 context_mark := parser.marks[len(parser.marks)-1]
622 parser.marks = parser.marks[:len(parser.marks)-1]
623 return yaml_parser_set_parser_error_context(parser,
624 "while parsing a block collection", context_mark,
625 "did not find expected '-' indicator", token.start_mark)
626}
627
628// Parse the productions:
629// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
630// *********** *
631func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
632 token := peek_token(parser)
633 if token == nil {
634 return false
635 }
636
637 if token.typ == yaml_BLOCK_ENTRY_TOKEN {
638 mark := token.end_mark
639 skip_token(parser)
640 token = peek_token(parser)
641 if token == nil {
642 return false
643 }
644 if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
645 token.typ != yaml_KEY_TOKEN &&
646 token.typ != yaml_VALUE_TOKEN &&
647 token.typ != yaml_BLOCK_END_TOKEN {
648 parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
649 return yaml_parser_parse_node(parser, event, true, false)
650 }
651 parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
652 return yaml_parser_process_empty_scalar(parser, event, mark)
653 }
654 parser.state = parser.states[len(parser.states)-1]
655 parser.states = parser.states[:len(parser.states)-1]
656
657 *event = yaml_event_t{
658 typ: yaml_SEQUENCE_END_EVENT,
659 start_mark: token.start_mark,
660 end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
661 }
662 return true
663}
664
665// Parse the productions:
666// block_mapping ::= BLOCK-MAPPING_START
667// *******************
668// ((KEY block_node_or_indentless_sequence?)?
669// *** *
670// (VALUE block_node_or_indentless_sequence?)?)*
671//
672// BLOCK-END
673// *********
674//
675func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
676 if first {
677 token := peek_token(parser)
678 parser.marks = append(parser.marks, token.start_mark)
679 skip_token(parser)
680 }
681
682 token := peek_token(parser)
683 if token == nil {
684 return false
685 }
686
687 if token.typ == yaml_KEY_TOKEN {
688 mark := token.end_mark
689 skip_token(parser)
690 token = peek_token(parser)
691 if token == nil {
692 return false
693 }
694 if token.typ != yaml_KEY_TOKEN &&
695 token.typ != yaml_VALUE_TOKEN &&
696 token.typ != yaml_BLOCK_END_TOKEN {
697 parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
698 return yaml_parser_parse_node(parser, event, true, true)
699 } else {
700 parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
701 return yaml_parser_process_empty_scalar(parser, event, mark)
702 }
703 } else if token.typ == yaml_BLOCK_END_TOKEN {
704 parser.state = parser.states[len(parser.states)-1]
705 parser.states = parser.states[:len(parser.states)-1]
706 parser.marks = parser.marks[:len(parser.marks)-1]
707 *event = yaml_event_t{
708 typ: yaml_MAPPING_END_EVENT,
709 start_mark: token.start_mark,
710 end_mark: token.end_mark,
711 }
712 skip_token(parser)
713 return true
714 }
715
716 context_mark := parser.marks[len(parser.marks)-1]
717 parser.marks = parser.marks[:len(parser.marks)-1]
718 return yaml_parser_set_parser_error_context(parser,
719 "while parsing a block mapping", context_mark,
720 "did not find expected key", token.start_mark)
721}
722
723// Parse the productions:
724// block_mapping ::= BLOCK-MAPPING_START
725//
726// ((KEY block_node_or_indentless_sequence?)?
727//
728// (VALUE block_node_or_indentless_sequence?)?)*
729// ***** *
730// BLOCK-END
731//
732//
733func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
734 token := peek_token(parser)
735 if token == nil {
736 return false
737 }
738 if token.typ == yaml_VALUE_TOKEN {
739 mark := token.end_mark
740 skip_token(parser)
741 token = peek_token(parser)
742 if token == nil {
743 return false
744 }
745 if token.typ != yaml_KEY_TOKEN &&
746 token.typ != yaml_VALUE_TOKEN &&
747 token.typ != yaml_BLOCK_END_TOKEN {
748 parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
749 return yaml_parser_parse_node(parser, event, true, true)
750 }
751 parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
752 return yaml_parser_process_empty_scalar(parser, event, mark)
753 }
754 parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
755 return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
756}
757
758// Parse the productions:
759// flow_sequence ::= FLOW-SEQUENCE-START
760// *******************
761// (flow_sequence_entry FLOW-ENTRY)*
762// * **********
763// flow_sequence_entry?
764// *
765// FLOW-SEQUENCE-END
766// *****************
767// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
768// *
769//
770func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
771 if first {
772 token := peek_token(parser)
773 parser.marks = append(parser.marks, token.start_mark)
774 skip_token(parser)
775 }
776 token := peek_token(parser)
777 if token == nil {
778 return false
779 }
780 if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
781 if !first {
782 if token.typ == yaml_FLOW_ENTRY_TOKEN {
783 skip_token(parser)
784 token = peek_token(parser)
785 if token == nil {
786 return false
787 }
788 } else {
789 context_mark := parser.marks[len(parser.marks)-1]
790 parser.marks = parser.marks[:len(parser.marks)-1]
791 return yaml_parser_set_parser_error_context(parser,
792 "while parsing a flow sequence", context_mark,
793 "did not find expected ',' or ']'", token.start_mark)
794 }
795 }
796
797 if token.typ == yaml_KEY_TOKEN {
798 parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
799 *event = yaml_event_t{
800 typ: yaml_MAPPING_START_EVENT,
801 start_mark: token.start_mark,
802 end_mark: token.end_mark,
803 implicit: true,
804 style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
805 }
806 skip_token(parser)
807 return true
808 } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
809 parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
810 return yaml_parser_parse_node(parser, event, false, false)
811 }
812 }
813
814 parser.state = parser.states[len(parser.states)-1]
815 parser.states = parser.states[:len(parser.states)-1]
816 parser.marks = parser.marks[:len(parser.marks)-1]
817
818 *event = yaml_event_t{
819 typ: yaml_SEQUENCE_END_EVENT,
820 start_mark: token.start_mark,
821 end_mark: token.end_mark,
822 }
823
824 skip_token(parser)
825 return true
826}
827
828//
829// Parse the productions:
830// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
831//                                      *** *
832//
833func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
834 token := peek_token(parser)
835 if token == nil {
836 return false
837 }
838 if token.typ != yaml_VALUE_TOKEN &&
839 token.typ != yaml_FLOW_ENTRY_TOKEN &&
840 token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
841 parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
842 return yaml_parser_parse_node(parser, event, false, false)
843 }
844 mark := token.end_mark
845 skip_token(parser)
846 parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
847 return yaml_parser_process_empty_scalar(parser, event, mark)
848}
849
850// Parse the productions:
851// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
852//                                                      ***** *
853//
854func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
855 token := peek_token(parser)
856 if token == nil {
857 return false
858 }
859 if token.typ == yaml_VALUE_TOKEN {
860 skip_token(parser)
861 token := peek_token(parser)
862 if token == nil {
863 return false
864 }
865 if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
866 parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
867 return yaml_parser_parse_node(parser, event, false, false)
868 }
869 }
870 parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
871 return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
872}
873
874// Parse the productions:
875// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
876//                                                                      *
877//
878func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
879 token := peek_token(parser)
880 if token == nil {
881 return false
882 }
883 parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
884 *event = yaml_event_t{
885 typ: yaml_MAPPING_END_EVENT,
886 start_mark: token.start_mark,
887 end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
888 }
889 return true
890}
891
892// Parse the productions:
893// flow_mapping         ::= FLOW-MAPPING-START
894//                          ******************
895//                          (flow_mapping_entry FLOW-ENTRY)*
896//                           *                  **********
897//                          flow_mapping_entry?
898//                          ******************
899//                          FLOW-MAPPING-END
900//                          ****************
901// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
902//                          *           *** *
903//
904func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
905 if first {
906 token := peek_token(parser)
907 parser.marks = append(parser.marks, token.start_mark)
908 skip_token(parser)
909 }
910
911 token := peek_token(parser)
912 if token == nil {
913 return false
914 }
915
916 if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
917 if !first {
918 if token.typ == yaml_FLOW_ENTRY_TOKEN {
919 skip_token(parser)
920 token = peek_token(parser)
921 if token == nil {
922 return false
923 }
924 } else {
925 context_mark := parser.marks[len(parser.marks)-1]
926 parser.marks = parser.marks[:len(parser.marks)-1]
927 return yaml_parser_set_parser_error_context(parser,
928 "while parsing a flow mapping", context_mark,
929 "did not find expected ',' or '}'", token.start_mark)
930 }
931 }
932
933 if token.typ == yaml_KEY_TOKEN {
934 skip_token(parser)
935 token = peek_token(parser)
936 if token == nil {
937 return false
938 }
939 if token.typ != yaml_VALUE_TOKEN &&
940 token.typ != yaml_FLOW_ENTRY_TOKEN &&
941 token.typ != yaml_FLOW_MAPPING_END_TOKEN {
942 parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
943 return yaml_parser_parse_node(parser, event, false, false)
944 } else {
945 parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
946 return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
947 }
948 } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
949 parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
950 return yaml_parser_parse_node(parser, event, false, false)
951 }
952 }
953
954 parser.state = parser.states[len(parser.states)-1]
955 parser.states = parser.states[:len(parser.states)-1]
956 parser.marks = parser.marks[:len(parser.marks)-1]
957 *event = yaml_event_t{
958 typ: yaml_MAPPING_END_EVENT,
959 start_mark: token.start_mark,
960 end_mark: token.end_mark,
961 }
962 skip_token(parser)
963 return true
964}
965
966// Parse the productions:
967// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
968//                                   *                  ***** *
969//
970func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
971 token := peek_token(parser)
972 if token == nil {
973 return false
974 }
975 if empty {
976 parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
977 return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
978 }
979 if token.typ == yaml_VALUE_TOKEN {
980 skip_token(parser)
981 token = peek_token(parser)
982 if token == nil {
983 return false
984 }
985 if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
986 parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
987 return yaml_parser_parse_node(parser, event, false, false)
988 }
989 }
990 parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
991 return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
992}
993
994// Generate an empty scalar event.
995func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
996 *event = yaml_event_t{
997 typ: yaml_SCALAR_EVENT,
998 start_mark: mark,
999 end_mark: mark,
1000 value: nil, // Empty
1001 implicit: true,
1002 style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
1003 }
1004 return true
1005}
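
// For example (illustrative): parsing the document "key:" emits SCALAR("key")
// for the key and then uses the function above to emit a zero-length implicit
// plain scalar for the missing value, anchored at the mark just after ':'.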
1006
1007var default_tag_directives = []yaml_tag_directive_t{
1008 {[]byte("!"), []byte("!")},
1009 {[]byte("!!"), []byte("tag:yaml.org,2002:")},
1010}
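
// Illustrative sketch, not part of the original libyaml port: resolving a tag
// handle against the directives above. With no explicit %TAG directives, the
// default "!!" handle expands "!!str" to "tag:yaml.org,2002:str". The helper
// name is hypothetical.
func example_expand_tag_handle(handle, suffix string) string {
	for _, d := range default_tag_directives {
		if string(d.handle) == handle {
			return string(d.prefix) + suffix
		}
	}
	// Unknown handle: leave it unexpanded.
	return handle + suffix
}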
1011
1012// Parse directives.
1013func yaml_parser_process_directives(parser *yaml_parser_t,
1014 version_directive_ref **yaml_version_directive_t,
1015 tag_directives_ref *[]yaml_tag_directive_t) bool {
1016
1017 var version_directive *yaml_version_directive_t
1018 var tag_directives []yaml_tag_directive_t
1019
1020 token := peek_token(parser)
1021 if token == nil {
1022 return false
1023 }
1024
1025 for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
1026 if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
1027 if version_directive != nil {
1028 yaml_parser_set_parser_error(parser,
1029 "found duplicate %YAML directive", token.start_mark)
1030 return false
1031 }
1032 if token.major != 1 || token.minor != 1 {
1033 yaml_parser_set_parser_error(parser,
1034 "found incompatible YAML document", token.start_mark)
1035 return false
1036 }
1037 version_directive = &yaml_version_directive_t{
1038 major: token.major,
1039 minor: token.minor,
1040 }
1041 } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
1042 value := yaml_tag_directive_t{
1043 handle: token.value,
1044 prefix: token.prefix,
1045 }
1046 if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
1047 return false
1048 }
1049 tag_directives = append(tag_directives, value)
1050 }
1051
1052 skip_token(parser)
1053 token = peek_token(parser)
1054 if token == nil {
1055 return false
1056 }
1057 }
1058
1059 for i := range default_tag_directives {
1060 if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
1061 return false
1062 }
1063 }
1064
1065 if version_directive_ref != nil {
1066 *version_directive_ref = version_directive
1067 }
1068 if tag_directives_ref != nil {
1069 *tag_directives_ref = tag_directives
1070 }
1071 return true
1072}
1073
1074// Append a tag directive to the directives stack.
1075func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
1076 for i := range parser.tag_directives {
1077 if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
1078 if allow_duplicates {
1079 return true
1080 }
1081 return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
1082 }
1083 }
1084
1085 // [Go] I suspect the copy is unnecessary. This was likely done
1086 // because there was no way to track ownership of the data.
1087 value_copy := yaml_tag_directive_t{
1088 handle: make([]byte, len(value.handle)),
1089 prefix: make([]byte, len(value.prefix)),
1090 }
1091 copy(value_copy.handle, value.handle)
1092 copy(value_copy.prefix, value.prefix)
1093 parser.tag_directives = append(parser.tag_directives, value_copy)
1094 return true
1095}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go
new file mode 100644
index 0000000..7c1f5fa
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/readerc.go
@@ -0,0 +1,412 @@
1package yaml
2
3import (
4 "io"
5)
6
7// Set the reader error and return false.
8func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
9 parser.error = yaml_READER_ERROR
10 parser.problem = problem
11 parser.problem_offset = offset
12 parser.problem_value = value
13 return false
14}
15
16// Byte order marks.
17const (
18 bom_UTF8 = "\xef\xbb\xbf"
19 bom_UTF16LE = "\xff\xfe"
20 bom_UTF16BE = "\xfe\xff"
21)
22
23// Determine the input stream encoding by checking the BOM symbol. If no BOM is
24// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
25func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
26	// Ensure that we have enough bytes in the raw buffer.
27 for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
28 if !yaml_parser_update_raw_buffer(parser) {
29 return false
30 }
31 }
32
33 // Determine the encoding.
34 buf := parser.raw_buffer
35 pos := parser.raw_buffer_pos
36 avail := len(buf) - pos
37 if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
38 parser.encoding = yaml_UTF16LE_ENCODING
39 parser.raw_buffer_pos += 2
40 parser.offset += 2
41 } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
42 parser.encoding = yaml_UTF16BE_ENCODING
43 parser.raw_buffer_pos += 2
44 parser.offset += 2
45 } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
46 parser.encoding = yaml_UTF8_ENCODING
47 parser.raw_buffer_pos += 3
48 parser.offset += 3
49 } else {
50 parser.encoding = yaml_UTF8_ENCODING
51 }
52 return true
53}
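
// A minimal standalone sketch of the same BOM logic, for reference only; the
// real function above also consumes the BOM bytes and advances parser.offset.
// The helper name is hypothetical.
func example_sniff_encoding(b []byte) yaml_encoding_t {
	switch {
	case len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF:
		return yaml_UTF8_ENCODING // explicit UTF-8 BOM
	case len(b) >= 2 && b[0] == 0xFF && b[1] == 0xFE:
		return yaml_UTF16LE_ENCODING
	case len(b) >= 2 && b[0] == 0xFE && b[1] == 0xFF:
		return yaml_UTF16BE_ENCODING
	default:
		return yaml_UTF8_ENCODING // no BOM: UTF-8 is assumed
	}
}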
54
55// Update the raw buffer.
56func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
57 size_read := 0
58
59 // Return if the raw buffer is full.
60 if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
61 return true
62 }
63
64 // Return on EOF.
65 if parser.eof {
66 return true
67 }
68
69 // Move the remaining bytes in the raw buffer to the beginning.
70 if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
71 copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
72 }
73 parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
74 parser.raw_buffer_pos = 0
75
76 // Call the read handler to fill the buffer.
77 size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
78 parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
79 if err == io.EOF {
80 parser.eof = true
81 } else if err != nil {
82 return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
83 }
84 return true
85}
86
87// Ensure that the buffer contains at least `length` characters.
88// Return true on success, false on failure.
89//
90// The length is supposed to be significantly less than the buffer size.
91func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
92 if parser.read_handler == nil {
93 panic("read handler must be set")
94 }
95
96 // [Go] This function was changed to guarantee the requested length size at EOF.
97// The fact we need to do this is pretty awful, but the description above
98// requires it to be the case, and there are tests that depend on it.
99
100 // If the EOF flag is set and the raw buffer is empty, do nothing.
101 if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
102 // [Go] ACTUALLY! Read the documentation of this function above.
103 // This is just broken. To return true, we need to have the
104		// given length in the buffer. Without it, every check that calls
105		// this function to ensure the buffer has a given length would
106		// either panic (in Go) or access invalid memory (in C).
107 //return true
108 }
109
110 // Return if the buffer contains enough characters.
111 if parser.unread >= length {
112 return true
113 }
114
115 // Determine the input encoding if it is not known yet.
116 if parser.encoding == yaml_ANY_ENCODING {
117 if !yaml_parser_determine_encoding(parser) {
118 return false
119 }
120 }
121
122 // Move the unread characters to the beginning of the buffer.
123 buffer_len := len(parser.buffer)
124 if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
125 copy(parser.buffer, parser.buffer[parser.buffer_pos:])
126 buffer_len -= parser.buffer_pos
127 parser.buffer_pos = 0
128 } else if parser.buffer_pos == buffer_len {
129 buffer_len = 0
130 parser.buffer_pos = 0
131 }
132
133 // Open the whole buffer for writing, and cut it before returning.
134 parser.buffer = parser.buffer[:cap(parser.buffer)]
135
136 // Fill the buffer until it has enough characters.
137 first := true
138 for parser.unread < length {
139
140 // Fill the raw buffer if necessary.
141 if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
142 if !yaml_parser_update_raw_buffer(parser) {
143 parser.buffer = parser.buffer[:buffer_len]
144 return false
145 }
146 }
147 first = false
148
149 // Decode the raw buffer.
150 inner:
151 for parser.raw_buffer_pos != len(parser.raw_buffer) {
152 var value rune
153 var width int
154
155 raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
156
157 // Decode the next character.
158 switch parser.encoding {
159 case yaml_UTF8_ENCODING:
160 // Decode a UTF-8 character. Check RFC 3629
161 // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
162 //
163 // The following table (taken from the RFC) is used for
164 // decoding.
165 //
166 // Char. number range | UTF-8 octet sequence
167 // (hexadecimal) | (binary)
168 // --------------------+------------------------------------
169 // 0000 0000-0000 007F | 0xxxxxxx
170 // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
171 // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
172 // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
173 //
174 // Additionally, the characters in the range 0xD800-0xDFFF
175 // are prohibited as they are reserved for use with UTF-16
176 // surrogate pairs.
177
178 // Determine the length of the UTF-8 sequence.
179 octet := parser.raw_buffer[parser.raw_buffer_pos]
180 switch {
181 case octet&0x80 == 0x00:
182 width = 1
183 case octet&0xE0 == 0xC0:
184 width = 2
185 case octet&0xF0 == 0xE0:
186 width = 3
187 case octet&0xF8 == 0xF0:
188 width = 4
189 default:
190 // The leading octet is invalid.
191 return yaml_parser_set_reader_error(parser,
192 "invalid leading UTF-8 octet",
193 parser.offset, int(octet))
194 }
195
196 // Check if the raw buffer contains an incomplete character.
197 if width > raw_unread {
198 if parser.eof {
199 return yaml_parser_set_reader_error(parser,
200 "incomplete UTF-8 octet sequence",
201 parser.offset, -1)
202 }
203 break inner
204 }
205
206 // Decode the leading octet.
207 switch {
208 case octet&0x80 == 0x00:
209 value = rune(octet & 0x7F)
210 case octet&0xE0 == 0xC0:
211 value = rune(octet & 0x1F)
212 case octet&0xF0 == 0xE0:
213 value = rune(octet & 0x0F)
214 case octet&0xF8 == 0xF0:
215 value = rune(octet & 0x07)
216 default:
217 value = 0
218 }
219
220 // Check and decode the trailing octets.
221 for k := 1; k < width; k++ {
222 octet = parser.raw_buffer[parser.raw_buffer_pos+k]
223
224 // Check if the octet is valid.
225 if (octet & 0xC0) != 0x80 {
226 return yaml_parser_set_reader_error(parser,
227 "invalid trailing UTF-8 octet",
228 parser.offset+k, int(octet))
229 }
230
231 // Decode the octet.
232 value = (value << 6) + rune(octet&0x3F)
233 }
234
235 // Check the length of the sequence against the value.
236 switch {
237 case width == 1:
238 case width == 2 && value >= 0x80:
239 case width == 3 && value >= 0x800:
240 case width == 4 && value >= 0x10000:
241 default:
242 return yaml_parser_set_reader_error(parser,
243 "invalid length of a UTF-8 sequence",
244 parser.offset, -1)
245 }
246
247 // Check the range of the value.
248 if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
249 return yaml_parser_set_reader_error(parser,
250 "invalid Unicode character",
251 parser.offset, int(value))
252 }
253
254 case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
255 var low, high int
256 if parser.encoding == yaml_UTF16LE_ENCODING {
257 low, high = 0, 1
258 } else {
259 low, high = 1, 0
260 }
261
262 // The UTF-16 encoding is not as simple as one might
263 // naively think. Check RFC 2781
264 // (http://www.ietf.org/rfc/rfc2781.txt).
265 //
266 // Normally, two subsequent bytes describe a Unicode
267 // character. However a special technique (called a
268 // surrogate pair) is used for specifying character
269 // values larger than 0xFFFF.
270 //
271 // A surrogate pair consists of two pseudo-characters:
272 // high surrogate area (0xD800-0xDBFF)
273 // low surrogate area (0xDC00-0xDFFF)
274 //
275 // The following formulas are used for decoding
276 // and encoding characters using surrogate pairs:
277 //
278 // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
279 // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
280 // W1 = 110110yyyyyyyyyy
281 // W2 = 110111xxxxxxxxxx
282 //
283 // where U is the character value, W1 is the high surrogate
284 // area, W2 is the low surrogate area.
285
286 // Check for incomplete UTF-16 character.
287 if raw_unread < 2 {
288 if parser.eof {
289 return yaml_parser_set_reader_error(parser,
290 "incomplete UTF-16 character",
291 parser.offset, -1)
292 }
293 break inner
294 }
295
296 // Get the character.
297 value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
298 (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
299
300 // Check for unexpected low surrogate area.
301 if value&0xFC00 == 0xDC00 {
302 return yaml_parser_set_reader_error(parser,
303 "unexpected low surrogate area",
304 parser.offset, int(value))
305 }
306
307 // Check for a high surrogate area.
308 if value&0xFC00 == 0xD800 {
309 width = 4
310
311 // Check for incomplete surrogate pair.
312 if raw_unread < 4 {
313 if parser.eof {
314 return yaml_parser_set_reader_error(parser,
315 "incomplete UTF-16 surrogate pair",
316 parser.offset, -1)
317 }
318 break inner
319 }
320
321 // Get the next character.
322 value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
323 (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
324
325 // Check for a low surrogate area.
326 if value2&0xFC00 != 0xDC00 {
327 return yaml_parser_set_reader_error(parser,
328 "expected low surrogate area",
329 parser.offset+2, int(value2))
330 }
331
332 // Generate the value of the surrogate pair.
333 value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
334 } else {
335 width = 2
336 }
337
338 default:
339 panic("impossible")
340 }
341
342 // Check if the character is in the allowed range:
343 // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
344 // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
345 // | [#x10000-#x10FFFF] (32 bit)
346 switch {
347 case value == 0x09:
348 case value == 0x0A:
349 case value == 0x0D:
350 case value >= 0x20 && value <= 0x7E:
351 case value == 0x85:
352 case value >= 0xA0 && value <= 0xD7FF:
353 case value >= 0xE000 && value <= 0xFFFD:
354 case value >= 0x10000 && value <= 0x10FFFF:
355 default:
356 return yaml_parser_set_reader_error(parser,
357 "control characters are not allowed",
358 parser.offset, int(value))
359 }
360
361 // Move the raw pointers.
362 parser.raw_buffer_pos += width
363 parser.offset += width
364
365 // Finally put the character into the buffer.
366 if value <= 0x7F {
367 // 0000 0000-0000 007F . 0xxxxxxx
368 parser.buffer[buffer_len+0] = byte(value)
369 buffer_len += 1
370 } else if value <= 0x7FF {
371 // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
372 parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
373 parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
374 buffer_len += 2
375 } else if value <= 0xFFFF {
376 // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
377 parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
378 parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
379 parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
380 buffer_len += 3
381 } else {
382 // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
383 parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
384 parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
385 parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
386 parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
387 buffer_len += 4
388 }
389
390 parser.unread++
391 }
392
393 // On EOF, put NUL into the buffer and return.
394 if parser.eof {
395 parser.buffer[buffer_len] = 0
396 buffer_len++
397 parser.unread++
398 break
399 }
400 }
401 // [Go] Read the documentation of this function above. To return true,
402// we need to have the given length in the buffer. Without it, every check
403// that calls this function to ensure the buffer has a given length would
404// either panic (in Go) or access invalid memory (in C).
405 // This happens here due to the EOF above breaking early.
406 for buffer_len < length {
407 parser.buffer[buffer_len] = 0
408 buffer_len++
409 }
410 parser.buffer = parser.buffer[:buffer_len]
411 return true
412}
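
// Illustrative sketch, not part of the original file: composing a code point
// from a UTF-16 surrogate pair with the same formula the decoder above uses,
// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF). The helper name is
// hypothetical.
func example_combine_surrogates(w1, w2 rune) rune {
	return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
}

// For instance, example_combine_surrogates(0xD83D, 0xDE00) yields 0x1F600.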
diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go
new file mode 100644
index 0000000..0f64383
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/resolve.go
@@ -0,0 +1,288 @@
1package yaml
2
3import (
4 "encoding/base64"
5 "fmt"
6 "reflect"
7 "regexp"
8 "strconv"
9 "strings"
10 "time"
11
12 "github.com/zclconf/go-cty/cty"
13)
14
15type resolveMapItem struct {
16 value cty.Value
17 tag string
18}
19
20var resolveTable = make([]byte, 256)
21var resolveMap = make(map[string]resolveMapItem)
22
23func init() {
24 t := resolveTable
25 t[int('+')] = 'S' // Sign
26 t[int('-')] = 'S'
27 for _, c := range "0123456789" {
28 t[int(c)] = 'D' // Digit
29 }
30 for _, c := range "yYnNtTfFoO~" {
31 t[int(c)] = 'M' // In map
32 }
33 t[int('.')] = '.' // Float (potentially in map)
34
35 var resolveMapList = []struct {
36 v cty.Value
37 tag string
38 l []string
39 }{
40 {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
41 {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
42 {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
43 {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
44 {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
45 {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
46 {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
47 {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
48 {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
49 {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
50 }
51
52 m := resolveMap
53 for _, item := range resolveMapList {
54 for _, s := range item.l {
55 m[s] = resolveMapItem{item.v, item.tag}
56 }
57 }
58}
59
60const longTagPrefix = "tag:yaml.org,2002:"
61
62func shortTag(tag string) string {
63 // TODO This can easily be made faster and produce less garbage.
64 if strings.HasPrefix(tag, longTagPrefix) {
65 return "!!" + tag[len(longTagPrefix):]
66 }
67 return tag
68}
69
70func longTag(tag string) string {
71 if strings.HasPrefix(tag, "!!") {
72 return longTagPrefix + tag[2:]
73 }
74 return tag
75}
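
// For reference (illustrative): the two helpers above are inverses for the
// "!!" shorthand:
//
//	shortTag("tag:yaml.org,2002:str") == "!!str"
//	longTag("!!str") == "tag:yaml.org,2002:str"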
76
77func resolvableTag(tag string) bool {
78 switch tag {
79 case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG:
80 return true
81 }
82 return false
83}
84
85var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
86
87func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) {
88 if !resolvableTag(tag) {
89 return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
90 }
91
92 // Any data is accepted as a !!str or !!binary.
93 // Otherwise, the prefix is enough of a hint about what it might be.
94 hint := byte('N')
95 if src != "" {
96 hint = resolveTable[src[0]]
97 }
98 if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
99 if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE {
100 return cty.StringVal(src), nil
101 }
102
103		// Handle things we can look up in a map.
104 if item, ok := resolveMap[src]; ok {
105 return item.value, nil
106 }
107
108 if tag == "" {
109 for _, nan := range []string{".nan", ".NaN", ".NAN"} {
110 if src == nan {
111 // cty cannot represent NaN, so this is an error
112 return cty.NilVal, fmt.Errorf("floating point NaN is not supported")
113 }
114 }
115 }
116
117 // Base 60 floats are intentionally not supported.
118
119 switch hint {
120 case 'M':
121 // We've already checked the map above.
122
123 case '.':
124 // Not in the map, so maybe a normal float.
125 if numberVal, err := cty.ParseNumberVal(src); err == nil {
126 return numberVal, nil
127 }
128
129 case 'D', 'S':
130 // Int, float, or timestamp.
131 // Only try values as a timestamp if the value is unquoted or there's an explicit
132 // !!timestamp tag.
133 if tag == "" || tag == yaml_TIMESTAMP_TAG {
134 t, ok := parseTimestamp(src)
135 if ok {
136 // cty has no timestamp type, but its functions stdlib
137 // conventionally uses strings in an RFC3339 encoding
138 // to represent time, so we'll follow that convention here.
139 return cty.StringVal(t.Format(time.RFC3339)), nil
140 }
141 }
142
143 plain := strings.Replace(src, "_", "", -1)
144 if numberVal, err := cty.ParseNumberVal(plain); err == nil {
145 return numberVal, nil
146 }
147 if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") {
148 tag = yaml_INT_TAG // will handle parsing below in our tag switch
149 }
150 default:
151 panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src))
152 }
153 }
154
155 if tag == "" && src == "<<" {
156 return mergeMappingVal, nil
157 }
158
159 switch tag {
160 case yaml_STR_TAG, yaml_BINARY_TAG:
161 // If it's binary then we want to keep the base64 representation, because
162 // cty has no binary type, but we will check that it's actually base64.
163 if tag == yaml_BINARY_TAG {
164 _, err := base64.StdEncoding.DecodeString(src)
165 if err != nil {
166 return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag)
167 }
168 }
169 return cty.StringVal(src), nil
170 case yaml_BOOL_TAG:
171 item, ok := resolveMap[src]
172 if !ok || item.tag != yaml_BOOL_TAG {
173 return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
174 }
175 return item.value, nil
176 case yaml_FLOAT_TAG, yaml_INT_TAG:
177 // Note: We don't actually check that a value tagged INT is a whole
178 // number here. We could, but cty generally doesn't care about the
179 // int/float distinction, so we'll just be generous and accept it.
180 plain := strings.Replace(src, "_", "", -1)
181 if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats
182 return numberVal, nil
183 }
184 if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
185 return cty.NumberIntVal(intv), nil
186 }
187 if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
188 return cty.NumberUIntVal(uintv), nil
189 }
190 if strings.HasPrefix(plain, "0b") {
191 intv, err := strconv.ParseInt(plain[2:], 2, 64)
192 if err == nil {
193 return cty.NumberIntVal(intv), nil
194 }
195 uintv, err := strconv.ParseUint(plain[2:], 2, 64)
196 if err == nil {
197 return cty.NumberUIntVal(uintv), nil
198 }
199 } else if strings.HasPrefix(plain, "-0b") {
200 intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
201 if err == nil {
202 return cty.NumberIntVal(intv), nil
203 }
204 }
205 return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
206 case yaml_TIMESTAMP_TAG:
207 t, ok := parseTimestamp(src)
208 if ok {
209 // cty has no timestamp type, but its functions stdlib
210 // conventionally uses strings in an RFC3339 encoding
211 // to represent time, so we'll follow that convention here.
212 return cty.StringVal(t.Format(time.RFC3339)), nil
213 }
214 return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
215 case yaml_NULL_TAG:
216 return cty.NullVal(cty.DynamicPseudoType), nil
217 case "":
218 return cty.StringVal(src), nil
219 default:
220 return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
221 }
222}
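
// A hedged usage sketch of the resolver above (Converter is declared
// elsewhere in this package; the results follow the resolution rules above):
//
//	c.resolveScalar("", "yes", yaml_PLAIN_SCALAR_STYLE)         // cty.True, via resolveMap
//	c.resolveScalar("", "yes", yaml_DOUBLE_QUOTED_SCALAR_STYLE) // cty.StringVal("yes")
//	c.resolveScalar("", "1_000.5", yaml_PLAIN_SCALAR_STYLE)     // a cty number equal to 1000.5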
223
224// encodeBase64 encodes s as base64 that is broken up into multiple lines
225// as appropriate for the resulting length.
226func encodeBase64(s string) string {
227 const lineLen = 70
228 encLen := base64.StdEncoding.EncodedLen(len(s))
229 lines := encLen/lineLen + 1
230 buf := make([]byte, encLen*2+lines)
231 in := buf[0:encLen]
232 out := buf[encLen:]
233 base64.StdEncoding.Encode(in, []byte(s))
234 k := 0
235 for i := 0; i < len(in); i += lineLen {
236 j := i + lineLen
237 if j > len(in) {
238 j = len(in)
239 }
240 k += copy(out[k:], in[i:j])
241 if lines > 1 {
242 out[k] = '\n'
243 k++
244 }
245 }
246 return string(out[:k])
247}
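
// For example (illustrative): a 100-byte input encodes to 136 base64
// characters, which the function above emits as one 70-character line and one
// 66-character line, each terminated by '\n'.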
248
249// This is a subset of the formats allowed by the regular expression
250// defined at http://yaml.org/type/timestamp.html.
251var allowedTimestampFormats = []string{
252	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
253 "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
254 "2006-1-2 15:4:5.999999999", // space separated with no time zone
255 "2006-1-2", // date only
256 // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
257 // from the set of examples.
258}
259
260// parseTimestamp parses s as a timestamp string and returns the timestamp
261// along with a flag reporting whether parsing succeeded.
262// Timestamp formats are defined at http://yaml.org/type/timestamp.html
263func parseTimestamp(s string) (time.Time, bool) {
264 // TODO write code to check all the formats supported by
265 // http://yaml.org/type/timestamp.html instead of using time.Parse.
266
267 // Quick check: all date formats start with YYYY-.
268 i := 0
269 for ; i < len(s); i++ {
270 if c := s[i]; c < '0' || c > '9' {
271 break
272 }
273 }
274 if i != 4 || i == len(s) || s[i] != '-' {
275 return time.Time{}, false
276 }
277 for _, format := range allowedTimestampFormats {
278 if t, err := time.Parse(format, s); err == nil {
279 return t, true
280 }
281 }
282 return time.Time{}, false
283}
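
// Examples, per the formats above (illustrative):
//
//	parseTimestamp("2001-12-15T02:59:43.1Z") // ok: RFC3339Nano-style form
//	parseTimestamp("2002-12-14")             // ok: date only
//	parseTimestamp("14-12-2002")             // not ok: must start with "YYYY-"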
284
285type mergeMapping struct{}
286
287var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
288var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
new file mode 100644
index 0000000..077fd1d
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
@@ -0,0 +1,2696 @@
1package yaml
2
3import (
4 "bytes"
5 "fmt"
6)
7
8// Introduction
9// ************
10//
11// The following notes assume that you are familiar with the YAML specification
12// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
13// some cases we are less restrictive than it requires.
14//
15// The process of transforming a YAML stream into a sequence of events is
16// divided into two steps: Scanning and Parsing.
17//
18// The Scanner transforms the input stream into a sequence of tokens, while the
19// Parser transforms the sequence of tokens produced by the Scanner into a
20// sequence of parsing events.
21//
22// The Scanner is rather clever and complicated. The Parser, on the contrary,
23// is a straightforward implementation of a recursive-descent parser (or, as
24// it is usually called, an LL(1) parser).
25//
26// Actually, there are two aspects of scanning that might be called "clever";
27// the rest is quite straightforward. They are "block collection start" and
28// "simple keys". Both are explained below in detail.
29//
30// Here the Scanning step is explained and implemented. We start with the list
31// of all the tokens produced by the Scanner together with short descriptions.
32//
33// Now, tokens:
34//
35// STREAM-START(encoding) # The stream start.
36// STREAM-END # The stream end.
37// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
38// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
39// DOCUMENT-START # '---'
40// DOCUMENT-END # '...'
41// BLOCK-SEQUENCE-START # Indentation increase denoting a block
42// BLOCK-MAPPING-START # sequence or a block mapping.
43// BLOCK-END # Indentation decrease.
44// FLOW-SEQUENCE-START # '['
45// FLOW-SEQUENCE-END # ']'
46// FLOW-MAPPING-START # '{'
47// FLOW-MAPPING-END # '}'
48// BLOCK-ENTRY # '-'
49// FLOW-ENTRY # ','
50// KEY # '?' or nothing (simple keys).
51// VALUE # ':'
52// ALIAS(anchor) # '*anchor'
53// ANCHOR(anchor) # '&anchor'
54// TAG(handle,suffix) # '!handle!suffix'
55// SCALAR(value,style) # A scalar.
56//
57// The following two tokens are "virtual" tokens denoting the beginning and the
58// end of the stream:
59//
60// STREAM-START(encoding)
61// STREAM-END
62//
63// We pass the information about the input stream encoding with the
64// STREAM-START token.
65//
66// The next two tokens are responsible for tags:
67//
68// VERSION-DIRECTIVE(major,minor)
69// TAG-DIRECTIVE(handle,prefix)
70//
71// Example:
72//
73// %YAML 1.1
74// %TAG ! !foo
75// %TAG !yaml! tag:yaml.org,2002:
76// ---
77//
78// The corresponding sequence of tokens:
79//
80// STREAM-START(utf-8)
81// VERSION-DIRECTIVE(1,1)
82// TAG-DIRECTIVE("!","!foo")
83// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
84// DOCUMENT-START
85// STREAM-END
86//
87// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
88// line.
89//
90// The document start and end indicators are represented by:
91//
92// DOCUMENT-START
93// DOCUMENT-END
94//
95// Note that if a YAML stream contains an implicit document (without '---'
96// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
97// produced.
98//
99// In the following examples, we present whole documents together with the
100// produced tokens.
101//
102// 1. An implicit document:
103//
104// 'a scalar'
105//
106// Tokens:
107//
108// STREAM-START(utf-8)
109// SCALAR("a scalar",single-quoted)
110// STREAM-END
111//
112// 2. An explicit document:
113//
114// ---
115// 'a scalar'
116// ...
117//
118// Tokens:
119//
120// STREAM-START(utf-8)
121// DOCUMENT-START
122// SCALAR("a scalar",single-quoted)
123// DOCUMENT-END
124// STREAM-END
125//
126// 3. Several documents in a stream:
127//
128// 'a scalar'
129// ---
130// 'another scalar'
131// ---
132// 'yet another scalar'
133//
134// Tokens:
135//
136// STREAM-START(utf-8)
137// SCALAR("a scalar",single-quoted)
138// DOCUMENT-START
139// SCALAR("another scalar",single-quoted)
140// DOCUMENT-START
141// SCALAR("yet another scalar",single-quoted)
142// STREAM-END
143//
144// We have already introduced the SCALAR token above. The following tokens are
145// used to describe aliases, anchors, tag, and scalars:
146//
147// ALIAS(anchor)
148// ANCHOR(anchor)
149// TAG(handle,suffix)
150// SCALAR(value,style)
151//
152// The following series of examples illustrate the usage of these tokens:
153//
154// 1. A recursive sequence:
155//
156// &A [ *A ]
157//
158// Tokens:
159//
160// STREAM-START(utf-8)
161// ANCHOR("A")
162// FLOW-SEQUENCE-START
163// ALIAS("A")
164// FLOW-SEQUENCE-END
165// STREAM-END
166//
167// 2. A tagged scalar:
168//
169// !!float "3.14" # A good approximation.
170//
171// Tokens:
172//
173// STREAM-START(utf-8)
174// TAG("!!","float")
175// SCALAR("3.14",double-quoted)
176// STREAM-END
177//
178// 3. Various scalar styles:
179//
180// --- # Implicit empty plain scalars do not produce tokens.
181// --- a plain scalar
182// --- 'a single-quoted scalar'
183// --- "a double-quoted scalar"
184// --- |-
185//   a literal scalar
186// --- >-
187//   a folded
188//   scalar
189//
190// Tokens:
191//
192// STREAM-START(utf-8)
193// DOCUMENT-START
194// DOCUMENT-START
195// SCALAR("a plain scalar",plain)
196// DOCUMENT-START
197// SCALAR("a single-quoted scalar",single-quoted)
198// DOCUMENT-START
199// SCALAR("a double-quoted scalar",double-quoted)
200// DOCUMENT-START
201// SCALAR("a literal scalar",literal)
202// DOCUMENT-START
203// SCALAR("a folded scalar",folded)
204// STREAM-END
205//
206// Now it's time to review collection-related tokens. We will start with
207// flow collections:
208//
209// FLOW-SEQUENCE-START
210// FLOW-SEQUENCE-END
211// FLOW-MAPPING-START
212// FLOW-MAPPING-END
213// FLOW-ENTRY
214// KEY
215// VALUE
216//
217// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
218// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
219// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
220// indicators '?' and ':', which are used for denoting mapping keys and values,
221// are represented by the KEY and VALUE tokens.
222//
223// The following examples show flow collections:
224//
225// 1. A flow sequence:
226//
227// [item 1, item 2, item 3]
228//
229// Tokens:
230//
231// STREAM-START(utf-8)
232// FLOW-SEQUENCE-START
233// SCALAR("item 1",plain)
234// FLOW-ENTRY
235// SCALAR("item 2",plain)
236// FLOW-ENTRY
237// SCALAR("item 3",plain)
238// FLOW-SEQUENCE-END
239// STREAM-END
240//
241// 2. A flow mapping:
242//
243// {
244//     a simple key: a value,  # Note that the KEY token is produced.
245//     ? a complex key: another value,
246// }
247//
248// Tokens:
249//
250// STREAM-START(utf-8)
251// FLOW-MAPPING-START
252// KEY
253// SCALAR("a simple key",plain)
254// VALUE
255// SCALAR("a value",plain)
256// FLOW-ENTRY
257// KEY
258// SCALAR("a complex key",plain)
259// VALUE
260// SCALAR("another value",plain)
261// FLOW-ENTRY
262// FLOW-MAPPING-END
263// STREAM-END
264//
265// A simple key is a key which is not denoted by the '?' indicator. Note that
266// the Scanner still produces the KEY token whenever it encounters a simple key.
267//
268// For scanning block collections, the following tokens are used (note that we
269// repeat KEY and VALUE here):
270//
271// BLOCK-SEQUENCE-START
272// BLOCK-MAPPING-START
273// BLOCK-END
274// BLOCK-ENTRY
275// KEY
276// VALUE
277//
278// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
279// increase that precedes a block collection (cf. the INDENT token in Python).
280// The token BLOCK-END denotes an indentation decrease that ends a block
281// collection (cf. the DEDENT token in Python). However, YAML has some syntax
282// peculiarities that make detecting these tokens more complex.
283//
284// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
285// '-', '?', and ':' correspondingly.
286//
287// The following examples show how the tokens BLOCK-SEQUENCE-START,
288// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
289//
290// 1. Block sequences:
291//
292// - item 1
293// - item 2
294// -
295//   - item 3.1
296//   - item 3.2
297// -
298//   key 1: value 1
299//   key 2: value 2
300//
301// Tokens:
302//
303// STREAM-START(utf-8)
304// BLOCK-SEQUENCE-START
305// BLOCK-ENTRY
306// SCALAR("item 1",plain)
307// BLOCK-ENTRY
308// SCALAR("item 2",plain)
309// BLOCK-ENTRY
310// BLOCK-SEQUENCE-START
311// BLOCK-ENTRY
312// SCALAR("item 3.1",plain)
313// BLOCK-ENTRY
314// SCALAR("item 3.2",plain)
315// BLOCK-END
316// BLOCK-ENTRY
317// BLOCK-MAPPING-START
318// KEY
319// SCALAR("key 1",plain)
320// VALUE
321// SCALAR("value 1",plain)
322// KEY
323// SCALAR("key 2",plain)
324// VALUE
325// SCALAR("value 2",plain)
326// BLOCK-END
327// BLOCK-END
328// STREAM-END
329//
330// 2. Block mappings:
331//
332// a simple key: a value   # The KEY token is produced here.
333// ? a complex key
334// : another value
335// a mapping:
336//   key 1: value 1
337//   key 2: value 2
338// a sequence:
339//   - item 1
340//   - item 2
341//
342// Tokens:
343//
344// STREAM-START(utf-8)
345// BLOCK-MAPPING-START
346// KEY
347// SCALAR("a simple key",plain)
348// VALUE
349// SCALAR("a value",plain)
350// KEY
351// SCALAR("a complex key",plain)
352// VALUE
353// SCALAR("another value",plain)
354// KEY
355// SCALAR("a mapping",plain)
356// BLOCK-MAPPING-START
357// KEY
358// SCALAR("key 1",plain)
359// VALUE
360// SCALAR("value 1",plain)
361// KEY
362// SCALAR("key 2",plain)
363// VALUE
364// SCALAR("value 2",plain)
365// BLOCK-END
366// KEY
367// SCALAR("a sequence",plain)
368// VALUE
369// BLOCK-SEQUENCE-START
370// BLOCK-ENTRY
371// SCALAR("item 1",plain)
372// BLOCK-ENTRY
373// SCALAR("item 2",plain)
374// BLOCK-END
375// BLOCK-END
376// STREAM-END
377//
378// YAML does not always require a new block collection to start on a new
379// line. If the current line contains only '-', '?', and ':' indicators, a new
380// block collection may start at the current line. The following examples
381// illustrate this case:
382//
383// 1. Collections in a sequence:
384//
385// - - item 1
386//   - item 2
387// - key 1: value 1
388//   key 2: value 2
389// - ? complex key
390//   : complex value
391//
392// Tokens:
393//
394// STREAM-START(utf-8)
395// BLOCK-SEQUENCE-START
396// BLOCK-ENTRY
397// BLOCK-SEQUENCE-START
398// BLOCK-ENTRY
399// SCALAR("item 1",plain)
400// BLOCK-ENTRY
401// SCALAR("item 2",plain)
402// BLOCK-END
403// BLOCK-ENTRY
404// BLOCK-MAPPING-START
405// KEY
406// SCALAR("key 1",plain)
407// VALUE
408// SCALAR("value 1",plain)
409// KEY
410// SCALAR("key 2",plain)
411// VALUE
412// SCALAR("value 2",plain)
413// BLOCK-END
414// BLOCK-ENTRY
415// BLOCK-MAPPING-START
416// KEY
417// SCALAR("complex key")
418// VALUE
419// SCALAR("complex value")
420// BLOCK-END
421// BLOCK-END
422// STREAM-END
423//
424// 2. Collections in a mapping:
425//
426// ? a sequence
427// : - item 1
428//   - item 2
429// ? a mapping
430// : key 1: value 1
431//   key 2: value 2
432//
433// Tokens:
434//
435// STREAM-START(utf-8)
436// BLOCK-MAPPING-START
437// KEY
438// SCALAR("a sequence",plain)
439// VALUE
440// BLOCK-SEQUENCE-START
441// BLOCK-ENTRY
442// SCALAR("item 1",plain)
443// BLOCK-ENTRY
444// SCALAR("item 2",plain)
445// BLOCK-END
446// KEY
447// SCALAR("a mapping",plain)
448// VALUE
449// BLOCK-MAPPING-START
450// KEY
451// SCALAR("key 1",plain)
452// VALUE
453// SCALAR("value 1",plain)
454// KEY
455// SCALAR("key 2",plain)
456// VALUE
457// SCALAR("value 2",plain)
458// BLOCK-END
459// BLOCK-END
460// STREAM-END
461//
462// YAML also permits non-indented sequences if they are included in a block
463// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
464//
465// key:
466// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
467// - item 2
468//
469// Tokens:
470//
471// STREAM-START(utf-8)
472// BLOCK-MAPPING-START
473// KEY
474// SCALAR("key",plain)
475// VALUE
476// BLOCK-ENTRY
477// SCALAR("item 1",plain)
478// BLOCK-ENTRY
479// SCALAR("item 2",plain)
480// BLOCK-END
481//
482
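// As a hedged usage sketch, the token stream described above can be observed
// with a small driver; yaml_parser_initialize and yaml_parser_set_input_string
// are assumed to come from apic.go, which is not shown in this diff:
//
//	var parser yaml_parser_t
//	if !yaml_parser_initialize(&parser) {
//		panic("failed to initialize parser")
//	}
//	yaml_parser_set_input_string(&parser, []byte("a: b"))
//	var token yaml_token_t
//	for {
//		if !yaml_parser_scan(&parser, &token) {
//			break // scanner error; see parser.problem
//		}
//		fmt.Println(token.typ)
//		if token.typ == yaml_STREAM_END_TOKEN {
//			break
//		}
//	}
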
483// Ensure that the buffer contains the required number of characters.
484// Return true on success, false on failure (reader error or memory error).
485func cache(parser *yaml_parser_t, length int) bool {
486 // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
487 return parser.unread >= length || yaml_parser_update_buffer(parser, length)
488}
489
490// Advance the buffer pointer.
491func skip(parser *yaml_parser_t) {
492 parser.mark.index++
493 parser.mark.column++
494 parser.unread--
495 parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
496}
497
498func skip_line(parser *yaml_parser_t) {
499 if is_crlf(parser.buffer, parser.buffer_pos) {
500 parser.mark.index += 2
501 parser.mark.column = 0
502 parser.mark.line++
503 parser.unread -= 2
504 parser.buffer_pos += 2
505 } else if is_break(parser.buffer, parser.buffer_pos) {
506 parser.mark.index++
507 parser.mark.column = 0
508 parser.mark.line++
509 parser.unread--
510 parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
511 }
512}
513
514// Copy a character to a string buffer and advance pointers.
515func read(parser *yaml_parser_t, s []byte) []byte {
516 w := width(parser.buffer[parser.buffer_pos])
517 if w == 0 {
518 panic("invalid character sequence")
519 }
520 if len(s) == 0 {
521 s = make([]byte, 0, 32)
522 }
523 if w == 1 && len(s)+w <= cap(s) {
524 s = s[:len(s)+1]
525 s[len(s)-1] = parser.buffer[parser.buffer_pos]
526 parser.buffer_pos++
527 } else {
528 s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
529 parser.buffer_pos += w
530 }
531 parser.mark.index++
532 parser.mark.column++
533 parser.unread--
534 return s
535}
536
537// Copy a line break character to a string buffer and advance pointers.
538func read_line(parser *yaml_parser_t, s []byte) []byte {
539 buf := parser.buffer
540 pos := parser.buffer_pos
541 switch {
542 case buf[pos] == '\r' && buf[pos+1] == '\n':
543 // CR LF . LF
544 s = append(s, '\n')
545 parser.buffer_pos += 2
546 parser.mark.index++
547 parser.unread--
548 case buf[pos] == '\r' || buf[pos] == '\n':
549 // CR|LF . LF
550 s = append(s, '\n')
551 parser.buffer_pos += 1
552 case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
553 // NEL . LF
554 s = append(s, '\n')
555 parser.buffer_pos += 2
556 case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
557 // LS|PS . LS|PS
558 s = append(s, buf[parser.buffer_pos:pos+3]...)
559 parser.buffer_pos += 3
560 default:
561 return s
562 }
563 parser.mark.index++
564 parser.mark.column = 0
565 parser.mark.line++
566 parser.unread--
567 return s
568}
569
570// Get the next token.
571func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
572 // Erase the token object.
573 *token = yaml_token_t{} // [Go] Is this necessary?
574
575 // No tokens after STREAM-END or error.
576 if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
577 return true
578 }
579
580 // Ensure that the tokens queue contains enough tokens.
581 if !parser.token_available {
582 if !yaml_parser_fetch_more_tokens(parser) {
583 return false
584 }
585 }
586
587 // Fetch the next token from the queue.
588 *token = parser.tokens[parser.tokens_head]
589 parser.tokens_head++
590 parser.tokens_parsed++
591 parser.token_available = false
592
593 if token.typ == yaml_STREAM_END_TOKEN {
594 parser.stream_end_produced = true
595 }
596 return true
597}
598
599// Set the scanner error and return false.
600func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
601 parser.error = yaml_SCANNER_ERROR
602 parser.context = context
603 parser.context_mark = context_mark
604 parser.problem = problem
605 parser.problem_mark = parser.mark
606 return false
607}
608
609func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
610 context := "while parsing a tag"
611 if directive {
612 context = "while parsing a %TAG directive"
613 }
614 return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
615}
616
617func trace(args ...interface{}) func() {
618 pargs := append([]interface{}{"+++"}, args...)
619 fmt.Println(pargs...)
620 pargs = append([]interface{}{"---"}, args...)
621 return func() { fmt.Println(pargs...) }
622}
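
// Debug usage (illustrative): `defer trace("fetch_next_token")()` prints
// "+++ fetch_next_token" on entry and "--- fetch_next_token" when the
// deferred closure runs at function exit.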
623
624// Ensure that the tokens queue contains at least one token which can be
625// returned to the Parser.
626func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
627 // While we need more tokens to fetch, do it.
628 for {
629 // Check if we really need to fetch more tokens.
630 need_more_tokens := false
631
632 if parser.tokens_head == len(parser.tokens) {
633 // Queue is empty.
634 need_more_tokens = true
635 } else {
636 // Check if any potential simple key may occupy the head position.
637 if !yaml_parser_stale_simple_keys(parser) {
638 return false
639 }
640
641 for i := range parser.simple_keys {
642 simple_key := &parser.simple_keys[i]
643 if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
644 need_more_tokens = true
645 break
646 }
647 }
648 }
649
650 // We are finished.
651 if !need_more_tokens {
652 break
653 }
654 // Fetch the next token.
655 if !yaml_parser_fetch_next_token(parser) {
656 return false
657 }
658 }
659
660 parser.token_available = true
661 return true
662}
663
664// The dispatcher for token fetchers.
665func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
666 // Ensure that the buffer is initialized.
667 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
668 return false
669 }
670
671 // Check if we just started scanning. Fetch STREAM-START then.
672 if !parser.stream_start_produced {
673 return yaml_parser_fetch_stream_start(parser)
674 }
675
676 // Eat whitespaces and comments until we reach the next token.
677 if !yaml_parser_scan_to_next_token(parser) {
678 return false
679 }
680
681 // Remove obsolete potential simple keys.
682 if !yaml_parser_stale_simple_keys(parser) {
683 return false
684 }
685
686 // Check the indentation level against the current column.
687 if !yaml_parser_unroll_indent(parser, parser.mark.column) {
688 return false
689 }
690
691 // Ensure that the buffer contains at least 4 characters. 4 is the length
692 // of the longest indicators ('--- ' and '... ').
693 if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
694 return false
695 }
696
697 // Is it the end of the stream?
698 if is_z(parser.buffer, parser.buffer_pos) {
699 return yaml_parser_fetch_stream_end(parser)
700 }
701
702 // Is it a directive?
703 if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
704 return yaml_parser_fetch_directive(parser)
705 }
706
707 buf := parser.buffer
708 pos := parser.buffer_pos
709
710 // Is it the document start indicator?
711 if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
712 return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
713 }
714
715 // Is it the document end indicator?
716 if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
717 return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
718 }
719
720 // Is it the flow sequence start indicator?
721 if buf[pos] == '[' {
722 return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
723 }
724
725 // Is it the flow mapping start indicator?
726 if parser.buffer[parser.buffer_pos] == '{' {
727 return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
728 }
729
730 // Is it the flow sequence end indicator?
731 if parser.buffer[parser.buffer_pos] == ']' {
732 return yaml_parser_fetch_flow_collection_end(parser,
733 yaml_FLOW_SEQUENCE_END_TOKEN)
734 }
735
736 // Is it the flow mapping end indicator?
737 if parser.buffer[parser.buffer_pos] == '}' {
738 return yaml_parser_fetch_flow_collection_end(parser,
739 yaml_FLOW_MAPPING_END_TOKEN)
740 }
741
742 // Is it the flow entry indicator?
743 if parser.buffer[parser.buffer_pos] == ',' {
744 return yaml_parser_fetch_flow_entry(parser)
745 }
746
747 // Is it the block entry indicator?
748 if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
749 return yaml_parser_fetch_block_entry(parser)
750 }
751
752 // Is it the key indicator?
753 if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
754 return yaml_parser_fetch_key(parser)
755 }
756
757 // Is it the value indicator?
758 if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
759 return yaml_parser_fetch_value(parser)
760 }
761
762 // Is it an alias?
763 if parser.buffer[parser.buffer_pos] == '*' {
764 return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
765 }
766
767 // Is it an anchor?
768 if parser.buffer[parser.buffer_pos] == '&' {
769 return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
770 }
771
772 // Is it a tag?
773 if parser.buffer[parser.buffer_pos] == '!' {
774 return yaml_parser_fetch_tag(parser)
775 }
776
777 // Is it a literal scalar?
778 if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
779 return yaml_parser_fetch_block_scalar(parser, true)
780 }
781
782 // Is it a folded scalar?
783 if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
784 return yaml_parser_fetch_block_scalar(parser, false)
785 }
786
787 // Is it a single-quoted scalar?
788 if parser.buffer[parser.buffer_pos] == '\'' {
789 return yaml_parser_fetch_flow_scalar(parser, true)
790 }
791
792 // Is it a double-quoted scalar?
793 if parser.buffer[parser.buffer_pos] == '"' {
794 return yaml_parser_fetch_flow_scalar(parser, false)
795 }
796
797 // Is it a plain scalar?
798 //
799 // A plain scalar may start with any non-blank characters except
800 //
801 // '-', '?', ':', ',', '[', ']', '{', '}',
802 // '#', '&', '*', '!', '|', '>', '\'', '\"',
803 // '%', '@', '`'.
804 //
805 // In the block context (and, for the '-' indicator, in the flow context
806 // too), it may also start with the characters
807 //
808 // '-', '?', ':'
809 //
810 // if it is followed by a non-space character.
811 //
812 // The last rule is more restrictive than the specification requires.
813 // [Go] Make this logic more reasonable.
814 //switch parser.buffer[parser.buffer_pos] {
815 //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
816 //}
817	c := buf[pos]
818	is_indicator := c == '-' || c == '?' || c == ':' || c == ',' ||
819		c == '[' || c == ']' || c == '{' || c == '}' ||
820		c == '#' || c == '&' || c == '*' || c == '!' ||
821		c == '|' || c == '>' || c == '\'' || c == '"' ||
822		c == '%' || c == '@' || c == '`'
823	// A plain scalar may also start with '-', '?', or ':' when the next
824	// character is not a blank ('?' and ':' only in the block context).
825	is_plain_start := !(is_blankz(buf, pos) || is_indicator) ||
826		(c == '-' && !is_blank(buf, pos+1)) ||
827		(parser.flow_level == 0 &&
828			(c == '?' || c == ':') &&
829			!is_blankz(buf, pos+1))
830	if is_plain_start {
831		return yaml_parser_fetch_plain_scalar(parser)
832	}
833
834 // If we haven't determined the token type so far, it is an error.
835 return yaml_parser_set_scanner_error(parser,
836 "while scanning for the next token", parser.mark,
837 "found character that cannot start any token")
838}
839
840// Check the list of potential simple keys and remove the positions that
841// cannot contain simple keys anymore.
842func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
843 // Check for a potential simple key for each flow level.
844 for i := range parser.simple_keys {
845 simple_key := &parser.simple_keys[i]
846
847 // The specification requires that a simple key
848 //
849 // - is limited to a single line,
850 // - is shorter than 1024 characters.
851 if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
852
853 // Check if the potential simple key to be removed is required.
854 if simple_key.required {
855 return yaml_parser_set_scanner_error(parser,
856 "while scanning a simple key", simple_key.mark,
857 "could not find expected ':'")
858 }
859 simple_key.possible = false
860 }
861 }
862 return true
863}
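
// For example, while scanning
//
//     foo: bar
//
// the position of "foo" is saved as a potential simple key until the ':'
// is found on the same line. Had a line break or more than 1024 characters
// intervened first, the loop above would have staled and discarded that
// saved position.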
864
865// Check if a simple key may start at the current position and add it if
866// needed.
867func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
868 // A simple key is required at the current position if the scanner is in
869 // the block context and the current column coincides with the indentation
870 // level.
871
872 required := parser.flow_level == 0 && parser.indent == parser.mark.column
873
874 //
875 // If the current position may start a simple key, save it.
876 //
877 if parser.simple_key_allowed {
878 simple_key := yaml_simple_key_t{
879 possible: true,
880 required: required,
881 token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
882 }
883 simple_key.mark = parser.mark
884
885 if !yaml_parser_remove_simple_key(parser) {
886 return false
887 }
888 parser.simple_keys[len(parser.simple_keys)-1] = simple_key
889 }
890 return true
891}
892
893// Remove a potential simple key at the current flow level.
894func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
895 i := len(parser.simple_keys) - 1
896 if parser.simple_keys[i].possible {
897 // If the key is required, it is an error.
898 if parser.simple_keys[i].required {
899 return yaml_parser_set_scanner_error(parser,
900 "while scanning a simple key", parser.simple_keys[i].mark,
901 "could not find expected ':'")
902 }
903 }
904 // Remove the key from the stack.
905 parser.simple_keys[i].possible = false
906 return true
907}
908
909// Increase the flow level and resize the simple key list if needed.
910func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
911 // Reset the simple key on the next level.
912 parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
913
914 // Increase the flow level.
915 parser.flow_level++
916 return true
917}
918
919// Decrease the flow level.
920func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
921 if parser.flow_level > 0 {
922 parser.flow_level--
923 parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
924 }
925 return true
926}
927
928// Push the current indentation level to the stack and set the new level
929 // if the current column is greater than the indentation level. In this case,
930// append or insert the specified token into the token queue.
931func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
932 // In the flow context, do nothing.
933 if parser.flow_level > 0 {
934 return true
935 }
936
937 if parser.indent < column {
938 // Push the current indentation level to the stack and set the new
939 // indentation level.
940 parser.indents = append(parser.indents, parser.indent)
941 parser.indent = column
942
943 // Create a token and insert it into the queue.
944 token := yaml_token_t{
945 typ: typ,
946 start_mark: mark,
947 end_mark: mark,
948 }
949 if number > -1 {
950 number -= parser.tokens_parsed
951 }
952 yaml_insert_token(parser, number, &token)
953 }
954 return true
955}
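
// For example, scanning the block sequence
//
//     - one
//     - two
//
// starts with parser.indent == -1; the first '-' at column 0 rolls the
// indent to 0 and inserts a BLOCK-SEQUENCE-START token ahead of the first
// BLOCK-ENTRY token. The second '-' sits at the same column, so no further
// start token is inserted.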
956
957// Pop indentation levels from the indents stack until the current level
958 // becomes less than or equal to the column. For each indentation level, append
959// the BLOCK-END token.
960func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
961 // In the flow context, do nothing.
962 if parser.flow_level > 0 {
963 return true
964 }
965
966 // Loop through the indentation levels in the stack.
967 for parser.indent > column {
968 // Create a token and append it to the queue.
969 token := yaml_token_t{
970 typ: yaml_BLOCK_END_TOKEN,
971 start_mark: parser.mark,
972 end_mark: parser.mark,
973 }
974 yaml_insert_token(parser, -1, &token)
975
976 // Pop the indentation level.
977 parser.indent = parser.indents[len(parser.indents)-1]
978 parser.indents = parser.indents[:len(parser.indents)-1]
979 }
980 return true
981}
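
// For example, when scanning
//
//     outer:
//       inner: 1
//     next: 2
//
// reaching "next" at column 0 pops the indentation level that was pushed
// for "inner" and emits one BLOCK-END token before the next KEY token.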
982
983// Initialize the scanner and produce the STREAM-START token.
984func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
985
986 // Set the initial indentation.
987 parser.indent = -1
988
989 // Initialize the simple key stack.
990 parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
991
992 // A simple key is allowed at the beginning of the stream.
993 parser.simple_key_allowed = true
994
995 // We have started.
996 parser.stream_start_produced = true
997
998 // Create the STREAM-START token and append it to the queue.
999 token := yaml_token_t{
1000 typ: yaml_STREAM_START_TOKEN,
1001 start_mark: parser.mark,
1002 end_mark: parser.mark,
1003 encoding: parser.encoding,
1004 }
1005 yaml_insert_token(parser, -1, &token)
1006 return true
1007}
1008
1009// Produce the STREAM-END token and shut down the scanner.
1010func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
1011
1012 // Force new line.
1013 if parser.mark.column != 0 {
1014 parser.mark.column = 0
1015 parser.mark.line++
1016 }
1017
1018 // Reset the indentation level.
1019 if !yaml_parser_unroll_indent(parser, -1) {
1020 return false
1021 }
1022
1023 // Reset simple keys.
1024 if !yaml_parser_remove_simple_key(parser) {
1025 return false
1026 }
1027
1028 parser.simple_key_allowed = false
1029
1030 // Create the STREAM-END token and append it to the queue.
1031 token := yaml_token_t{
1032 typ: yaml_STREAM_END_TOKEN,
1033 start_mark: parser.mark,
1034 end_mark: parser.mark,
1035 }
1036 yaml_insert_token(parser, -1, &token)
1037 return true
1038}
1039
1040// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
1041func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
1042 // Reset the indentation level.
1043 if !yaml_parser_unroll_indent(parser, -1) {
1044 return false
1045 }
1046
1047 // Reset simple keys.
1048 if !yaml_parser_remove_simple_key(parser) {
1049 return false
1050 }
1051
1052 parser.simple_key_allowed = false
1053
1054 // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
1055 token := yaml_token_t{}
1056 if !yaml_parser_scan_directive(parser, &token) {
1057 return false
1058 }
1059 // Append the token to the queue.
1060 yaml_insert_token(parser, -1, &token)
1061 return true
1062}
1063
1064// Produce the DOCUMENT-START or DOCUMENT-END token.
1065func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
1066 // Reset the indentation level.
1067 if !yaml_parser_unroll_indent(parser, -1) {
1068 return false
1069 }
1070
1071 // Reset simple keys.
1072 if !yaml_parser_remove_simple_key(parser) {
1073 return false
1074 }
1075
1076 parser.simple_key_allowed = false
1077
1078 // Consume the token.
1079 start_mark := parser.mark
1080
1081 skip(parser)
1082 skip(parser)
1083 skip(parser)
1084
1085 end_mark := parser.mark
1086
1087 // Create the DOCUMENT-START or DOCUMENT-END token.
1088 token := yaml_token_t{
1089 typ: typ,
1090 start_mark: start_mark,
1091 end_mark: end_mark,
1092 }
1093 // Append the token to the queue.
1094 yaml_insert_token(parser, -1, &token)
1095 return true
1096}
1097
1098// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
1099func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
1100 // The indicators '[' and '{' may start a simple key.
1101 if !yaml_parser_save_simple_key(parser) {
1102 return false
1103 }
1104
1105 // Increase the flow level.
1106 if !yaml_parser_increase_flow_level(parser) {
1107 return false
1108 }
1109
1110 // A simple key may follow the indicators '[' and '{'.
1111 parser.simple_key_allowed = true
1112
1113 // Consume the token.
1114 start_mark := parser.mark
1115 skip(parser)
1116 end_mark := parser.mark
1117
1118 // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
1119 token := yaml_token_t{
1120 typ: typ,
1121 start_mark: start_mark,
1122 end_mark: end_mark,
1123 }
1124 // Append the token to the queue.
1125 yaml_insert_token(parser, -1, &token)
1126 return true
1127}
1128
1129// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
1130func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
1131 // Reset any potential simple key on the current flow level.
1132 if !yaml_parser_remove_simple_key(parser) {
1133 return false
1134 }
1135
1136 // Decrease the flow level.
1137 if !yaml_parser_decrease_flow_level(parser) {
1138 return false
1139 }
1140
1141 // No simple keys after the indicators ']' and '}'.
1142 parser.simple_key_allowed = false
1143
1144 // Consume the token.
1145
1146 start_mark := parser.mark
1147 skip(parser)
1148 end_mark := parser.mark
1149
1150 // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
1151 token := yaml_token_t{
1152 typ: typ,
1153 start_mark: start_mark,
1154 end_mark: end_mark,
1155 }
1156 // Append the token to the queue.
1157 yaml_insert_token(parser, -1, &token)
1158 return true
1159}
1160
1161// Produce the FLOW-ENTRY token.
1162func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
1163 // Reset any potential simple keys on the current flow level.
1164 if !yaml_parser_remove_simple_key(parser) {
1165 return false
1166 }
1167
1168 // Simple keys are allowed after ','.
1169 parser.simple_key_allowed = true
1170
1171 // Consume the token.
1172 start_mark := parser.mark
1173 skip(parser)
1174 end_mark := parser.mark
1175
1176 // Create the FLOW-ENTRY token and append it to the queue.
1177 token := yaml_token_t{
1178 typ: yaml_FLOW_ENTRY_TOKEN,
1179 start_mark: start_mark,
1180 end_mark: end_mark,
1181 }
1182 yaml_insert_token(parser, -1, &token)
1183 return true
1184}
1185
1186// Produce the BLOCK-ENTRY token.
1187func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
1188 // Check if the scanner is in the block context.
1189 if parser.flow_level == 0 {
1190 // Check if we are allowed to start a new entry.
1191 if !parser.simple_key_allowed {
1192 return yaml_parser_set_scanner_error(parser, "", parser.mark,
1193 "block sequence entries are not allowed in this context")
1194 }
1195 // Add the BLOCK-SEQUENCE-START token if needed.
1196 if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
1197 return false
1198 }
1199 } else {
1200 // It is an error for the '-' indicator to occur in the flow context,
1201 // but we let the Parser detect and report it, because the Parser
1202 // is able to point to the context.
1203 }
1204
1205 // Reset any potential simple keys on the current flow level.
1206 if !yaml_parser_remove_simple_key(parser) {
1207 return false
1208 }
1209
1210 // Simple keys are allowed after '-'.
1211 parser.simple_key_allowed = true
1212
1213 // Consume the token.
1214 start_mark := parser.mark
1215 skip(parser)
1216 end_mark := parser.mark
1217
1218 // Create the BLOCK-ENTRY token and append it to the queue.
1219 token := yaml_token_t{
1220 typ: yaml_BLOCK_ENTRY_TOKEN,
1221 start_mark: start_mark,
1222 end_mark: end_mark,
1223 }
1224 yaml_insert_token(parser, -1, &token)
1225 return true
1226}
1227
1228// Produce the KEY token.
1229func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
1230
1231 // In the block context, additional checks are required.
1232 if parser.flow_level == 0 {
1233 // Check if we are allowed to start a new key (not necessarily simple).
1234 if !parser.simple_key_allowed {
1235 return yaml_parser_set_scanner_error(parser, "", parser.mark,
1236 "mapping keys are not allowed in this context")
1237 }
1238 // Add the BLOCK-MAPPING-START token if needed.
1239 if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
1240 return false
1241 }
1242 }
1243
1244 // Reset any potential simple keys on the current flow level.
1245 if !yaml_parser_remove_simple_key(parser) {
1246 return false
1247 }
1248
1249 // Simple keys are allowed after '?' in the block context.
1250 parser.simple_key_allowed = parser.flow_level == 0
1251
1252 // Consume the token.
1253 start_mark := parser.mark
1254 skip(parser)
1255 end_mark := parser.mark
1256
1257 // Create the KEY token and append it to the queue.
1258 token := yaml_token_t{
1259 typ: yaml_KEY_TOKEN,
1260 start_mark: start_mark,
1261 end_mark: end_mark,
1262 }
1263 yaml_insert_token(parser, -1, &token)
1264 return true
1265}
1266
1267// Produce the VALUE token.
1268func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
1269
1270 simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
1271
1272 // Have we found a simple key?
1273 if simple_key.possible {
1274 // Create the KEY token and insert it into the queue.
1275 token := yaml_token_t{
1276 typ: yaml_KEY_TOKEN,
1277 start_mark: simple_key.mark,
1278 end_mark: simple_key.mark,
1279 }
1280 yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
1281
1282 // In the block context, we may need to add the BLOCK-MAPPING-START token.
1283 if !yaml_parser_roll_indent(parser, simple_key.mark.column,
1284 simple_key.token_number,
1285 yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
1286 return false
1287 }
1288
1289 // Remove the simple key.
1290 simple_key.possible = false
1291
1292 // A simple key cannot follow another simple key.
1293 parser.simple_key_allowed = false
1294
1295 } else {
1296 // The ':' indicator follows a complex key.
1297
1298 // In the block context, extra checks are required.
1299 if parser.flow_level == 0 {
1300
1301 // Check if we are allowed to start a complex value.
1302 if !parser.simple_key_allowed {
1303 return yaml_parser_set_scanner_error(parser, "", parser.mark,
1304 "mapping values are not allowed in this context")
1305 }
1306
1307 // Add the BLOCK-MAPPING-START token if needed.
1308 if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
1309 return false
1310 }
1311 }
1312
1313 // Simple keys after ':' are allowed in the block context.
1314 parser.simple_key_allowed = parser.flow_level == 0
1315 }
1316
1317 // Consume the token.
1318 start_mark := parser.mark
1319 skip(parser)
1320 end_mark := parser.mark
1321
1322 // Create the VALUE token and append it to the queue.
1323 token := yaml_token_t{
1324 typ: yaml_VALUE_TOKEN,
1325 start_mark: start_mark,
1326 end_mark: end_mark,
1327 }
1328 yaml_insert_token(parser, -1, &token)
1329 return true
1330}
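
// For example, for the line
//
//     foo: bar
//
// the scanner first saves the position of "foo" as a potential simple key
// and emits SCALAR("foo"). When the ':' is reached, a KEY token (and, for a
// new mapping, a BLOCK-MAPPING-START token) is inserted retroactively at
// the saved position, so the queue reads: BLOCK-MAPPING-START, KEY,
// SCALAR("foo"), VALUE, SCALAR("bar").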
1331
1332// Produce the ALIAS or ANCHOR token.
1333func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
1334 // An anchor or an alias could be a simple key.
1335 if !yaml_parser_save_simple_key(parser) {
1336 return false
1337 }
1338
1339 // A simple key cannot follow an anchor or an alias.
1340 parser.simple_key_allowed = false
1341
1342 // Create the ALIAS or ANCHOR token and append it to the queue.
1343 var token yaml_token_t
1344 if !yaml_parser_scan_anchor(parser, &token, typ) {
1345 return false
1346 }
1347 yaml_insert_token(parser, -1, &token)
1348 return true
1349}
1350
1351// Produce the TAG token.
1352func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
1353 // A tag could be a simple key.
1354 if !yaml_parser_save_simple_key(parser) {
1355 return false
1356 }
1357
1358 // A simple key cannot follow a tag.
1359 parser.simple_key_allowed = false
1360
1361 // Create the TAG token and append it to the queue.
1362 var token yaml_token_t
1363 if !yaml_parser_scan_tag(parser, &token) {
1364 return false
1365 }
1366 yaml_insert_token(parser, -1, &token)
1367 return true
1368}
1369
1370// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
1371func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
1372 // Remove any potential simple keys.
1373 if !yaml_parser_remove_simple_key(parser) {
1374 return false
1375 }
1376
1377 // A simple key may follow a block scalar.
1378 parser.simple_key_allowed = true
1379
1380 // Create the SCALAR token and append it to the queue.
1381 var token yaml_token_t
1382 if !yaml_parser_scan_block_scalar(parser, &token, literal) {
1383 return false
1384 }
1385 yaml_insert_token(parser, -1, &token)
1386 return true
1387}
1388
1389// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
1390func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
1391 // A flow scalar could be a simple key.
1392 if !yaml_parser_save_simple_key(parser) {
1393 return false
1394 }
1395
1396 // A simple key cannot follow a flow scalar.
1397 parser.simple_key_allowed = false
1398
1399 // Create the SCALAR token and append it to the queue.
1400 var token yaml_token_t
1401 if !yaml_parser_scan_flow_scalar(parser, &token, single) {
1402 return false
1403 }
1404 yaml_insert_token(parser, -1, &token)
1405 return true
1406}
1407
1408// Produce the SCALAR(...,plain) token.
1409func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
1410 // A plain scalar could be a simple key.
1411 if !yaml_parser_save_simple_key(parser) {
1412 return false
1413 }
1414
1415 // A simple key cannot follow a plain scalar.
1416 parser.simple_key_allowed = false
1417
1418 // Create the SCALAR token and append it to the queue.
1419 var token yaml_token_t
1420 if !yaml_parser_scan_plain_scalar(parser, &token) {
1421 return false
1422 }
1423 yaml_insert_token(parser, -1, &token)
1424 return true
1425}
1426
1427// Eat whitespaces and comments until the next token is found.
1428func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
1429
1430 // Loop until the next token is found.
1431 for {
1432 // Allow the BOM mark to start a line.
1433 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1434 return false
1435 }
1436 if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
1437 skip(parser)
1438 }
1439
1440 // Eat whitespaces.
1441 // Tabs are allowed:
1442 // - in the flow context
1443 // - in the block context, but not at the beginning of the line or
1444 // after '-', '?', or ':' (complex value).
1445 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1446 return false
1447 }
1448
1449 for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
1450 skip(parser)
1451 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1452 return false
1453 }
1454 }
1455
1456 // Eat a comment until a line break.
1457 if parser.buffer[parser.buffer_pos] == '#' {
1458 for !is_breakz(parser.buffer, parser.buffer_pos) {
1459 skip(parser)
1460 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1461 return false
1462 }
1463 }
1464 }
1465
1466 // If it is a line break, eat it.
1467 if is_break(parser.buffer, parser.buffer_pos) {
1468 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
1469 return false
1470 }
1471 skip_line(parser)
1472
1473 // In the block context, a new line may start a simple key.
1474 if parser.flow_level == 0 {
1475 parser.simple_key_allowed = true
1476 }
1477 } else {
1478 break // We have found a token.
1479 }
1480 }
1481
1482 return true
1483}
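
// For example, given the input
//
//     # a comment
//        foo
//
// this function skips the comment, its line break, and the leading spaces,
// leaving the scanner positioned at 'f' with simple keys allowed again in
// the block context.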
1484
1485// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
1486//
1487// Scope:
1488// %YAML 1.1 # a comment \n
1489// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1490// %TAG !yaml! tag:yaml.org,2002: \n
1491// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1492//
1493func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
1494 // Eat '%'.
1495 start_mark := parser.mark
1496 skip(parser)
1497
1498 // Scan the directive name.
1499 var name []byte
1500 if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
1501 return false
1502 }
1503
1504 // Is it a YAML directive?
1505 if bytes.Equal(name, []byte("YAML")) {
1506 // Scan the VERSION directive value.
1507 var major, minor int8
1508 if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
1509 return false
1510 }
1511 end_mark := parser.mark
1512
1513 // Create a VERSION-DIRECTIVE token.
1514 *token = yaml_token_t{
1515 typ: yaml_VERSION_DIRECTIVE_TOKEN,
1516 start_mark: start_mark,
1517 end_mark: end_mark,
1518 major: major,
1519 minor: minor,
1520 }
1521
1522 // Is it a TAG directive?
1523 } else if bytes.Equal(name, []byte("TAG")) {
1524 // Scan the TAG directive value.
1525 var handle, prefix []byte
1526 if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
1527 return false
1528 }
1529 end_mark := parser.mark
1530
1531 // Create a TAG-DIRECTIVE token.
1532 *token = yaml_token_t{
1533 typ: yaml_TAG_DIRECTIVE_TOKEN,
1534 start_mark: start_mark,
1535 end_mark: end_mark,
1536 value: handle,
1537 prefix: prefix,
1538 }
1539
1540 // Unknown directive.
1541 } else {
1542 yaml_parser_set_scanner_error(parser, "while scanning a directive",
1543 start_mark, "found unknown directive name")
1544 return false
1545 }
1546
1547 // Eat the rest of the line including any comments.
1548 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1549 return false
1550 }
1551
1552 for is_blank(parser.buffer, parser.buffer_pos) {
1553 skip(parser)
1554 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1555 return false
1556 }
1557 }
1558
1559 if parser.buffer[parser.buffer_pos] == '#' {
1560 for !is_breakz(parser.buffer, parser.buffer_pos) {
1561 skip(parser)
1562 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1563 return false
1564 }
1565 }
1566 }
1567
1568 // Check if we are at the end of the line.
1569 if !is_breakz(parser.buffer, parser.buffer_pos) {
1570 yaml_parser_set_scanner_error(parser, "while scanning a directive",
1571 start_mark, "did not find expected comment or line break")
1572 return false
1573 }
1574
1575 // Eat a line break.
1576 if is_break(parser.buffer, parser.buffer_pos) {
1577 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
1578 return false
1579 }
1580 skip_line(parser)
1581 }
1582
1583 return true
1584}
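
// For example, the directive line
//
//     %YAML 1.1   # a comment
//
// produces a VERSION-DIRECTIVE token with major == 1 and minor == 1, while
//
//     %TAG !e! tag:example.com,2000:app/
//
// produces a TAG-DIRECTIVE token with value "!e!" and prefix
// "tag:example.com,2000:app/".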
1585
1586// Scan the directive name.
1587//
1588// Scope:
1589// %YAML 1.1 # a comment \n
1590// ^^^^
1591// %TAG !yaml! tag:yaml.org,2002: \n
1592// ^^^
1593//
1594func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
1595 // Consume the directive name.
1596 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1597 return false
1598 }
1599
1600 var s []byte
1601 for is_alpha(parser.buffer, parser.buffer_pos) {
1602 s = read(parser, s)
1603 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1604 return false
1605 }
1606 }
1607
1608 // Check if the name is empty.
1609 if len(s) == 0 {
1610 yaml_parser_set_scanner_error(parser, "while scanning a directive",
1611 start_mark, "could not find expected directive name")
1612 return false
1613 }
1614
1615 // Check for a blank character after the name.
1616 if !is_blankz(parser.buffer, parser.buffer_pos) {
1617 yaml_parser_set_scanner_error(parser, "while scanning a directive",
1618 start_mark, "found unexpected non-alphabetical character")
1619 return false
1620 }
1621 *name = s
1622 return true
1623}
1624
1625// Scan the value of VERSION-DIRECTIVE.
1626//
1627// Scope:
1628// %YAML 1.1 # a comment \n
1629// ^^^^^^
1630func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
1631 // Eat whitespaces.
1632 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1633 return false
1634 }
1635 for is_blank(parser.buffer, parser.buffer_pos) {
1636 skip(parser)
1637 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1638 return false
1639 }
1640 }
1641
1642 // Consume the major version number.
1643 if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
1644 return false
1645 }
1646
1647 // Eat '.'.
1648 if parser.buffer[parser.buffer_pos] != '.' {
1649 return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
1650 start_mark, "did not find expected digit or '.' character")
1651 }
1652
1653 skip(parser)
1654
1655 // Consume the minor version number.
1656 if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
1657 return false
1658 }
1659 return true
1660}
1661
1662const max_number_length = 2
1663
1664// Scan the version number of VERSION-DIRECTIVE.
1665//
1666// Scope:
1667// %YAML 1.1 # a comment \n
1668// ^
1669// %YAML 1.1 # a comment \n
1670// ^
1671func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
1672
1673 // Repeat while the next character is a digit.
1674 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1675 return false
1676 }
1677 var value, length int8
1678 for is_digit(parser.buffer, parser.buffer_pos) {
1679 // Check if the number is too long.
1680 length++
1681 if length > max_number_length {
1682 return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
1683 start_mark, "found an extremely long version number")
1684 }
1685 value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
1686 skip(parser)
1687 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1688 return false
1689 }
1690 }
1691
1692 // Check if the number was present.
1693 if length == 0 {
1694 return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
1695 start_mark, "did not find expected version number")
1696 }
1697 *number = value
1698 return true
1699}
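
// For example, scanning "1.1" calls this function twice: once for the
// major number and once for the minor number, yielding 1 both times. With
// max_number_length == 2, a component such as "123" is rejected as an
// overly long version number.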
1700
1701// Scan the value of a TAG-DIRECTIVE token.
1702//
1703// Scope:
1704// %TAG !yaml! tag:yaml.org,2002: \n
1705// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1706//
1707func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
1708 var handle_value, prefix_value []byte
1709
1710 // Eat whitespaces.
1711 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1712 return false
1713 }
1714
1715 for is_blank(parser.buffer, parser.buffer_pos) {
1716 skip(parser)
1717 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1718 return false
1719 }
1720 }
1721
1722 // Scan a handle.
1723 if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
1724 return false
1725 }
1726
1727 // Expect a whitespace.
1728 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1729 return false
1730 }
1731 if !is_blank(parser.buffer, parser.buffer_pos) {
1732 yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
1733 start_mark, "did not find expected whitespace")
1734 return false
1735 }
1736
1737 // Eat whitespaces.
1738 for is_blank(parser.buffer, parser.buffer_pos) {
1739 skip(parser)
1740 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1741 return false
1742 }
1743 }
1744
1745 // Scan a prefix.
1746 if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
1747 return false
1748 }
1749
1750 // Expect a whitespace or line break.
1751 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1752 return false
1753 }
1754 if !is_blankz(parser.buffer, parser.buffer_pos) {
1755 yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
1756 start_mark, "did not find expected whitespace or line break")
1757 return false
1758 }
1759
1760 *handle = handle_value
1761 *prefix = prefix_value
1762 return true
1763}
1764
1765func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
1766 var s []byte
1767
1768 // Eat the indicator character.
1769 start_mark := parser.mark
1770 skip(parser)
1771
1772 // Consume the value.
1773 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1774 return false
1775 }
1776
1777 for is_alpha(parser.buffer, parser.buffer_pos) {
1778 s = read(parser, s)
1779 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1780 return false
1781 }
1782 }
1783
1784 end_mark := parser.mark
1785
1786 /*
1787 * Check if the length of the anchor is greater than 0 and it is followed by
1788 * a whitespace character or one of the indicators:
1789 *
1790 * '?', ':', ',', ']', '}', '%', '@', '`'.
1791 */
1792
1793 if len(s) == 0 ||
1794 !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
1795 parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
1796 parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
1797 parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
1798 parser.buffer[parser.buffer_pos] == '`') {
1799 context := "while scanning an alias"
1800 if typ == yaml_ANCHOR_TOKEN {
1801 context = "while scanning an anchor"
1802 }
1803 yaml_parser_set_scanner_error(parser, context, start_mark,
1804 "did not find expected alphabetic or numeric character")
1805 return false
1806 }
1807
1808 // Create a token.
1809 *token = yaml_token_t{
1810 typ: typ,
1811 start_mark: start_mark,
1812 end_mark: end_mark,
1813 value: s,
1814 }
1815
1816 return true
1817}
1818
1819/*
1820 * Scan a TAG token.
1821 */
1822
1823func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
1824 var handle, suffix []byte
1825
1826 start_mark := parser.mark
1827
1828 // Check if the tag is in the canonical form.
1829 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
1830 return false
1831 }
1832
1833 if parser.buffer[parser.buffer_pos+1] == '<' {
1834 // Keep the handle as ''
1835
1836 // Eat '!<'
1837 skip(parser)
1838 skip(parser)
1839
1840 // Consume the tag value.
1841 if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
1842 return false
1843 }
1844
1845 // Check for '>' and eat it.
1846 if parser.buffer[parser.buffer_pos] != '>' {
1847 yaml_parser_set_scanner_error(parser, "while scanning a tag",
1848 start_mark, "did not find the expected '>'")
1849 return false
1850 }
1851
1852 skip(parser)
1853 } else {
1854 // The tag has either the '!suffix' or the '!handle!suffix' form.
1855
1856 // First, try to scan a handle.
1857 if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
1858 return false
1859 }
1860
1861 // Check if it is, indeed, a handle.
1862 if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
1863 // Scan the suffix now.
1864 if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
1865 return false
1866 }
1867 } else {
1868 // It wasn't a handle after all. Scan the rest of the tag.
1869 if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
1870 return false
1871 }
1872
1873 // Set the handle to '!'.
1874 handle = []byte{'!'}
1875
1876 // A special case: the '!' tag. Set the handle to '' and the
1877 // suffix to '!'.
1878 if len(suffix) == 0 {
1879 handle, suffix = suffix, handle
1880 }
1881 }
1882 }
1883
1884 // Check the character which ends the tag.
1885 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1886 return false
1887 }
1888 if !is_blankz(parser.buffer, parser.buffer_pos) {
1889 yaml_parser_set_scanner_error(parser, "while scanning a tag",
1890 start_mark, "did not find expected whitespace or line break")
1891 return false
1892 }
1893
1894 end_mark := parser.mark
1895
1896 // Create a token.
1897 *token = yaml_token_t{
1898 typ: yaml_TAG_TOKEN,
1899 start_mark: start_mark,
1900 end_mark: end_mark,
1901 value: handle,
1902 suffix: suffix,
1903 }
1904 return true
1905}
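
// For example, the three tag forms scan as follows:
//
//     !<tag:yaml.org,2002:str>  ->  handle "",   suffix "tag:yaml.org,2002:str"
//     !!str                     ->  handle "!!", suffix "str"
//     !local                    ->  handle "!",  suffix "local"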
1906
1907// Scan a tag handle.
1908func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
1909 // Check the initial '!' character.
1910 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1911 return false
1912 }
1913 if parser.buffer[parser.buffer_pos] != '!' {
1914 yaml_parser_set_scanner_tag_error(parser, directive,
1915 start_mark, "did not find expected '!'")
1916 return false
1917 }
1918
1919 var s []byte
1920
1921 // Copy the '!' character.
1922 s = read(parser, s)
1923
1924 // Copy all subsequent alphabetical and numerical characters.
1925 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1926 return false
1927 }
1928 for is_alpha(parser.buffer, parser.buffer_pos) {
1929 s = read(parser, s)
1930 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1931 return false
1932 }
1933 }
1934
1935 // Check if the trailing character is '!' and copy it.
1936 if parser.buffer[parser.buffer_pos] == '!' {
1937 s = read(parser, s)
1938 } else {
1939 // It's either the '!' tag or not really a tag handle. If it's a %TAG
1940 // directive, it's an error. If it's a tag token, it must be part of the URI.
1941 if directive && string(s) != "!" {
1942 yaml_parser_set_scanner_tag_error(parser, directive,
1943 start_mark, "did not find expected '!'")
1944 return false
1945 }
1946 }
1947
1948 *handle = s
1949 return true
1950}
1951
1952// Scan a tag.
1953func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
1954 //size_t length = head ? strlen((char *)head) : 0
1955 var s []byte
1956 hasTag := len(head) > 0
1957
1958 // Copy the head if needed.
1959 //
1960 // Note that we don't copy the leading '!' character.
1961 if len(head) > 1 {
1962 s = append(s, head[1:]...)
1963 }
1964
1965 // Scan the tag.
1966 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1967 return false
1968 }
1969
1970 // The set of characters that may appear in URI is as follows:
1971 //
1972 // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
1973 // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
1974 // '%'.
1975 // [Go] Convert this into more reasonable logic.
1976 for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
1977 parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
1978 parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
1979 parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
1980 parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
1981 parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
1982 parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
1983 parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
1984 parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
1985 parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
1986 parser.buffer[parser.buffer_pos] == '%' {
1987 // Check if it is a URI-escape sequence.
1988 if parser.buffer[parser.buffer_pos] == '%' {
1989 if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
1990 return false
1991 }
1992 } else {
1993 s = read(parser, s)
1994 }
1995 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
1996 return false
1997 }
1998 hasTag = true
1999 }
2000
2001 if !hasTag {
2002 yaml_parser_set_scanner_tag_error(parser, directive,
2003 start_mark, "did not find expected tag URI")
2004 return false
2005 }
2006 *uri = s
2007 return true
2008}
2009
2010 // Decode a URI-escape sequence corresponding to a single UTF-8 character.
2011func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
2012
2013 // Decode the required number of characters.
2014 w := 1024
2015 for w > 0 {
2016 // Check for a URI-escaped octet.
2017 if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
2018 return false
2019 }
2020
2021 if !(parser.buffer[parser.buffer_pos] == '%' &&
2022 is_hex(parser.buffer, parser.buffer_pos+1) &&
2023 is_hex(parser.buffer, parser.buffer_pos+2)) {
2024 return yaml_parser_set_scanner_tag_error(parser, directive,
2025 start_mark, "did not find URI escaped octet")
2026 }
2027
2028 // Get the octet.
2029 octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
2030
2031 // If it is the leading octet, determine the length of the UTF-8 sequence.
2032 if w == 1024 {
2033 w = width(octet)
2034 if w == 0 {
2035 return yaml_parser_set_scanner_tag_error(parser, directive,
2036 start_mark, "found an incorrect leading UTF-8 octet")
2037 }
2038 } else {
2039 // Check if the trailing octet is correct.
2040 if octet&0xC0 != 0x80 {
2041 return yaml_parser_set_scanner_tag_error(parser, directive,
2042 start_mark, "found an incorrect trailing UTF-8 octet")
2043 }
2044 }
2045
2046 // Copy the octet and move the pointers.
2047 *s = append(*s, octet)
2048 skip(parser)
2049 skip(parser)
2050 skip(parser)
2051 w--
2052 }
2053 return true
2054}
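
// For example, the escaped sequence "%C3%A9" in a tag URI decodes to the
// two octets 0xC3 0xA9, i.e. the UTF-8 encoding of 'é': the leading octet
// 0xC3 announces a two-octet sequence, and 0xA9 is checked to be a valid
// trailing octet (of the form 10xxxxxx) before both are appended.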
2055
2056// Scan a block scalar.
2057func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
2058 // Eat the indicator '|' or '>'.
2059 start_mark := parser.mark
2060 skip(parser)
2061
2062 // Scan the additional block scalar indicators.
2063 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2064 return false
2065 }
2066
2067 // Check for a chomping indicator.
2068 var chomping, increment int
2069 if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
2070 // Set the chomping method and eat the indicator.
2071 if parser.buffer[parser.buffer_pos] == '+' {
2072 chomping = +1
2073 } else {
2074 chomping = -1
2075 }
2076 skip(parser)
2077
2078 // Check for an indentation indicator.
2079 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2080 return false
2081 }
2082 if is_digit(parser.buffer, parser.buffer_pos) {
2083 // Check that the indentation is greater than 0.
2084 if parser.buffer[parser.buffer_pos] == '0' {
2085 yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
2086 start_mark, "found an indentation indicator equal to 0")
2087 return false
2088 }
2089
2090 // Get the indentation level and eat the indicator.
2091 increment = as_digit(parser.buffer, parser.buffer_pos)
2092 skip(parser)
2093 }
2094
2095 } else if is_digit(parser.buffer, parser.buffer_pos) {
2096 // Do the same as above, but in the opposite order.
2097
2098 if parser.buffer[parser.buffer_pos] == '0' {
2099 yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
2100 start_mark, "found an indentation indicator equal to 0")
2101 return false
2102 }
2103 increment = as_digit(parser.buffer, parser.buffer_pos)
2104 skip(parser)
2105
2106 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2107 return false
2108 }
2109 if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
2110 if parser.buffer[parser.buffer_pos] == '+' {
2111 chomping = +1
2112 } else {
2113 chomping = -1
2114 }
2115 skip(parser)
2116 }
2117 }
2118
2119 // Eat whitespaces and comments to the end of the line.
2120 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2121 return false
2122 }
2123 for is_blank(parser.buffer, parser.buffer_pos) {
2124 skip(parser)
2125 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2126 return false
2127 }
2128 }
2129 if parser.buffer[parser.buffer_pos] == '#' {
2130 for !is_breakz(parser.buffer, parser.buffer_pos) {
2131 skip(parser)
2132 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2133 return false
2134 }
2135 }
2136 }
2137
2138 // Check if we are at the end of the line.
2139 if !is_breakz(parser.buffer, parser.buffer_pos) {
2140 yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
2141 start_mark, "did not find expected comment or line break")
2142 return false
2143 }
2144
2145 // Eat a line break.
2146 if is_break(parser.buffer, parser.buffer_pos) {
2147 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2148 return false
2149 }
2150 skip_line(parser)
2151 }
2152
2153 end_mark := parser.mark
2154
2155 // Set the indentation level if it was specified.
2156 var indent int
2157 if increment > 0 {
2158 if parser.indent >= 0 {
2159 indent = parser.indent + increment
2160 } else {
2161 indent = increment
2162 }
2163 }
2164
2165 // Scan the leading line breaks and determine the indentation level if needed.
2166 var s, leading_break, trailing_breaks []byte
2167 if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
2168 return false
2169 }
2170
2171 // Scan the block scalar content.
2172 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2173 return false
2174 }
2175 var leading_blank, trailing_blank bool
2176 for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
2177 // We are at the beginning of a non-empty line.
2178
2179 // Is it a trailing whitespace?
2180 trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
2181
2182 // Check if we need to fold the leading line break.
2183 if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
2184 // Do we need to join the lines by space?
2185 if len(trailing_breaks) == 0 {
2186 s = append(s, ' ')
2187 }
2188 } else {
2189 s = append(s, leading_break...)
2190 }
2191 leading_break = leading_break[:0]
2192
2193 // Append the remaining line breaks.
2194 s = append(s, trailing_breaks...)
2195 trailing_breaks = trailing_breaks[:0]
2196
2197 // Is it a leading whitespace?
2198 leading_blank = is_blank(parser.buffer, parser.buffer_pos)
2199
2200 // Consume the current line.
2201 for !is_breakz(parser.buffer, parser.buffer_pos) {
2202 s = read(parser, s)
2203 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2204 return false
2205 }
2206 }
2207
2208 // Consume the line break.
2209 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2210 return false
2211 }
2212
2213 leading_break = read_line(parser, leading_break)
2214
2215 // Eat the following indentation spaces and line breaks.
2216 if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
2217 return false
2218 }
2219 }
2220
2221 // Chomp the tail.
2222 if chomping != -1 {
2223 s = append(s, leading_break...)
2224 }
2225 if chomping == 1 {
2226 s = append(s, trailing_breaks...)
2227 }
2228
2229 // Create a token.
2230 *token = yaml_token_t{
2231 typ: yaml_SCALAR_TOKEN,
2232 start_mark: start_mark,
2233 end_mark: end_mark,
2234 value: s,
2235 style: yaml_LITERAL_SCALAR_STYLE,
2236 }
2237 if !literal {
2238 token.style = yaml_FOLDED_SCALAR_STYLE
2239 }
2240 return true
2241}
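
// For example, with the default "clip" chomping the block scalars
//
//     |            >
//       a            a
//       b            b
//
// scan to "a\nb\n" (literal keeps the interior line break) and "a b\n"
// (folded joins the lines with a space). A '-' indicator would strip the
// trailing break; a '+' indicator would also keep any extra trailing
// breaks.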
2242
2243// Scan indentation spaces and line breaks for a block scalar. Determine the
2244// indentation level if needed.
2245func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
2246 *end_mark = parser.mark
2247
2248 // Eat the indentation spaces and line breaks.
2249 max_indent := 0
2250 for {
2251 // Eat the indentation spaces.
2252 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2253 return false
2254 }
2255 for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
2256 skip(parser)
2257 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2258 return false
2259 }
2260 }
2261 if parser.mark.column > max_indent {
2262 max_indent = parser.mark.column
2263 }
2264
2265 // Check for a tab character messing up the indentation.
2266 if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
2267 return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
2268 start_mark, "found a tab character where an indentation space is expected")
2269 }
2270
2271 // Have we found a non-empty line?
2272 if !is_break(parser.buffer, parser.buffer_pos) {
2273 break
2274 }
2275
2276 // Consume the line break.
2277 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2278 return false
2279 }
2280 // [Go] Should really be returning breaks instead.
2281 *breaks = read_line(parser, *breaks)
2282 *end_mark = parser.mark
2283 }
2284
2285 // Determine the indentation level if needed.
2286 if *indent == 0 {
2287 *indent = max_indent
2288 if *indent < parser.indent+1 {
2289 *indent = parser.indent + 1
2290 }
2291 if *indent < 1 {
2292 *indent = 1
2293 }
2294 }
2295 return true
2296}
2297
2298// Scan a quoted scalar.
2299func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
2300 // Eat the left quote.
2301 start_mark := parser.mark
2302 skip(parser)
2303
2304 // Consume the content of the quoted scalar.
2305 var s, leading_break, trailing_breaks, whitespaces []byte
2306 for {
2307 // Check that there are no document indicators at the beginning of the line.
2308 if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
2309 return false
2310 }
2311
2312 if parser.mark.column == 0 &&
2313 ((parser.buffer[parser.buffer_pos+0] == '-' &&
2314 parser.buffer[parser.buffer_pos+1] == '-' &&
2315 parser.buffer[parser.buffer_pos+2] == '-') ||
2316 (parser.buffer[parser.buffer_pos+0] == '.' &&
2317 parser.buffer[parser.buffer_pos+1] == '.' &&
2318 parser.buffer[parser.buffer_pos+2] == '.')) &&
2319 is_blankz(parser.buffer, parser.buffer_pos+3) {
2320 yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
2321 start_mark, "found unexpected document indicator")
2322 return false
2323 }
2324
2325 // Check for EOF.
2326 if is_z(parser.buffer, parser.buffer_pos) {
2327 yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
2328 start_mark, "found unexpected end of stream")
2329 return false
2330 }
2331
2332 // Consume non-blank characters.
2333 leading_blanks := false
2334 for !is_blankz(parser.buffer, parser.buffer_pos) {
2335 if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
2336 // It is an escaped single quote.
2337 s = append(s, '\'')
2338 skip(parser)
2339 skip(parser)
2340
2341 } else if single && parser.buffer[parser.buffer_pos] == '\'' {
2342 // It is a right single quote.
2343 break
2344 } else if !single && parser.buffer[parser.buffer_pos] == '"' {
2345 // It is a right double quote.
2346 break
2347
2348 } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
2349 // It is an escaped line break.
2350 if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
2351 return false
2352 }
2353 skip(parser)
2354 skip_line(parser)
2355 leading_blanks = true
2356 break
2357
2358 } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
2359 // It is an escape sequence.
2360 code_length := 0
2361
2362 // Check the escape character.
2363 switch parser.buffer[parser.buffer_pos+1] {
2364 case '0':
2365 s = append(s, 0)
2366 case 'a':
2367 s = append(s, '\x07')
2368 case 'b':
2369 s = append(s, '\x08')
2370 case 't', '\t':
2371 s = append(s, '\x09')
2372 case 'n':
2373 s = append(s, '\x0A')
2374 case 'v':
2375 s = append(s, '\x0B')
2376 case 'f':
2377 s = append(s, '\x0C')
2378 case 'r':
2379 s = append(s, '\x0D')
2380 case 'e':
2381 s = append(s, '\x1B')
2382 case ' ':
2383 s = append(s, '\x20')
2384 case '"':
2385 s = append(s, '"')
2386 case '\'':
2387 s = append(s, '\'')
2388 case '\\':
2389 s = append(s, '\\')
2390 case 'N': // NEL (#x85)
2391 s = append(s, '\xC2')
2392 s = append(s, '\x85')
2393 case '_': // #xA0
2394 s = append(s, '\xC2')
2395 s = append(s, '\xA0')
2396 case 'L': // LS (#x2028)
2397 s = append(s, '\xE2')
2398 s = append(s, '\x80')
2399 s = append(s, '\xA8')
2400 case 'P': // PS (#x2029)
2401 s = append(s, '\xE2')
2402 s = append(s, '\x80')
2403 s = append(s, '\xA9')
2404 case 'x':
2405 code_length = 2
2406 case 'u':
2407 code_length = 4
2408 case 'U':
2409 code_length = 8
2410 default:
2411 yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
2412 start_mark, "found unknown escape character")
2413 return false
2414 }
2415
2416 skip(parser)
2417 skip(parser)
2418
2419 // Consume an arbitrary escape code.
2420 if code_length > 0 {
2421 var value int
2422
2423 // Scan the character value.
2424 if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
2425 return false
2426 }
2427 for k := 0; k < code_length; k++ {
2428 if !is_hex(parser.buffer, parser.buffer_pos+k) {
2429 yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
2430 start_mark, "did not find expected hexadecimal number")
2431 return false
2432 }
2433 value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
2434 }
2435
2436 // Check the value and write the character.
2437 if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
2438 yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
2439 start_mark, "found invalid Unicode character escape code")
2440 return false
2441 }
2442 if value <= 0x7F {
2443 s = append(s, byte(value))
2444 } else if value <= 0x7FF {
2445 s = append(s, byte(0xC0+(value>>6)))
2446 s = append(s, byte(0x80+(value&0x3F)))
2447 } else if value <= 0xFFFF {
2448 s = append(s, byte(0xE0+(value>>12)))
2449 s = append(s, byte(0x80+((value>>6)&0x3F)))
2450 s = append(s, byte(0x80+(value&0x3F)))
2451 } else {
2452 s = append(s, byte(0xF0+(value>>18)))
2453 s = append(s, byte(0x80+((value>>12)&0x3F)))
2454 s = append(s, byte(0x80+((value>>6)&0x3F)))
2455 s = append(s, byte(0x80+(value&0x3F)))
2456 }
2457
2458 // Advance the pointer.
2459 for k := 0; k < code_length; k++ {
2460 skip(parser)
2461 }
2462 }
2463 } else {
2464 // It is a non-escaped non-blank character.
2465 s = read(parser, s)
2466 }
2467 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2468 return false
2469 }
2470 }
2471
2472 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2473 return false
2474 }
2475
2476 // Check if we are at the end of the scalar.
2477 if single {
2478 if parser.buffer[parser.buffer_pos] == '\'' {
2479 break
2480 }
2481 } else {
2482 if parser.buffer[parser.buffer_pos] == '"' {
2483 break
2484 }
2485 }
2486
2487 // Consume blank characters.
2488 for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
2489 if is_blank(parser.buffer, parser.buffer_pos) {
2490 // Consume a space or a tab character.
2491 if !leading_blanks {
2492 whitespaces = read(parser, whitespaces)
2493 } else {
2494 skip(parser)
2495 }
2496 } else {
2497 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2498 return false
2499 }
2500
2501 // Check if it is the first line break.
2502 if !leading_blanks {
2503 whitespaces = whitespaces[:0]
2504 leading_break = read_line(parser, leading_break)
2505 leading_blanks = true
2506 } else {
2507 trailing_breaks = read_line(parser, trailing_breaks)
2508 }
2509 }
2510 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2511 return false
2512 }
2513 }
2514
2515 // Join the whitespaces or fold line breaks.
2516 if leading_blanks {
2517 // Do we need to fold line breaks?
2518 if len(leading_break) > 0 && leading_break[0] == '\n' {
2519 if len(trailing_breaks) == 0 {
2520 s = append(s, ' ')
2521 } else {
2522 s = append(s, trailing_breaks...)
2523 }
2524 } else {
2525 s = append(s, leading_break...)
2526 s = append(s, trailing_breaks...)
2527 }
2528 trailing_breaks = trailing_breaks[:0]
2529 leading_break = leading_break[:0]
2530 } else {
2531 s = append(s, whitespaces...)
2532 whitespaces = whitespaces[:0]
2533 }
2534 }
2535
2536 // Eat the right quote.
2537 skip(parser)
2538 end_mark := parser.mark
2539
2540 // Create a token.
2541 *token = yaml_token_t{
2542 typ: yaml_SCALAR_TOKEN,
2543 start_mark: start_mark,
2544 end_mark: end_mark,
2545 value: s,
2546 style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
2547 }
2548 if !single {
2549 token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
2550 }
2551 return true
2552}
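
// For example, the single-quoted scalar 'it''s' scans to the value "it's"
// (the doubled quote being the only escape in single-quoted style), while
// the double-quoted scalar "a\tb\u00E9" scans to 'a', a tab, 'b', and the
// UTF-8 encoding of U+00E9 ('é').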
2553
2554// Scan a plain scalar.
2555func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
2556
2557 var s, leading_break, trailing_breaks, whitespaces []byte
2558 var leading_blanks bool
2559 var indent = parser.indent + 1
2560
2561 start_mark := parser.mark
2562 end_mark := parser.mark
2563
2564 // Consume the content of the plain scalar.
2565 for {
2566 // Check for a document indicator.
2567 if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
2568 return false
2569 }
2570 if parser.mark.column == 0 &&
2571 ((parser.buffer[parser.buffer_pos+0] == '-' &&
2572 parser.buffer[parser.buffer_pos+1] == '-' &&
2573 parser.buffer[parser.buffer_pos+2] == '-') ||
2574 (parser.buffer[parser.buffer_pos+0] == '.' &&
2575 parser.buffer[parser.buffer_pos+1] == '.' &&
2576 parser.buffer[parser.buffer_pos+2] == '.')) &&
2577 is_blankz(parser.buffer, parser.buffer_pos+3) {
2578 break
2579 }
2580
2581 // Check for a comment.
2582 if parser.buffer[parser.buffer_pos] == '#' {
2583 break
2584 }
2585
2586 // Consume non-blank characters.
2587 for !is_blankz(parser.buffer, parser.buffer_pos) {
2588
2589 // Check for indicators that may end a plain scalar.
2590 if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
2591 (parser.flow_level > 0 &&
2592 (parser.buffer[parser.buffer_pos] == ',' ||
2593 parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
2594 parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
2595 parser.buffer[parser.buffer_pos] == '}')) {
2596 break
2597 }
2598
2599 // Check if we need to join whitespaces and breaks.
2600 if leading_blanks || len(whitespaces) > 0 {
2601 if leading_blanks {
2602 // Do we need to fold line breaks?
2603 if leading_break[0] == '\n' {
2604 if len(trailing_breaks) == 0 {
2605 s = append(s, ' ')
2606 } else {
2607 s = append(s, trailing_breaks...)
2608 }
2609 } else {
2610 s = append(s, leading_break...)
2611 s = append(s, trailing_breaks...)
2612 }
2613 trailing_breaks = trailing_breaks[:0]
2614 leading_break = leading_break[:0]
2615 leading_blanks = false
2616 } else {
2617 s = append(s, whitespaces...)
2618 whitespaces = whitespaces[:0]
2619 }
2620 }
2621
2622 // Copy the character.
2623 s = read(parser, s)
2624
2625 end_mark = parser.mark
2626 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2627 return false
2628 }
2629 }
2630
2631 // Is it the end?
2632 if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
2633 break
2634 }
2635
2636 // Consume blank characters.
2637 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2638 return false
2639 }
2640
2641 for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
2642 if is_blank(parser.buffer, parser.buffer_pos) {
2643
2644 // Check for tab characters that abuse indentation.
2645 if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
2646 yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
2647 start_mark, "found a tab character that violates indentation")
2648 return false
2649 }
2650
2651 // Consume a space or a tab character.
2652 if !leading_blanks {
2653 whitespaces = read(parser, whitespaces)
2654 } else {
2655 skip(parser)
2656 }
2657 } else {
2658 if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
2659 return false
2660 }
2661
2662 // Check if it is the first line break.
2663 if !leading_blanks {
2664 whitespaces = whitespaces[:0]
2665 leading_break = read_line(parser, leading_break)
2666 leading_blanks = true
2667 } else {
2668 trailing_breaks = read_line(parser, trailing_breaks)
2669 }
2670 }
2671 if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
2672 return false
2673 }
2674 }
2675
2676 // Check indentation level.
2677 if parser.flow_level == 0 && parser.mark.column < indent {
2678 break
2679 }
2680 }
2681
2682 // Create a token.
2683 *token = yaml_token_t{
2684 typ: yaml_SCALAR_TOKEN,
2685 start_mark: start_mark,
2686 end_mark: end_mark,
2687 value: s,
2688 style: yaml_PLAIN_SCALAR_STYLE,
2689 }
2690
2691 // Note that we change the 'simple_key_allowed' flag.
2692 if leading_blanks {
2693 parser.simple_key_allowed = true
2694 }
2695 return true
2696}
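The loop above implements YAML's plain-scalar folding: a single line break between content lines folds into one space, while each additional blank line survives as a literal newline (that is the leading_break / trailing_breaks bookkeeping). A minimal out-of-package sketch of the same rule, with a hypothetical foldPlain helper:

package main

import "strings"

// foldPlain illustrates the folding rule implemented by the scanner above:
// one break becomes a space; n extra blank lines become n newlines.
func foldPlain(lines []string) string {
	out := lines[0]
	blanks := 0 // blank lines seen since the last content line
	for _, l := range lines[1:] {
		if l == "" {
			blanks++
			continue
		}
		if blanks == 0 {
			out += " " + l
		} else {
			out += strings.Repeat("\n", blanks) + l
			blanks = 0
		}
	}
	return out
}

So foldPlain([]string{"foo", "bar"}) yields "foo bar", while foldPlain([]string{"foo", "", "bar"}) keeps the break and yields "foo\nbar".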
diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go
new file mode 100644
index 0000000..a2dde60
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/writerc.go
@@ -0,0 +1,26 @@
1package yaml
2
3// Set the writer error and return false.
4func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
5 emitter.error = yaml_WRITER_ERROR
6 emitter.problem = problem
7 return false
8}
9
10// Flush the output buffer.
11func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
12 if emitter.write_handler == nil {
13 panic("write handler not set")
14 }
15
16 // Check if the buffer is empty.
17 if emitter.buffer_pos == 0 {
18 return true
19 }
20
21 if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
22 return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
23 }
24 emitter.buffer_pos = 0
25 return true
26}
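yaml_emitter_flush above is deliberately idempotent: it hands emitter.buffer[:emitter.buffer_pos] to the installed handler, resets buffer_pos on success, and treats an empty buffer as a successful no-op. A minimal in-package sketch of that contract (the demo function and its literal emitter are illustrative only):

// demoFlush shows the flush contract: bytes go out once, then the
// second call is a no-op because buffer_pos is already 0.
func demoFlush() []byte {
	var out []byte
	e := &yaml_emitter_t{
		buffer:     []byte("a: b\n"),
		buffer_pos: 5,
		write_handler: func(e *yaml_emitter_t, b []byte) error {
			out = append(out, b...)
			return nil // a non-nil error would become a yaml_WRITER_ERROR
		},
	}
	yaml_emitter_flush(e) // out == "a: b\n", e.buffer_pos == 0
	yaml_emitter_flush(e) // empty buffer: returns true immediately
	return out
}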
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go
new file mode 100644
index 0000000..2c314cc
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/yaml.go
@@ -0,0 +1,215 @@
1// Package yaml can marshal and unmarshal cty values in YAML format.
2package yaml
3
4import (
5 "errors"
6 "fmt"
7 "reflect"
8 "strings"
9 "sync"
10
11 "github.com/zclconf/go-cty/cty"
12)
13
14// Unmarshal reads the document found within the given source buffer
15// and attempts to convert it into a value conforming to the given type
16// constraint.
17//
18// This is an alias for Unmarshal on the predefined Converter in "Standard".
19//
20// An error is returned if the given source contains any YAML document
21// delimiters.
22func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
23 return Standard.Unmarshal(src, ty)
24}
25
26// Marshal serializes the given value into a YAML document, using a fixed
27// mapping from cty types to YAML constructs.
28//
29// This is an alias for Marshal on the predefined Converter in "Standard".
30//
31// Note that unlike the function of the same name in the cty JSON package,
32// this does not take a type constraint and therefore the YAML serialization
33// cannot preserve late-bound type information in the serialization to be
34// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type
35// constraint given to Unmarshal will be decoded as if the corresponding portion
36// of the input were processed with ImpliedType to find a target type.
37func Marshal(v cty.Value) ([]byte, error) {
38 return Standard.Marshal(v)
39}
40
41// ImpliedType analyzes the given source code and returns a suitable type that
42// it could be decoded into.
43//
44// For a converter that is using standard YAML rather than cty-specific custom
45// tags, only a subset of cty types can be produced: strings, numbers, bools,
46// tuple types, and object types.
47//
48// This is an alias for ImpliedType on the predefined Converter in "Standard".
49func ImpliedType(src []byte) (cty.Type, error) {
50 return Standard.ImpliedType(src)
51}
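Together these three entry points cover the usual round trip: infer a type from the document, decode against it, re-encode. A minimal caller-side sketch (document contents and error handling are illustrative):

package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: example\ncount: 2\n")

	ty, err := yaml.ImpliedType(src) // object type: name=string, count=number
	if err != nil {
		panic(err)
	}
	val, err := yaml.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(val)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}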
52
53func handleErr(err *error) {
54 if v := recover(); v != nil {
55 if e, ok := v.(yamlError); ok {
56 *err = e.err
57 } else {
58 panic(v)
59 }
60 }
61}
62
63type yamlError struct {
64 err error
65}
66
67func fail(err error) {
68 panic(yamlError{err})
69}
70
71func failf(format string, args ...interface{}) {
72 panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
73}
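handleErr, fail and failf form the package's internal error protocol: deep in the decoder a failure panics with a yamlError, and each exported entry point recovers it back into an ordinary error, re-raising anything else. A sketch of the intended shape (decodeDocument is a hypothetical caller):

// The deferred handleErr turns a failf panic into the named return err;
// unrelated panics propagate unchanged.
func decodeDocument(src []byte) (err error) {
	defer handleErr(&err)
	if len(src) == 0 {
		failf("empty input") // surfaces as the error "yaml: empty input"
	}
	// ... real decoding work ...
	return nil
}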
74
75// --------------------------------------------------------------------------
76// Maintain a mapping of keys to structure field indexes
77
78// The code in this section was copied from mgo/bson.
79
80// structInfo holds details for the serialization of fields of
81// a given struct.
82type structInfo struct {
83 FieldsMap map[string]fieldInfo
84 FieldsList []fieldInfo
85
86 // InlineMap is the number of the field in the struct that
87 // contains an ,inline map, or -1 if there's none.
88 InlineMap int
89}
90
91type fieldInfo struct {
92 Key string
93 Num int
94 OmitEmpty bool
95 Flow bool
96 // Id holds the unique field identifier, so we can cheaply
97 // check for field duplicates without maintaining an extra map.
98 Id int
99
100 // Inline holds the field index if the field is part of an inlined struct.
101 Inline []int
102}
103
104var structMap = make(map[reflect.Type]*structInfo)
105var fieldMapMutex sync.RWMutex
106
107func getStructInfo(st reflect.Type) (*structInfo, error) {
108 fieldMapMutex.RLock()
109 sinfo, found := structMap[st]
110 fieldMapMutex.RUnlock()
111 if found {
112 return sinfo, nil
113 }
114
115 n := st.NumField()
116 fieldsMap := make(map[string]fieldInfo)
117 fieldsList := make([]fieldInfo, 0, n)
118 inlineMap := -1
119 for i := 0; i != n; i++ {
120 field := st.Field(i)
121 if field.PkgPath != "" && !field.Anonymous {
122 continue // Private field
123 }
124
125 info := fieldInfo{Num: i}
126
127 tag := field.Tag.Get("yaml")
128 if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
129 tag = string(field.Tag)
130 }
131 if tag == "-" {
132 continue
133 }
134
135 inline := false
136 fields := strings.Split(tag, ",")
137 if len(fields) > 1 {
138 for _, flag := range fields[1:] {
139 switch flag {
140 case "omitempty":
141 info.OmitEmpty = true
142 case "flow":
143 info.Flow = true
144 case "inline":
145 inline = true
146 default:
147 return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
148 }
149 }
150 tag = fields[0]
151 }
152
153 if inline {
154 switch field.Type.Kind() {
155 case reflect.Map:
156 if inlineMap >= 0 {
157 return nil, errors.New("Multiple ,inline maps in struct " + st.String())
158 }
159 if field.Type.Key() != reflect.TypeOf("") {
160 return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
161 }
162 inlineMap = info.Num
163 case reflect.Struct:
164 sinfo, err := getStructInfo(field.Type)
165 if err != nil {
166 return nil, err
167 }
168 for _, finfo := range sinfo.FieldsList {
169 if _, found := fieldsMap[finfo.Key]; found {
170 msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
171 return nil, errors.New(msg)
172 }
173 if finfo.Inline == nil {
174 finfo.Inline = []int{i, finfo.Num}
175 } else {
176 finfo.Inline = append([]int{i}, finfo.Inline...)
177 }
178 finfo.Id = len(fieldsList)
179 fieldsMap[finfo.Key] = finfo
180 fieldsList = append(fieldsList, finfo)
181 }
182 default:
183 //return nil, errors.New("Option ,inline needs a struct value or map field")
184 return nil, errors.New("Option ,inline needs a struct value field")
185 }
186 continue
187 }
188
189 if tag != "" {
190 info.Key = tag
191 } else {
192 info.Key = strings.ToLower(field.Name)
193 }
194
195 if _, found = fieldsMap[info.Key]; found {
196 msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
197 return nil, errors.New(msg)
198 }
199
200 info.Id = len(fieldsList)
201 fieldsList = append(fieldsList, info)
202 fieldsMap[info.Key] = info
203 }
204
205 sinfo = &structInfo{
206 FieldsMap: fieldsMap,
207 FieldsList: fieldsList,
208 InlineMap: inlineMap,
209 }
210
211 fieldMapMutex.Lock()
212 structMap[st] = sinfo
213 fieldMapMutex.Unlock()
214 return sinfo, nil
215}
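getStructInfo accepts the same tag grammar as mgo/bson: an optional key rename followed by comma-separated flags, with results cached per type under fieldMapMutex. A minimal sketch of a struct exercising each supported flag (field names are illustrative):

type Config struct {
	Name  string            `yaml:"name"`            // renamed key
	Count int               `yaml:"count,omitempty"` // dropped when empty
	Tags  []string          `yaml:"tags,flow"`       // flow style on output
	Extra map[string]string `yaml:",inline"`         // inline map; keys must be strings
}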
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go
new file mode 100644
index 0000000..e25cee5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go
@@ -0,0 +1,738 @@
1package yaml
2
3import (
4 "fmt"
5 "io"
6)
7
8// The version directive data.
9type yaml_version_directive_t struct {
10 major int8 // The major version number.
11 minor int8 // The minor version number.
12}
13
14// The tag directive data.
15type yaml_tag_directive_t struct {
16 handle []byte // The tag handle.
17 prefix []byte // The tag prefix.
18}
19
20type yaml_encoding_t int
21
22// The stream encoding.
23const (
24 // Let the parser choose the encoding.
25 yaml_ANY_ENCODING yaml_encoding_t = iota
26
27 yaml_UTF8_ENCODING // The default UTF-8 encoding.
28 yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
29 yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
30)
31
32type yaml_break_t int
33
34// Line break types.
35const (
36 // Let the parser choose the break type.
37 yaml_ANY_BREAK yaml_break_t = iota
38
39 yaml_CR_BREAK // Use CR for line breaks (Mac style).
40 yaml_LN_BREAK // Use LN for line breaks (Unix style).
41 yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
42)
43
44type yaml_error_type_t int
45
46// Many bad things could happen with the parser and emitter.
47const (
48 // No error is produced.
49 yaml_NO_ERROR yaml_error_type_t = iota
50
51 yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
52 yaml_READER_ERROR // Cannot read or decode the input stream.
53 yaml_SCANNER_ERROR // Cannot scan the input stream.
54 yaml_PARSER_ERROR // Cannot parse the input stream.
55 yaml_COMPOSER_ERROR // Cannot compose a YAML document.
56 yaml_WRITER_ERROR // Cannot write to the output stream.
57 yaml_EMITTER_ERROR // Cannot emit a YAML stream.
58)
59
60// The pointer position.
61type yaml_mark_t struct {
62 index int // The position index.
63 line int // The position line.
64 column int // The position column.
65}
66
67// Node Styles
68
69type yaml_style_t int8
70
71type yaml_scalar_style_t yaml_style_t
72
73// Scalar styles.
74const (
75 // Let the emitter choose the style.
76 yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
77
78 yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
79 yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
80 yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
81 yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
82 yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
83)
84
85type yaml_sequence_style_t yaml_style_t
86
87// Sequence styles.
88const (
89 // Let the emitter choose the style.
90 yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
91
92 yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
93 yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
94)
95
96type yaml_mapping_style_t yaml_style_t
97
98// Mapping styles.
99const (
100 // Let the emitter choose the style.
101 yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
102
103 yaml_BLOCK_MAPPING_STYLE // The block mapping style.
104 yaml_FLOW_MAPPING_STYLE // The flow mapping style.
105)
106
107// Tokens
108
109type yaml_token_type_t int
110
111// Token types.
112const (
113 // An empty token.
114 yaml_NO_TOKEN yaml_token_type_t = iota
115
116 yaml_STREAM_START_TOKEN // A STREAM-START token.
117 yaml_STREAM_END_TOKEN // A STREAM-END token.
118
119 yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
120 yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
121 yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
122 yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
123
124 yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
125 yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
126 yaml_BLOCK_END_TOKEN // A BLOCK-END token.
127
128 yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
129 yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
130 yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
131 yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
132
133 yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
134 yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
135 yaml_KEY_TOKEN // A KEY token.
136 yaml_VALUE_TOKEN // A VALUE token.
137
138 yaml_ALIAS_TOKEN // An ALIAS token.
139 yaml_ANCHOR_TOKEN // An ANCHOR token.
140 yaml_TAG_TOKEN // A TAG token.
141 yaml_SCALAR_TOKEN // A SCALAR token.
142)
143
144func (tt yaml_token_type_t) String() string {
145 switch tt {
146 case yaml_NO_TOKEN:
147 return "yaml_NO_TOKEN"
148 case yaml_STREAM_START_TOKEN:
149 return "yaml_STREAM_START_TOKEN"
150 case yaml_STREAM_END_TOKEN:
151 return "yaml_STREAM_END_TOKEN"
152 case yaml_VERSION_DIRECTIVE_TOKEN:
153 return "yaml_VERSION_DIRECTIVE_TOKEN"
154 case yaml_TAG_DIRECTIVE_TOKEN:
155 return "yaml_TAG_DIRECTIVE_TOKEN"
156 case yaml_DOCUMENT_START_TOKEN:
157 return "yaml_DOCUMENT_START_TOKEN"
158 case yaml_DOCUMENT_END_TOKEN:
159 return "yaml_DOCUMENT_END_TOKEN"
160 case yaml_BLOCK_SEQUENCE_START_TOKEN:
161 return "yaml_BLOCK_SEQUENCE_START_TOKEN"
162 case yaml_BLOCK_MAPPING_START_TOKEN:
163 return "yaml_BLOCK_MAPPING_START_TOKEN"
164 case yaml_BLOCK_END_TOKEN:
165 return "yaml_BLOCK_END_TOKEN"
166 case yaml_FLOW_SEQUENCE_START_TOKEN:
167 return "yaml_FLOW_SEQUENCE_START_TOKEN"
168 case yaml_FLOW_SEQUENCE_END_TOKEN:
169 return "yaml_FLOW_SEQUENCE_END_TOKEN"
170 case yaml_FLOW_MAPPING_START_TOKEN:
171 return "yaml_FLOW_MAPPING_START_TOKEN"
172 case yaml_FLOW_MAPPING_END_TOKEN:
173 return "yaml_FLOW_MAPPING_END_TOKEN"
174 case yaml_BLOCK_ENTRY_TOKEN:
175 return "yaml_BLOCK_ENTRY_TOKEN"
176 case yaml_FLOW_ENTRY_TOKEN:
177 return "yaml_FLOW_ENTRY_TOKEN"
178 case yaml_KEY_TOKEN:
179 return "yaml_KEY_TOKEN"
180 case yaml_VALUE_TOKEN:
181 return "yaml_VALUE_TOKEN"
182 case yaml_ALIAS_TOKEN:
183 return "yaml_ALIAS_TOKEN"
184 case yaml_ANCHOR_TOKEN:
185 return "yaml_ANCHOR_TOKEN"
186 case yaml_TAG_TOKEN:
187 return "yaml_TAG_TOKEN"
188 case yaml_SCALAR_TOKEN:
189 return "yaml_SCALAR_TOKEN"
190 }
191 return "<unknown token>"
192}
193
194// The token structure.
195type yaml_token_t struct {
196 // The token type.
197 typ yaml_token_type_t
198
199 // The start/end of the token.
200 start_mark, end_mark yaml_mark_t
201
202 // The stream encoding (for yaml_STREAM_START_TOKEN).
203 encoding yaml_encoding_t
204
205 // The alias/anchor/scalar value or tag/tag directive handle
206 // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
207 value []byte
208
209 // The tag suffix (for yaml_TAG_TOKEN).
210 suffix []byte
211
212 // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
213 prefix []byte
214
215 // The scalar style (for yaml_SCALAR_TOKEN).
216 style yaml_scalar_style_t
217
218 // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
219 major, minor int8
220}
221
222// Events
223
224type yaml_event_type_t int8
225
226// Event types.
227const (
228 // An empty event.
229 yaml_NO_EVENT yaml_event_type_t = iota
230
231 yaml_STREAM_START_EVENT // A STREAM-START event.
232 yaml_STREAM_END_EVENT // A STREAM-END event.
233 yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
234 yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
235 yaml_ALIAS_EVENT // An ALIAS event.
236 yaml_SCALAR_EVENT // A SCALAR event.
237 yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
238 yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
239 yaml_MAPPING_START_EVENT // A MAPPING-START event.
240 yaml_MAPPING_END_EVENT // A MAPPING-END event.
241)
242
243var eventStrings = []string{
244 yaml_NO_EVENT: "none",
245 yaml_STREAM_START_EVENT: "stream start",
246 yaml_STREAM_END_EVENT: "stream end",
247 yaml_DOCUMENT_START_EVENT: "document start",
248 yaml_DOCUMENT_END_EVENT: "document end",
249 yaml_ALIAS_EVENT: "alias",
250 yaml_SCALAR_EVENT: "scalar",
251 yaml_SEQUENCE_START_EVENT: "sequence start",
252 yaml_SEQUENCE_END_EVENT: "sequence end",
253 yaml_MAPPING_START_EVENT: "mapping start",
254 yaml_MAPPING_END_EVENT: "mapping end",
255}
256
257func (e yaml_event_type_t) String() string {
258 if e < 0 || int(e) >= len(eventStrings) {
259 return fmt.Sprintf("unknown event %d", e)
260 }
261 return eventStrings[e]
262}
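eventStrings uses Go's keyed composite-literal form: each event constant selects the index it names, so the table cannot drift out of order, and String falls back for out-of-range values. The same idiom in isolation (names are illustrative):

// Keyed slice literal: entries may appear in any order; gaps are "".
var weekdays = []string{
	0: "Sunday",
	6: "Saturday",
} // len(weekdays) == 7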
263
264// The event structure.
265type yaml_event_t struct {
266
267 // The event type.
268 typ yaml_event_type_t
269
270 // The start and end of the event.
271 start_mark, end_mark yaml_mark_t
272
273 // The document encoding (for yaml_STREAM_START_EVENT).
274 encoding yaml_encoding_t
275
276 // The version directive (for yaml_DOCUMENT_START_EVENT).
277 version_directive *yaml_version_directive_t
278
279 // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
280 tag_directives []yaml_tag_directive_t
281
282 // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
283 anchor []byte
284
285 // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
286 tag []byte
287
288 // The scalar value (for yaml_SCALAR_EVENT).
289 value []byte
290
291 // Is the document start/end indicator implicit, or the tag optional?
292 // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
293 implicit bool
294
295 // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
296 quoted_implicit bool
297
298 // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
299 style yaml_style_t
300}
301
302func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
303func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
304func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
305
306// Nodes
307
308const (
309 yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
310 yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
311 yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
312 yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
313 yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
314 yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
315
316 yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
317 yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
318
319 // Not in original libyaml.
320 yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
321 yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
322
323 yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
324 yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
325 yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
326)
327
328type yaml_node_type_t int
329
330// Node types.
331const (
332 // An empty node.
333 yaml_NO_NODE yaml_node_type_t = iota
334
335 yaml_SCALAR_NODE // A scalar node.
336 yaml_SEQUENCE_NODE // A sequence node.
337 yaml_MAPPING_NODE // A mapping node.
338)
339
340// An element of a sequence node.
341type yaml_node_item_t int
342
343// An element of a mapping node.
344type yaml_node_pair_t struct {
345 key int // The key of the element.
346 value int // The value of the element.
347}
348
349// The node structure.
350type yaml_node_t struct {
351 typ yaml_node_type_t // The node type.
352 tag []byte // The node tag.
353
354 // The node data.
355
356 // The scalar parameters (for yaml_SCALAR_NODE).
357 scalar struct {
358 value []byte // The scalar value.
359 length int // The length of the scalar value.
360 style yaml_scalar_style_t // The scalar style.
361 }
362
363 // The sequence parameters (for YAML_SEQUENCE_NODE).
364 sequence struct {
365 items_data []yaml_node_item_t // The stack of sequence items.
366 style yaml_sequence_style_t // The sequence style.
367 }
368
369 // The mapping parameters (for yaml_MAPPING_NODE).
370 mapping struct {
371 pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
372 pairs_start *yaml_node_pair_t // The beginning of the stack.
373 pairs_end *yaml_node_pair_t // The end of the stack.
374 pairs_top *yaml_node_pair_t // The top of the stack.
375 style yaml_mapping_style_t // The mapping style.
376 }
377
378 start_mark yaml_mark_t // The beginning of the node.
379 end_mark yaml_mark_t // The end of the node.
380
381}
382
383// The document structure.
384type yaml_document_t struct {
385
386 // The document nodes.
387 nodes []yaml_node_t
388
389 // The version directive.
390 version_directive *yaml_version_directive_t
391
392 // The list of tag directives.
393 tag_directives_data []yaml_tag_directive_t
394 tag_directives_start int // The beginning of the tag directives list.
395 tag_directives_end int // The end of the tag directives list.
396
397 start_implicit int // Is the document start indicator implicit?
398 end_implicit int // Is the document end indicator implicit?
399
400 // The start/end of the document.
401 start_mark, end_mark yaml_mark_t
402}
403
404// The prototype of a read handler.
405//
406// The read handler is called when the parser needs to read more bytes from the
407// source. The handler should write no more than size bytes to the buffer.
408// The number of written bytes should be set to the size_read variable.
409//
410// [in,out] data A pointer to an application data specified by
411// yaml_parser_set_input().
412// [out] buffer The buffer to write the data from the source.
413// [in] size The size of the buffer.
414// [out] size_read The actual number of bytes read from the source.
415//
416// On success, the handler should return 1. If the handler failed,
417// the returned value should be 0. On EOF, the handler should set the
418// size_read to 0 and return 1.
419type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
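Note that the comment above still reads like the C prototype (size_read, return 1 or 0); in this port the handler just fills buffer and returns (n, err), with io.EOF playing the role of "set size_read to 0 and return 1". A sketch of an io.Reader-backed handler, mirroring the parser's input_reader field (treat the exact wiring as an assumption):

// readerReadHandler adapts parser.input_reader to the handler contract.
func readerReadHandler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_reader.Read(buffer)
}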
420
421// This structure holds information about a potential simple key.
422type yaml_simple_key_t struct {
423 possible bool // Is a simple key possible?
424 required bool // Is a simple key required?
425 token_number int // The number of the token.
426 mark yaml_mark_t // The position mark.
427}
428
429// The states of the parser.
430type yaml_parser_state_t int
431
432const (
433 yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
434
435 yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
436 yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
437 yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
438 yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
439 yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
440 yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
441 yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
442 yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
443 yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
444 yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
445 yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
446 yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
447 yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
448 yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
449 yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
450 yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
451 yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
452 yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
453 yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
454 yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
455 yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
456 yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
457 yaml_PARSE_END_STATE // Expect nothing.
458)
459
460func (ps yaml_parser_state_t) String() string {
461 switch ps {
462 case yaml_PARSE_STREAM_START_STATE:
463 return "yaml_PARSE_STREAM_START_STATE"
464 case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
465 return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
466 case yaml_PARSE_DOCUMENT_START_STATE:
467 return "yaml_PARSE_DOCUMENT_START_STATE"
468 case yaml_PARSE_DOCUMENT_CONTENT_STATE:
469 return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
470 case yaml_PARSE_DOCUMENT_END_STATE:
471 return "yaml_PARSE_DOCUMENT_END_STATE"
472 case yaml_PARSE_BLOCK_NODE_STATE:
473 return "yaml_PARSE_BLOCK_NODE_STATE"
474 case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
475 return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
476 case yaml_PARSE_FLOW_NODE_STATE:
477 return "yaml_PARSE_FLOW_NODE_STATE"
478 case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
479 return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
480 case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
481 return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
482 case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
483 return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
484 case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
485 return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
486 case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
487 return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
488 case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
489 return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
490 case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
491 return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
492 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
493 return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
494 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
495 return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
496 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
497 return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
498 case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
499 return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
500 case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
501 return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
502 case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
503 return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
504 case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
505 return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
506 case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
507 return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
508 case yaml_PARSE_END_STATE:
509 return "yaml_PARSE_END_STATE"
510 }
511 return "<unknown parser state>"
512}
513
514// This structure holds aliases data.
515type yaml_alias_data_t struct {
516 anchor []byte // The anchor.
517 index int // The node id.
518 mark yaml_mark_t // The anchor mark.
519}
520
521// The parser structure.
522//
523// All members are internal. Manage the structure using the
524// yaml_parser_ family of functions.
525type yaml_parser_t struct {
526
527 // Error handling
528
529 error yaml_error_type_t // Error type.
530
531 problem string // Error description.
532
533 // The byte about which the problem occurred.
534 problem_offset int
535 problem_value int
536 problem_mark yaml_mark_t
537
538 // The error context.
539 context string
540 context_mark yaml_mark_t
541
542 // Reader stuff
543
544 read_handler yaml_read_handler_t // Read handler.
545
546 input_reader io.Reader // File input data.
547 input []byte // String input data.
548 input_pos int
549
550 eof bool // EOF flag
551
552 buffer []byte // The working buffer.
553 buffer_pos int // The current position of the buffer.
554
555 unread int // The number of unread characters in the buffer.
556
557 raw_buffer []byte // The raw buffer.
558 raw_buffer_pos int // The current position of the buffer.
559
560 encoding yaml_encoding_t // The input encoding.
561
562 offset int // The offset of the current position (in bytes).
563 mark yaml_mark_t // The mark of the current position.
564
565 // Scanner stuff
566
567 stream_start_produced bool // Have we started to scan the input stream?
568 stream_end_produced bool // Have we reached the end of the input stream?
569
570 flow_level int // The number of unclosed '[' and '{' indicators.
571
572 tokens []yaml_token_t // The tokens queue.
573 tokens_head int // The head of the tokens queue.
574 tokens_parsed int // The number of tokens fetched from the queue.
575 token_available bool // Does the tokens queue contain a token ready for dequeueing.
576
577 indent int // The current indentation level.
578 indents []int // The indentation levels stack.
579
580 simple_key_allowed bool // May a simple key occur at the current position?
581 simple_keys []yaml_simple_key_t // The stack of simple keys.
582
583 // Parser stuff
584
585 state yaml_parser_state_t // The current parser state.
586 states []yaml_parser_state_t // The parser states stack.
587 marks []yaml_mark_t // The stack of marks.
588 tag_directives []yaml_tag_directive_t // The list of TAG directives.
589
590 // Dumper stuff
591
592 aliases []yaml_alias_data_t // The alias data.
593
594 document *yaml_document_t // The currently parsed document.
595}
596
597// Emitter Definitions
598
599// The prototype of a write handler.
600//
601// The write handler is called when the emitter needs to flush the accumulated
602// characters to the output. The handler should write size bytes of the
603// buffer to the output.
604//
605// [in,out] data A pointer to an application data specified by
606// yaml_emitter_set_output().
607// [in] buffer The buffer with bytes to be written.
608// [in] size The size of the buffer.
609//
610// On success, the handler should return 1. If the handler failed,
611// the returned value should be 0.
612//
613type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
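As on the reader side, the Go port swaps the 1/0 convention described above for a plain error. A sketch of an io.Writer-backed handler using the emitter's output_writer field (the function name is illustrative):

// writerWriteHandler forwards flushed bytes to the configured io.Writer.
func writerWriteHandler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}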
614
615type yaml_emitter_state_t int
616
617// The emitter states.
618const (
619 // Expect STREAM-START.
620 yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
621
622 yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
623 yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
624 yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
625 yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
626 yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
627 yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
628 yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
629 yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
630 yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
631 yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
632 yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
633 yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
634 yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
635 yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
636 yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
637 yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
638 yaml_EMIT_END_STATE // Expect nothing.
639)
640
641// The emitter structure.
642//
643// All members are internal. Manage the structure using the @c yaml_emitter_
644// family of functions.
645type yaml_emitter_t struct {
646
647 // Error handling
648
649 error yaml_error_type_t // Error type.
650 problem string // Error description.
651
652 // Writer stuff
653
654 write_handler yaml_write_handler_t // Write handler.
655
656 output_buffer *[]byte // String output data.
657 output_writer io.Writer // File output data.
658
659 buffer []byte // The working buffer.
660 buffer_pos int // The current position of the buffer.
661
662 raw_buffer []byte // The raw buffer.
663 raw_buffer_pos int // The current position of the buffer.
664
665 encoding yaml_encoding_t // The stream encoding.
666
667 // Emitter stuff
668
669 canonical bool // If the output is in the canonical style?
670 best_indent int // The number of indentation spaces.
671 best_width int // The preferred width of the output lines.
672 unicode bool // Allow unescaped non-ASCII characters?
673 line_break yaml_break_t // The preferred line break.
674
675 state yaml_emitter_state_t // The current emitter state.
676 states []yaml_emitter_state_t // The stack of states.
677
678 events []yaml_event_t // The event queue.
679 events_head int // The head of the event queue.
680
681 indents []int // The stack of indentation levels.
682
683 tag_directives []yaml_tag_directive_t // The list of tag directives.
684
685 indent int // The current indentation level.
686
687 flow_level int // The current flow level.
688
689 root_context bool // Is it the document root context?
690 sequence_context bool // Is it a sequence context?
691 mapping_context bool // Is it a mapping context?
692 simple_key_context bool // Is it a simple mapping key context?
693
694 line int // The current line.
695 column int // The current column.
696 whitespace bool // If the last character was a whitespace?
697 indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
698 open_ended bool // If an explicit document end is required?
699
700 // Anchor analysis.
701 anchor_data struct {
702 anchor []byte // The anchor value.
703 alias bool // Is it an alias?
704 }
705
706 // Tag analysis.
707 tag_data struct {
708 handle []byte // The tag handle.
709 suffix []byte // The tag suffix.
710 }
711
712 // Scalar analysis.
713 scalar_data struct {
714 value []byte // The scalar value.
715 multiline bool // Does the scalar contain line breaks?
716 flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
717 block_plain_allowed bool // Can the scalar be expressed in the block plain style?
718 single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
719 block_allowed bool // Can the scalar be expressed in the literal or folded styles?
720 style yaml_scalar_style_t // The output style.
721 }
722
723 // Dumper stuff
724
725 opened bool // If the stream was already opened?
726 closed bool // If the stream was already closed?
727
728 // The information associated with the document nodes.
729 anchors *struct {
730 references int // The number of references.
731 anchor int // The anchor id.
732 serialized bool // If the node has been emitted?
733 }
734
735 last_anchor_id int // The last assigned anchor id.
736
737 document *yaml_document_t // The currently emitted document.
738}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
@@ -0,0 +1,173 @@
1package yaml
2
3const (
4 // The size of the input raw buffer.
5 input_raw_buffer_size = 512
6
7 // The size of the input buffer.
8 // It should be possible to decode the whole raw buffer.
9 input_buffer_size = input_raw_buffer_size * 3
10
11 // The size of the output buffer.
12 output_buffer_size = 128
13
14 // The size of the output raw buffer.
15 // It should be possible to encode the whole output buffer.
16 output_raw_buffer_size = (output_buffer_size*2 + 2)
17
18 // The size of other stacks and queues.
19 initial_stack_size = 16
20 initial_queue_size = 16
21 initial_string_size = 16
22)
23
24// Check if the character at the specified position is an alphabetical
25// character, a digit, '_', or '-'.
26func is_alpha(b []byte, i int) bool {
27 return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
28}
29
30// Check if the character at the specified position is a digit.
31func is_digit(b []byte, i int) bool {
32 return b[i] >= '0' && b[i] <= '9'
33}
34
35// Get the value of a digit.
36func as_digit(b []byte, i int) int {
37 return int(b[i]) - '0'
38}
39
40// Check if the character at the specified position is a hex-digit.
41func is_hex(b []byte, i int) bool {
42 return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
43}
44
45// Get the value of a hex-digit.
46func as_hex(b []byte, i int) int {
47 bi := b[i]
48 if bi >= 'A' && bi <= 'F' {
49 return int(bi) - 'A' + 10
50 }
51 if bi >= 'a' && bi <= 'f' {
52 return int(bi) - 'a' + 10
53 }
54 return int(bi) - '0'
55}
56
57// Check if the character is ASCII.
58func is_ascii(b []byte, i int) bool {
59 return b[i] <= 0x7F
60}
61
62// Check if the character at the start of the buffer can be printed unescaped.
63func is_printable(b []byte, i int) bool {
64 return ((b[i] == 0x0A) || // . == #x0A
65 (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
66 (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
67 (b[i] > 0xC2 && b[i] < 0xED) ||
68 (b[i] == 0xED && b[i+1] < 0xA0) ||
69 (b[i] == 0xEE) ||
70 (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
71 !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
72 !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
73}
74
75// Check if the character at the specified position is NUL.
76func is_z(b []byte, i int) bool {
77 return b[i] == 0x00
78}
79
80// Check if the beginning of the buffer is a BOM.
81func is_bom(b []byte, i int) bool {
82 return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
83}
84
85// Check if the character at the specified position is space.
86func is_space(b []byte, i int) bool {
87 return b[i] == ' '
88}
89
90// Check if the character at the specified position is tab.
91func is_tab(b []byte, i int) bool {
92 return b[i] == '\t'
93}
94
95// Check if the character at the specified position is blank (space or tab).
96func is_blank(b []byte, i int) bool {
97 //return is_space(b, i) || is_tab(b, i)
98 return b[i] == ' ' || b[i] == '\t'
99}
100
101// Check if the character at the specified position is a line break.
102func is_break(b []byte, i int) bool {
103 return (b[i] == '\r' || // CR (#xD)
104 b[i] == '\n' || // LF (#xA)
105 b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
106 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
107 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
108}
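is_break matches the multi-byte breaks by their UTF-8 encodings: NEL (U+0085) is C2 85, LS (U+2028) is E2 80 A8, and PS (U+2029) is E2 80 A9. A quick in-package sketch confirming that reading (adds an fmt import):

func demoBreaks() {
	fmt.Println(is_break([]byte("\r"), 0))     // true: CR
	fmt.Println(is_break([]byte("\u0085"), 0)) // true: NEL encodes as C2 85
	fmt.Println(is_break([]byte("\u2028"), 0)) // true: LS encodes as E2 80 A8
	fmt.Println(is_break([]byte(" "), 0))      // false: a space is not a break
}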
109
110func is_crlf(b []byte, i int) bool {
111 return b[i] == '\r' && b[i+1] == '\n'
112}
113
114// Check if the character is a line break or NUL.
115func is_breakz(b []byte, i int) bool {
116 //return is_break(b, i) || is_z(b, i)
117 return ( // is_break:
118 b[i] == '\r' || // CR (#xD)
119 b[i] == '\n' || // LF (#xA)
120 b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
121 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
122 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
123 // is_z:
124 b[i] == 0)
125}
126
127// Check if the character is a line break, space, or NUL.
128func is_spacez(b []byte, i int) bool {
129 //return is_space(b, i) || is_breakz(b, i)
130 return ( // is_space:
131 b[i] == ' ' ||
132 // is_breakz:
133 b[i] == '\r' || // CR (#xD)
134 b[i] == '\n' || // LF (#xA)
135 b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
136 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
137 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
138 b[i] == 0)
139}
140
141// Check if the character is a line break, space, tab, or NUL.
142func is_blankz(b []byte, i int) bool {
143 //return is_blank(b, i) || is_breakz(b, i)
144 return ( // is_blank:
145 b[i] == ' ' || b[i] == '\t' ||
146 // is_breakz:
147 b[i] == '\r' || // CR (#xD)
148 b[i] == '\n' || // LF (#xA)
149 b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
150 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
151 b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
152 b[i] == 0)
153}
154
155// Determine the width of the character.
156func width(b byte) int {
157 // Don't replace these by a switch without first
158 // confirming that it is being inlined.
159 if b&0x80 == 0x00 {
160 return 1
161 }
162 if b&0xE0 == 0xC0 {
163 return 2
164 }
165 if b&0xF0 == 0xE0 {
166 return 3
167 }
168 if b&0xF8 == 0xF0 {
169 return 4
170 }
171 return 0
172
173}
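width classifies a UTF-8 leading byte by its high bits (0xxxxxxx is 1 byte, 110xxxxx is 2, 1110xxxx is 3, 11110xxx is 4) and returns 0 for continuation or invalid bytes. An in-package sanity-check sketch against the standard library (adds a unicode/utf8 import):

// For any valid rune, width of its first encoded byte should agree
// with utf8.RuneLen.
func demoWidth() {
	for _, r := range []rune{'a', 'é', '\u20ac', '\U0001F600'} { // 1..4 bytes
		b := []byte(string(r))
		if width(b[0]) != utf8.RuneLen(r) {
			panic("width disagrees with utf8.RuneLen")
		}
	}
}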
diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go
index bf1a7c1..b314449 100644
--- a/vendor/github.com/zclconf/go-cty/cty/path.go
+++ b/vendor/github.com/zclconf/go-cty/cty/path.go
@@ -71,6 +71,48 @@ func (p Path) GetAttr(name string) Path {
71 return ret 71 return ret
72} 72}
73 73
74// Equals compares 2 Paths for exact equality.
75func (p Path) Equals(other Path) bool {
76 if len(p) != len(other) {
77 return false
78 }
79
80 for i := range p {
81 pv := p[i]
82 switch pv := pv.(type) {
83 case GetAttrStep:
84 ov, ok := other[i].(GetAttrStep)
85 if !ok || pv != ov {
86 return false
87 }
88 case IndexStep:
89 ov, ok := other[i].(IndexStep)
90 if !ok {
91 return false
92 }
93
94 if !pv.Key.RawEquals(ov.Key) {
95 return false
96 }
97 default:
98 // Steps of any unrecognized type always compare as unequal.
99 return false
100 }
101 }
102
103 return true
104
105}
106
107// HasPrefix determines if the path p contains the provided prefix.
108func (p Path) HasPrefix(prefix Path) bool {
109 if len(prefix) > len(p) {
110 return false
111 }
112
113 return p[:len(prefix)].Equals(prefix)
114}
115
74// GetAttrPath is a convenience method to start a new Path with a GetAttrStep. 116// GetAttrPath is a convenience method to start a new Path with a GetAttrStep.
75func GetAttrPath(name string) Path { 117func GetAttrPath(name string) Path {
76 return Path{}.GetAttr(name) 118 return Path{}.GetAttr(name)
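The new Equals and HasPrefix helpers give callers a cheap structural comparison over paths. A minimal usage sketch (attribute names and index values are illustrative):

p := cty.GetAttrPath("network").Index(cty.NumberIntVal(0)).GetAttr("cidr")
prefix := cty.GetAttrPath("network").Index(cty.NumberIntVal(0))

p.HasPrefix(prefix)          // true
p.Equals(prefix)             // false: different lengths
prefix.HasPrefix(cty.Path{}) // true: the empty path prefixes everything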
diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go
index 3e25186..faa2fb3 100644
--- a/vendor/golang.org/x/crypto/openpgp/keys.go
+++ b/vendor/golang.org/x/crypto/openpgp/keys.go
@@ -504,7 +504,7 @@ const defaultRSAKeyBits = 2048
504// which may be empty but must not contain any of "()<>\x00". 504// which may be empty but must not contain any of "()<>\x00".
505// If config is nil, sensible defaults will be used. 505// If config is nil, sensible defaults will be used.
506func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { 506func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
507 currentTime := config.Now() 507 creationTime := config.Now()
508 508
509 bits := defaultRSAKeyBits 509 bits := defaultRSAKeyBits
510 if config != nil && config.RSABits != 0 { 510 if config != nil && config.RSABits != 0 {
@@ -525,8 +525,8 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
525 } 525 }
526 526
527 e := &Entity{ 527 e := &Entity{
528 PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), 528 PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
529 PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), 529 PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
530 Identities: make(map[string]*Identity), 530 Identities: make(map[string]*Identity),
531 } 531 }
532 isPrimaryId := true 532 isPrimaryId := true
@@ -534,7 +534,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
534 Name: uid.Id, 534 Name: uid.Id,
535 UserId: uid, 535 UserId: uid,
536 SelfSignature: &packet.Signature{ 536 SelfSignature: &packet.Signature{
537 CreationTime: currentTime, 537 CreationTime: creationTime,
538 SigType: packet.SigTypePositiveCert, 538 SigType: packet.SigTypePositiveCert,
539 PubKeyAlgo: packet.PubKeyAlgoRSA, 539 PubKeyAlgo: packet.PubKeyAlgoRSA,
540 Hash: config.Hash(), 540 Hash: config.Hash(),
@@ -563,10 +563,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
563 563
564 e.Subkeys = make([]Subkey, 1) 564 e.Subkeys = make([]Subkey, 1)
565 e.Subkeys[0] = Subkey{ 565 e.Subkeys[0] = Subkey{
566 PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), 566 PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
567 PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), 567 PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
568 Sig: &packet.Signature{ 568 Sig: &packet.Signature{
569 CreationTime: currentTime, 569 CreationTime: creationTime,
570 SigType: packet.SigTypeSubkeyBinding, 570 SigType: packet.SigTypeSubkeyBinding,
571 PubKeyAlgo: packet.PubKeyAlgoRSA, 571 PubKeyAlgo: packet.PubKeyAlgoRSA,
572 Hash: config.Hash(), 572 Hash: config.Hash(),
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index bd31cce..6f8ec09 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -36,49 +36,49 @@ type PrivateKey struct {
36 iv []byte 36 iv []byte
37} 37}
38 38
39func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { 39func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
40 pk := new(PrivateKey) 40 pk := new(PrivateKey)
41 pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) 41 pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
42 pk.PrivateKey = priv 42 pk.PrivateKey = priv
43 return pk 43 return pk
44} 44}
45 45
46func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { 46func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
47 pk := new(PrivateKey) 47 pk := new(PrivateKey)
48 pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) 48 pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
49 pk.PrivateKey = priv 49 pk.PrivateKey = priv
50 return pk 50 return pk
51} 51}
52 52
53func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { 53func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
54 pk := new(PrivateKey) 54 pk := new(PrivateKey)
55 pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) 55 pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
56 pk.PrivateKey = priv 56 pk.PrivateKey = priv
57 return pk 57 return pk
58} 58}
59 59
60func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { 60func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
61 pk := new(PrivateKey) 61 pk := new(PrivateKey)
62 pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) 62 pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
63 pk.PrivateKey = priv 63 pk.PrivateKey = priv
64 return pk 64 return pk
65} 65}
66 66
67// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that 67// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
68// implements RSA or ECDSA. 68// implements RSA or ECDSA.
69func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey { 69func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
70 pk := new(PrivateKey) 70 pk := new(PrivateKey)
71 // In general, the public Keys should be used as pointers. We still 71 // In general, the public Keys should be used as pointers. We still
72 // type-switch on the values, for backwards-compatibility. 72 // type-switch on the values, for backwards-compatibility.
73 switch pubkey := signer.Public().(type) { 73 switch pubkey := signer.Public().(type) {
74 case *rsa.PublicKey: 74 case *rsa.PublicKey:
75 pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey) 75 pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey)
76 case rsa.PublicKey: 76 case rsa.PublicKey:
77 pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey) 77 pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey)
78 case *ecdsa.PublicKey: 78 case *ecdsa.PublicKey:
79 pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey) 79 pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey)
80 case ecdsa.PublicKey: 80 case ecdsa.PublicKey:
81 pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey) 81 pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey)
82 default: 82 default:
83 panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") 83 panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
84 } 84 }
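The currentTime to creationTime rename reflects what the argument really is: the creation timestamp baked into the resulting key packet and its signatures, not a clock reading as such. A brief caller-side sketch (key size and error handling are illustrative):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// The timestamp passed here is recorded as the key's creation time.
	pk := packet.NewRSAPrivateKey(time.Now(), priv)
	_ = pk
}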
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b1009a7..7ad0c02 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -6,7 +6,7 @@ cloud.google.com/go/internal/optional
6cloud.google.com/go/internal/trace 6cloud.google.com/go/internal/trace
7cloud.google.com/go/internal/version 7cloud.google.com/go/internal/version
8cloud.google.com/go/compute/metadata 8cloud.google.com/go/compute/metadata
9# github.com/DreamItGetIT/statuscake v0.0.0-20190218105717-471b24d8edfb 9# github.com/DreamItGetIT/statuscake v0.0.0-20190809134845-9d26ad75405b
10github.com/DreamItGetIT/statuscake 10github.com/DreamItGetIT/statuscake
11# github.com/agext/levenshtein v1.2.2 11# github.com/agext/levenshtein v1.2.2
12github.com/agext/levenshtein 12github.com/agext/levenshtein
@@ -16,7 +16,7 @@ github.com/apparentlymart/go-cidr/cidr
16github.com/apparentlymart/go-textseg/textseg 16github.com/apparentlymart/go-textseg/textseg
17# github.com/armon/go-radix v1.0.0 17# github.com/armon/go-radix v1.0.0
18github.com/armon/go-radix 18github.com/armon/go-radix
19# github.com/aws/aws-sdk-go v1.19.18 19# github.com/aws/aws-sdk-go v1.21.7
20github.com/aws/aws-sdk-go/aws 20github.com/aws/aws-sdk-go/aws
21github.com/aws/aws-sdk-go/aws/credentials 21github.com/aws/aws-sdk-go/aws/credentials
22github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds 22github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
@@ -29,10 +29,10 @@ github.com/aws/aws-sdk-go/internal/sdkio
29github.com/aws/aws-sdk-go/internal/ini 29github.com/aws/aws-sdk-go/internal/ini
30github.com/aws/aws-sdk-go/internal/shareddefaults 30github.com/aws/aws-sdk-go/internal/shareddefaults
31github.com/aws/aws-sdk-go/aws/client 31github.com/aws/aws-sdk-go/aws/client
32github.com/aws/aws-sdk-go/aws/request
32github.com/aws/aws-sdk-go/internal/sdkuri 33github.com/aws/aws-sdk-go/internal/sdkuri
33github.com/aws/aws-sdk-go/aws/client/metadata 34github.com/aws/aws-sdk-go/aws/client/metadata
34github.com/aws/aws-sdk-go/aws/corehandlers 35github.com/aws/aws-sdk-go/aws/corehandlers
35github.com/aws/aws-sdk-go/aws/request
36github.com/aws/aws-sdk-go/aws/credentials/processcreds 36github.com/aws/aws-sdk-go/aws/credentials/processcreds
37github.com/aws/aws-sdk-go/aws/credentials/stscreds 37github.com/aws/aws-sdk-go/aws/credentials/stscreds
38github.com/aws/aws-sdk-go/aws/csm 38github.com/aws/aws-sdk-go/aws/csm
@@ -45,11 +45,13 @@ github.com/aws/aws-sdk-go/private/protocol/eventstream
45github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi 45github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
46github.com/aws/aws-sdk-go/private/protocol/rest 46github.com/aws/aws-sdk-go/private/protocol/rest
47github.com/aws/aws-sdk-go/private/protocol/restxml 47github.com/aws/aws-sdk-go/private/protocol/restxml
48github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
48github.com/aws/aws-sdk-go/internal/sdkrand 49github.com/aws/aws-sdk-go/internal/sdkrand
49github.com/aws/aws-sdk-go/service/sts 50github.com/aws/aws-sdk-go/service/sts
51github.com/aws/aws-sdk-go/service/sts/stsiface
50github.com/aws/aws-sdk-go/aws/credentials/endpointcreds 52github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
51github.com/aws/aws-sdk-go/private/protocol/query 53github.com/aws/aws-sdk-go/private/protocol/query
52github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil 54github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
53github.com/aws/aws-sdk-go/private/protocol/query/queryutil 55github.com/aws/aws-sdk-go/private/protocol/query/queryutil
54# github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d 56# github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
55github.com/bgentry/go-netrc/netrc 57github.com/bgentry/go-netrc/netrc
@@ -68,25 +70,28 @@ github.com/golang/protobuf/ptypes/any
68github.com/golang/protobuf/ptypes/duration 70github.com/golang/protobuf/ptypes/duration
69github.com/golang/protobuf/ptypes/timestamp 71github.com/golang/protobuf/ptypes/timestamp
70github.com/golang/protobuf/protoc-gen-go/descriptor 72github.com/golang/protobuf/protoc-gen-go/descriptor
71# github.com/google/go-cmp v0.2.0 73# github.com/google/go-cmp v0.3.0
72github.com/google/go-cmp/cmp 74github.com/google/go-cmp/cmp
73github.com/google/go-cmp/cmp/internal/diff 75github.com/google/go-cmp/cmp/internal/diff
76github.com/google/go-cmp/cmp/internal/flags
74github.com/google/go-cmp/cmp/internal/function 77github.com/google/go-cmp/cmp/internal/function
75github.com/google/go-cmp/cmp/internal/value 78github.com/google/go-cmp/cmp/internal/value
79# github.com/google/go-querystring v1.0.0
80github.com/google/go-querystring/query
76# github.com/googleapis/gax-go/v2 v2.0.3 81# github.com/googleapis/gax-go/v2 v2.0.3
77github.com/googleapis/gax-go/v2 82github.com/googleapis/gax-go/v2
78# github.com/hashicorp/errwrap v1.0.0 83# github.com/hashicorp/errwrap v1.0.0
79github.com/hashicorp/errwrap 84github.com/hashicorp/errwrap
80# github.com/hashicorp/go-cleanhttp v0.5.0 85# github.com/hashicorp/go-cleanhttp v0.5.0
81github.com/hashicorp/go-cleanhttp 86github.com/hashicorp/go-cleanhttp
82# github.com/hashicorp/go-getter v1.3.0 87# github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e
83github.com/hashicorp/go-getter 88github.com/hashicorp/go-getter
84github.com/hashicorp/go-getter/helper/url 89github.com/hashicorp/go-getter/helper/url
85# github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f 90# github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f
86github.com/hashicorp/go-hclog 91github.com/hashicorp/go-hclog
87# github.com/hashicorp/go-multierror v1.0.0 92# github.com/hashicorp/go-multierror v1.0.0
88github.com/hashicorp/go-multierror 93github.com/hashicorp/go-multierror
89# github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb 94# github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26
90github.com/hashicorp/go-plugin 95github.com/hashicorp/go-plugin
91github.com/hashicorp/go-plugin/internal/plugin 96github.com/hashicorp/go-plugin/internal/plugin
92# github.com/hashicorp/go-safetemp v1.0.0 97# github.com/hashicorp/go-safetemp v1.0.0
@@ -105,7 +110,7 @@ github.com/hashicorp/hcl/hcl/scanner
105github.com/hashicorp/hcl/hcl/strconv 110github.com/hashicorp/hcl/hcl/strconv
106github.com/hashicorp/hcl/json/scanner 111github.com/hashicorp/hcl/json/scanner
107github.com/hashicorp/hcl/json/token 112github.com/hashicorp/hcl/json/token
108# github.com/hashicorp/hcl2 v0.0.0-20190515223218-4b22149b7cef 113# github.com/hashicorp/hcl2 v0.0.0-20190725010614-0c3fe388e450
109github.com/hashicorp/hcl2/hcl 114github.com/hashicorp/hcl2/hcl
110github.com/hashicorp/hcl2/hcl/hclsyntax 115github.com/hashicorp/hcl2/hcl/hclsyntax
111github.com/hashicorp/hcl2/hcldec 116github.com/hashicorp/hcl2/hcldec
@@ -123,7 +128,7 @@ github.com/hashicorp/hil/parser
123github.com/hashicorp/hil/scanner 128github.com/hashicorp/hil/scanner
124# github.com/hashicorp/logutils v1.0.0 129# github.com/hashicorp/logutils v1.0.0
125github.com/hashicorp/logutils 130github.com/hashicorp/logutils
126# github.com/hashicorp/terraform v0.12.0 131# github.com/hashicorp/terraform v0.12.6
127github.com/hashicorp/terraform/plugin 132github.com/hashicorp/terraform/plugin
128github.com/hashicorp/terraform/helper/schema 133github.com/hashicorp/terraform/helper/schema
129github.com/hashicorp/terraform/terraform 134github.com/hashicorp/terraform/terraform
@@ -204,6 +209,8 @@ github.com/posener/complete
204github.com/posener/complete/cmd/install 209github.com/posener/complete/cmd/install
205github.com/posener/complete/cmd 210github.com/posener/complete/cmd
206github.com/posener/complete/match 211github.com/posener/complete/match
212# github.com/satori/go.uuid v1.2.0
213github.com/satori/go.uuid
207# github.com/spf13/afero v1.2.1 214# github.com/spf13/afero v1.2.1
208github.com/spf13/afero 215github.com/spf13/afero
209github.com/spf13/afero/mem 216github.com/spf13/afero/mem
@@ -215,7 +222,7 @@ github.com/ulikunitz/xz/internal/hash
215# github.com/vmihailenco/msgpack v4.0.1+incompatible 222# github.com/vmihailenco/msgpack v4.0.1+incompatible
216github.com/vmihailenco/msgpack 223github.com/vmihailenco/msgpack
217github.com/vmihailenco/msgpack/codes 224github.com/vmihailenco/msgpack/codes
218# github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec 225# github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f
219github.com/zclconf/go-cty/cty 226github.com/zclconf/go-cty/cty
220github.com/zclconf/go-cty/cty/msgpack 227github.com/zclconf/go-cty/cty/msgpack
221github.com/zclconf/go-cty/cty/convert 228github.com/zclconf/go-cty/cty/convert
@@ -224,6 +231,8 @@ github.com/zclconf/go-cty/cty/gocty
224github.com/zclconf/go-cty/cty/set 231github.com/zclconf/go-cty/cty/set
225github.com/zclconf/go-cty/cty/function 232github.com/zclconf/go-cty/cty/function
226github.com/zclconf/go-cty/cty/function/stdlib 233github.com/zclconf/go-cty/cty/function/stdlib
234# github.com/zclconf/go-cty-yaml v1.0.1
235github.com/zclconf/go-cty-yaml
227# go.opencensus.io v0.18.0 236# go.opencensus.io v0.18.0
228go.opencensus.io/trace 237go.opencensus.io/trace
229go.opencensus.io/plugin/ochttp 238go.opencensus.io/plugin/ochttp
@@ -239,7 +248,7 @@ go.opencensus.io/trace/propagation
239go.opencensus.io 248go.opencensus.io
240go.opencensus.io/stats/internal 249go.opencensus.io/stats/internal
241go.opencensus.io/internal/tagencoding 250go.opencensus.io/internal/tagencoding
242# golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 251# golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
243golang.org/x/crypto/openpgp 252golang.org/x/crypto/openpgp
244golang.org/x/crypto/bcrypt 253golang.org/x/crypto/bcrypt
245golang.org/x/crypto/openpgp/armor 254golang.org/x/crypto/openpgp/armor