Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/agext/levenshtein/.gitignore2
-rw-r--r--vendor/github.com/agext/levenshtein/.travis.yml70
-rw-r--r--vendor/github.com/agext/levenshtein/DCO36
-rw-r--r--vendor/github.com/agext/levenshtein/LICENSE201
-rw-r--r--vendor/github.com/agext/levenshtein/MAINTAINERS1
-rw-r--r--vendor/github.com/agext/levenshtein/NOTICE5
-rw-r--r--vendor/github.com/agext/levenshtein/README.md38
-rw-r--r--vendor/github.com/agext/levenshtein/levenshtein.go290
-rw-r--r--vendor/github.com/agext/levenshtein/params.go152
-rw-r--r--vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go102
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/LICENSE95
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go30
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/generate.go7
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go5276
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl132
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl1583
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go307
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go212
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/tables.go5700
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb335
-rw-r--r--vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go19
-rw-r--r--vendor/github.com/armon/go-radix/.gitignore22
-rw-r--r--vendor/github.com/armon/go-radix/.travis.yml3
-rw-r--r--vendor/github.com/armon/go-radix/LICENSE20
-rw-r--r--vendor/github.com/armon/go-radix/README.md38
-rw-r--r--vendor/github.com/armon/go-radix/radix.go496
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/client/client.go10
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go80
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/client/logger.go106
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go1
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/config.go26
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go6
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/convert_types.go18
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go28
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go37
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go18
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go6
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go46
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go67
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go51
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go54
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go231
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go56
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go8
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go24
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go1059
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go22
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go12
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/logger.go10
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go18
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request.go210
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go40
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go8
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/validation.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go16
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/doc.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go35
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/session.go64
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go173
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/types.go83
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/version.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go10
-rw-r--r--vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go12
-rw-r--r--vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go29
-rw-r--r--vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go23
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go144
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go199
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go114
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go23
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go196
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go24
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go166
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go501
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go103
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go76
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go81
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go11
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go33
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go20
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go72
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go18
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go20
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go1
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/api.go5185
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go249
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go36
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go24
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/doc.go64
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go13
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/service.go8
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/sse.go18
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go3
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go8
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/api.go317
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/doc.go64
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/service.go6
-rw-r--r--vendor/github.com/bgentry/speakeasy/.gitignore2
-rw-r--r--vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS201
-rw-r--r--vendor/github.com/bgentry/speakeasy/Readme.md30
-rw-r--r--vendor/github.com/bgentry/speakeasy/speakeasy.go49
-rw-r--r--vendor/github.com/bgentry/speakeasy/speakeasy_unix.go93
-rw-r--r--vendor/github.com/bgentry/speakeasy/speakeasy_windows.go41
-rw-r--r--vendor/github.com/davecgh/go-spew/LICENSE2
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/bypass.go187
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/bypasssafe.go2
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/common.go2
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/dump.go10
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/format.go4
-rw-r--r--vendor/github.com/go-ini/ini/.travis.yml13
-rw-r--r--vendor/github.com/go-ini/ini/README.md8
-rw-r--r--vendor/github.com/go-ini/ini/README_ZH.md6
-rw-r--r--vendor/github.com/go-ini/ini/ini.go54
-rw-r--r--vendor/github.com/go-ini/ini/key.go104
-rw-r--r--vendor/github.com/go-ini/ini/parser.go14
-rw-r--r--vendor/github.com/go-ini/ini/section.go25
-rw-r--r--vendor/github.com/go-ini/ini/struct.go37
-rw-r--r--vendor/github.com/golang/protobuf/AUTHORS3
-rw-r--r--vendor/github.com/golang/protobuf/CONTRIBUTORS3
-rw-r--r--vendor/github.com/golang/protobuf/LICENSE28
-rw-r--r--vendor/github.com/golang/protobuf/proto/clone.go253
-rw-r--r--vendor/github.com/golang/protobuf/proto/decode.go428
-rw-r--r--vendor/github.com/golang/protobuf/proto/discard.go350
-rw-r--r--vendor/github.com/golang/protobuf/proto/encode.go203
-rw-r--r--vendor/github.com/golang/protobuf/proto/equal.go300
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions.go543
-rw-r--r--vendor/github.com/golang/protobuf/proto/lib.go979
-rw-r--r--vendor/github.com/golang/protobuf/proto/message_set.go314
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_reflect.go357
-rw-r--r--vendor/github.com/golang/protobuf/proto/pointer_unsafe.go308
-rw-r--r--vendor/github.com/golang/protobuf/proto/properties.go544
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_marshal.go2767
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_merge.go654
-rw-r--r--vendor/github.com/golang/protobuf/proto/table_unmarshal.go2051
-rw-r--r--vendor/github.com/golang/protobuf/proto/text.go843
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_parser.go880
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any.go141
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.pb.go191
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/any/any.proto149
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/doc.go35
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration.go102
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go159
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/duration/duration.proto117
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp.go134
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go175
-rw-r--r--vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto133
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go1
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/go.mod1
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/handlers.go43
-rw-r--r--vendor/github.com/hashicorp/go-getter/.travis.yml8
-rw-r--r--vendor/github.com/hashicorp/go-getter/README.md58
-rw-r--r--vendor/github.com/hashicorp/go-getter/appveyor.yml2
-rw-r--r--vendor/github.com/hashicorp/go-getter/client.go31
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress.go29
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_gzip.go2
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_tar.go138
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_tbz2.go64
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_testing.go36
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_tgz.go62
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_txz.go39
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_xz.go49
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_zip.go5
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect.go6
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect_file.go2
-rw-r--r--vendor/github.com/hashicorp/go-getter/get.go9
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_git.go36
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_hg.go10
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_http.go49
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_s3.go75
-rw-r--r--vendor/github.com/hashicorp/go-getter/source.go26
-rw-r--r--vendor/github.com/hashicorp/go-hclog/LICENSE21
-rw-r--r--vendor/github.com/hashicorp/go-hclog/README.md123
-rw-r--r--vendor/github.com/hashicorp/go-hclog/global.go34
-rw-r--r--vendor/github.com/hashicorp/go-hclog/int.go385
-rw-r--r--vendor/github.com/hashicorp/go-hclog/log.go138
-rw-r--r--vendor/github.com/hashicorp/go-hclog/stacktrace.go108
-rw-r--r--vendor/github.com/hashicorp/go-hclog/stdlog.go62
-rw-r--r--vendor/github.com/hashicorp/go-plugin/README.md49
-rw-r--r--vendor/github.com/hashicorp/go-plugin/client.go293
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_broker.go455
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go190
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_broker.proto14
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_client.go107
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_server.go132
-rw-r--r--vendor/github.com/hashicorp/go-plugin/log_entry.go73
-rw-r--r--vendor/github.com/hashicorp/go-plugin/plugin.go33
-rw-r--r--vendor/github.com/hashicorp/go-plugin/protocol.go45
-rw-r--r--vendor/github.com/hashicorp/go-plugin/rpc_client.go47
-rw-r--r--vendor/github.com/hashicorp/go-plugin/rpc_server.go20
-rw-r--r--vendor/github.com/hashicorp/go-plugin/server.go135
-rw-r--r--vendor/github.com/hashicorp/go-plugin/testing.go86
-rw-r--r--vendor/github.com/hashicorp/go-safetemp/LICENSE362
-rw-r--r--vendor/github.com/hashicorp/go-safetemp/README.md10
-rw-r--r--vendor/github.com/hashicorp/go-safetemp/safetemp.go40
-rw-r--r--vendor/github.com/hashicorp/go-uuid/.travis.yml12
-rw-r--r--vendor/github.com/hashicorp/go-uuid/README.md4
-rw-r--r--vendor/github.com/hashicorp/go-uuid/go.mod1
-rw-r--r--vendor/github.com/hashicorp/go-uuid/uuid.go16
-rw-r--r--vendor/github.com/hashicorp/go-version/.travis.yml2
-rw-r--r--vendor/github.com/hashicorp/go-version/constraint.go34
-rw-r--r--vendor/github.com/hashicorp/go-version/go.mod1
-rw-r--r--vendor/github.com/hashicorp/go-version/version.go59
-rw-r--r--vendor/github.com/hashicorp/hcl2/LICENSE353
-rw-r--r--vendor/github.com/hashicorp/hcl2/gohcl/decode.go304
-rw-r--r--vendor/github.com/hashicorp/hcl2/gohcl/doc.go49
-rw-r--r--vendor/github.com/hashicorp/hcl2/gohcl/schema.go174
-rw-r--r--vendor/github.com/hashicorp/hcl2/gohcl/types.go16
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go103
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go168
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go24
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/doc.go1
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/eval_context.go25
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/expr_call.go46
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/expr_list.go37
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/expr_map.go44
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go68
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go24
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go7
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go1275
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go258
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go192
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go76
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go99
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go20
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go9
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go21
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go41
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go22
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go1836
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go728
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go159
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go212
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go171
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go301
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl105
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go5443
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl376
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md923
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go379
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go272
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go69
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb335
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode_derived.rl2135
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go86
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go77
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/ast.go121
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go33
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/doc.go8
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go70
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/parser.go491
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go25
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/public.go94
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go293
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/spec.md405
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/structure.go616
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go29
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/merged.go226
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/ops.go147
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/pos.go262
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go148
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/schema.go21
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/spec.md691
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/static_expr.go40
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/structure.go151
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/traversal.go352
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go121
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go21
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/decode.go36
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/doc.go12
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/gob.go23
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/public.go78
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/schema.go36
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/spec.go998
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/variables.go34
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclparse/parser.go123
-rw-r--r--vendor/github.com/hashicorp/hil/scanner/scanner.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/config/append.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/config/config.go396
-rw-r--r--vendor/github.com/hashicorp/terraform/config/config_string.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/doc.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go92
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/schema.go107
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go134
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go85
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go246
-rw-r--r--vendor/github.com/hashicorp/terraform/config/import_tree.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go429
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate_walk.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/config/loader.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/config/loader_hcl.go126
-rw-r--r--vendor/github.com/hashicorp/terraform/config/loader_hcl2.go473
-rw-r--r--vendor/github.com/hashicorp/terraform/config/merge.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/get.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/inode.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/module.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/storage.go365
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/testing.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/tree.go314
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/versions.go95
-rw-r--r--vendor/github.com/hashicorp/terraform/config/raw_config.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/config/resource_mode_string.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/config/testing.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/dag.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/marshal.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/walk.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go154
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/experiment/id.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/logging/logging.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/logging/transport.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/id.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/state.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing.go348
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/wait.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/backend.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go155
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provider.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource.go98
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go39
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go559
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/schema.go252
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/set.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/testing.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/closer.go83
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go128
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go151
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go66
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/shadow/value.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/httpclient/client.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/httpclient/useragent.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/client.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/find.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/get.go134
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provider.go39
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/client.go227
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/errors.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go140
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/regsrc/module.go205
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/module.go93
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/module_list.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/module_provider.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/module_versions.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/pagination.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/redirect.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/auth/cache.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go63
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/auth/static.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/disco/disco.go259
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/disco/host.go264
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/label_iter.go69
-rw-r--r--vendor/github.com/hashicorp/terraform/svchost/svchost.go207
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context.go189
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_import.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/diff.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_apply.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go102
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_diff.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_local.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_output.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provider.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state.go35
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/features.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/interpolate.go84
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go1
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_local.go66
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/path.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/plan.go52
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_address.go37
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/schemas.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_components.go273
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_context.go158
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go815
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go282
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state.go108
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/test_failure9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go62
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_local.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_output.go46
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider.go574
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_reference.go100
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_targets.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/user_agent.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version_required.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go181
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/doc.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/error.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/hcl.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go53
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/source_range.go35
-rw-r--r--vendor/github.com/hashicorp/terraform/version/version.go36
-rw-r--r--vendor/github.com/mattn/go-isatty/LICENSE9
-rw-r--r--vendor/github.com/mattn/go-isatty/README.md37
-rw-r--r--vendor/github.com/mattn/go-isatty/doc.go2
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_appengine.go9
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_bsd.go18
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_linux.go18
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_solaris.go16
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_windows.go19
-rw-r--r--vendor/github.com/mitchellh/cli/.travis.yml13
-rw-r--r--vendor/github.com/mitchellh/cli/LICENSE354
-rw-r--r--vendor/github.com/mitchellh/cli/Makefile20
-rw-r--r--vendor/github.com/mitchellh/cli/README.md67
-rw-r--r--vendor/github.com/mitchellh/cli/autocomplete.go43
-rw-r--r--vendor/github.com/mitchellh/cli/cli.go715
-rw-r--r--vendor/github.com/mitchellh/cli/command.go67
-rw-r--r--vendor/github.com/mitchellh/cli/command_mock.go63
-rw-r--r--vendor/github.com/mitchellh/cli/help.go79
-rw-r--r--vendor/github.com/mitchellh/cli/ui.go187
-rw-r--r--vendor/github.com/mitchellh/cli/ui_colored.go69
-rw-r--r--vendor/github.com/mitchellh/cli/ui_concurrent.go54
-rw-r--r--vendor/github.com/mitchellh/cli/ui_mock.go111
-rw-r--r--vendor/github.com/mitchellh/cli/ui_writer.go18
-rw-r--r--vendor/github.com/mitchellh/copystructure/copystructure.go87
-rw-r--r--vendor/github.com/mitchellh/go-testing-interface/.travis.yml12
-rw-r--r--vendor/github.com/mitchellh/go-testing-interface/LICENSE21
-rw-r--r--vendor/github.com/mitchellh/go-testing-interface/README.md52
-rw-r--r--vendor/github.com/mitchellh/go-testing-interface/testing.go84
-rw-r--r--vendor/github.com/mitchellh/go-testing-interface/testing_go19.go80
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/LICENSE.md21
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/README.md39
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/wordwrap.go73
-rw-r--r--vendor/github.com/mitchellh/reflectwalk/location.go2
-rw-r--r--vendor/github.com/mitchellh/reflectwalk/location_string.go8
-rw-r--r--vendor/github.com/mitchellh/reflectwalk/reflectwalk.go108
-rw-r--r--vendor/github.com/oklog/run/.gitignore14
-rw-r--r--vendor/github.com/oklog/run/.travis.yml12
-rw-r--r--vendor/github.com/oklog/run/LICENSE201
-rw-r--r--vendor/github.com/oklog/run/README.md73
-rw-r--r--vendor/github.com/oklog/run/group.go62
-rw-r--r--vendor/github.com/posener/complete/.gitignore2
-rw-r--r--vendor/github.com/posener/complete/.travis.yml17
-rw-r--r--vendor/github.com/posener/complete/LICENSE.txt21
-rw-r--r--vendor/github.com/posener/complete/args.go102
-rw-r--r--vendor/github.com/posener/complete/cmd/cmd.go128
-rw-r--r--vendor/github.com/posener/complete/cmd/install/bash.go32
-rw-r--r--vendor/github.com/posener/complete/cmd/install/install.go92
-rw-r--r--vendor/github.com/posener/complete/cmd/install/utils.go118
-rw-r--r--vendor/github.com/posener/complete/cmd/install/zsh.go39
-rw-r--r--vendor/github.com/posener/complete/command.go111
-rw-r--r--vendor/github.com/posener/complete/complete.go95
-rw-r--r--vendor/github.com/posener/complete/log.go23
-rw-r--r--vendor/github.com/posener/complete/match/file.go19
-rw-r--r--vendor/github.com/posener/complete/match/match.go6
-rw-r--r--vendor/github.com/posener/complete/match/prefix.go9
-rw-r--r--vendor/github.com/posener/complete/metalinter.json21
-rw-r--r--vendor/github.com/posener/complete/predict.go41
-rw-r--r--vendor/github.com/posener/complete/predict_files.go108
-rw-r--r--vendor/github.com/posener/complete/predict_set.go12
-rw-r--r--vendor/github.com/posener/complete/readme.md116
-rw-r--r--vendor/github.com/posener/complete/test.sh12
-rw-r--r--vendor/github.com/posener/complete/utils.go46
-rw-r--r--vendor/github.com/satori/go.uuid/.travis.yml21
-rw-r--r--vendor/github.com/satori/go.uuid/LICENSE20
-rw-r--r--vendor/github.com/satori/go.uuid/README.md65
-rw-r--r--vendor/github.com/satori/go.uuid/uuid.go481
-rw-r--r--vendor/github.com/ulikunitz/xz/.gitignore25
-rw-r--r--vendor/github.com/ulikunitz/xz/LICENSE26
-rw-r--r--vendor/github.com/ulikunitz/xz/README.md71
-rw-r--r--vendor/github.com/ulikunitz/xz/TODO.md315
-rw-r--r--vendor/github.com/ulikunitz/xz/bits.go74
-rw-r--r--vendor/github.com/ulikunitz/xz/crc.go54
-rw-r--r--vendor/github.com/ulikunitz/xz/example.go40
-rw-r--r--vendor/github.com/ulikunitz/xz/format.go728
-rw-r--r--vendor/github.com/ulikunitz/xz/fox.xzbin0 -> 104 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go181
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/doc.go14
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go66
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/roller.go29
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go457
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bintree.go523
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bitops.go45
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/breader.go39
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/buffer.go171
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bytewriter.go37
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/decoder.go277
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/decoderdict.go135
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/directcodec.go49
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/distcodec.go156
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/encoder.go268
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/encoderdict.go149
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/fox.lzmabin0 -> 67 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/hashtable.go309
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/header.go167
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/header2.go398
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go129
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/literalcodec.go132
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go52
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/operation.go80
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/prob.go53
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/properties.go69
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/rangecodec.go248
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/reader.go100
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/reader2.go232
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/state.go151
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/treecodecs.go133
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/writer.go209
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/writer2.go305
-rw-r--r--vendor/github.com/ulikunitz/xz/lzmafilter.go117
-rw-r--r--vendor/github.com/ulikunitz/xz/make-docs5
-rw-r--r--vendor/github.com/ulikunitz/xz/reader.go373
-rw-r--r--vendor/github.com/ulikunitz/xz/writer.go386
-rw-r--r--vendor/github.com/zclconf/go-cty/LICENSE21
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/capsule.go89
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/collection.go34
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go165
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/conversion.go120
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go226
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go33
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go50
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/doc.go15
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/public.go83
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go69
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/convert/unify.go66
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/doc.go18
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/element_iterator.go191
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/error.go55
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/argument.go50
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/doc.go6
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/error.go50
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/function.go291
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go73
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go112
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go140
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go93
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go13
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go496
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go358
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl182
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go107
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go72
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go428
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go130
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go195
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go234
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go31
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/gob.go125
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/gocty/doc.go7
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go43
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/gocty/in.go528
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/gocty/out.go705
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go108
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/helper.go99
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json.go176
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/doc.go11
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/marshal.go189
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/simple.go41
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/type.go23
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/type_implied.go171
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go459
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/json/value.go65
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/list_type.go68
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/map_type.go68
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/null.go14
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/object_type.go135
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/path.go186
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/primitive_type.go122
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set/gob.go76
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set/iterator.go36
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set/ops.go199
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set/rules.go25
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set/set.go62
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set_helper.go126
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set_internals.go146
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/set_type.go66
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/tuple_type.go121
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/type.go95
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/type_conform.go142
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/types_to_register.go57
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/unknown.go79
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/value.go98
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/value_init.go276
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/value_ops.go1071
-rw-r--r--vendor/github.com/zclconf/go-cty/cty/walk.go182
647 files changed, 95295 insertions, 6750 deletions
diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore
new file mode 100644
index 0000000..404365f
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/.gitignore
@@ -0,0 +1,2 @@
+README.html
+coverage.out
diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml
new file mode 100644
index 0000000..95be94a
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/.travis.yml
@@ -0,0 +1,70 @@
+language: go
+sudo: false
+go:
+  - 1.8
+  - 1.7.5
+  - 1.7.4
+  - 1.7.3
+  - 1.7.2
+  - 1.7.1
+  - 1.7
+  - tip
+  - 1.6.4
+  - 1.6.3
+  - 1.6.2
+  - 1.6.1
+  - 1.6
+  - 1.5.4
+  - 1.5.3
+  - 1.5.2
+  - 1.5.1
+  - 1.5
+  - 1.4.3
+  - 1.4.2
+  - 1.4.1
+  - 1.4
+  - 1.3.3
+  - 1.3.2
+  - 1.3.1
+  - 1.3
+  - 1.2.2
+  - 1.2.1
+  - 1.2
+  - 1.1.2
+  - 1.1.1
+  - 1.1
+before_install:
+  - go get github.com/mattn/goveralls
+script:
+  - $HOME/gopath/bin/goveralls -service=travis-ci
+notifications:
+  email:
+    on_success: never
+matrix:
+  fast_finish: true
+  allow_failures:
+    - go: tip
+    - go: 1.6.4
+    - go: 1.6.3
+    - go: 1.6.2
+    - go: 1.6.1
+    - go: 1.6
+    - go: 1.5.4
+    - go: 1.5.3
+    - go: 1.5.2
+    - go: 1.5.1
+    - go: 1.5
+    - go: 1.4.3
+    - go: 1.4.2
+    - go: 1.4.1
+    - go: 1.4
+    - go: 1.3.3
+    - go: 1.3.2
+    - go: 1.3.1
+    - go: 1.3
+    - go: 1.2.2
+    - go: 1.2.1
+    - go: 1.2
+    - go: 1.1.2
+    - go: 1.1.1
+    - go: 1.1
diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO
new file mode 100644
index 0000000..716561d
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/DCO
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS
new file mode 100644
index 0000000..726c2af
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/MAINTAINERS
@@ -0,0 +1 @@
Alex Bucataru <alex@alrux.com> (@AlexBucataru)
diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE
new file mode 100644
index 0000000..eaffaab
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/NOTICE
@@ -0,0 +1,5 @@
1Alrux Go EXTensions (AGExt) - package levenshtein
2Copyright 2016 ALRUX Inc.
3
4This product includes software developed at ALRUX Inc.
5(http://www.alrux.com/).
diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md
new file mode 100644
index 0000000..90509c2
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/README.md
@@ -0,0 +1,38 @@
1# A Go package for calculating the Levenshtein distance between two strings
2
3[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
4[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein) 
5[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
6[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
7[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
8
9
10This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
11
12## Project Status
13
14v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on an "AS IS" basis.
15
16This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
17
18## Overview
19
20The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
21
22A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
23
24The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
25
26The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
27
28The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
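
A minimal usage sketch of the API described above (the strings and the 0.8 threshold are illustrative; the function and method names are the ones exported by this package):

```
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// nil params: cost 1 for every operation, no maximum cost, no minimum score.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into the 0..1 range.
	fmt.Println(levenshtein.Similarity("kitten", "sitting", nil))

	// Match adds the prefix bonus; MinScore rounds clearly dissimilar pairs down to 0.
	p := levenshtein.NewParams().MinScore(0.8)
	fmt.Println(levenshtein.Match("levenshtein", "levenshtien", p))
}
```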
29
30## Installation
31
32```
33go get github.com/agext/levenshtein
34```
35
36## License
37
38Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go
new file mode 100644
index 0000000..df69ce7
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/levenshtein.go
@@ -0,0 +1,290 @@
1// Copyright 2016 ALRUX Inc.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15/*
16Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
17
18The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
19
20A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
21
22The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
23
24The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
25
26The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
27*/
28package levenshtein
29
30// Calculate determines the Levenshtein distance between two strings, using
31// the given costs for each edit operation. It returns the distance along with
32// the lengths of the longest common prefix and suffix.
33//
34// If maxCost is non-zero, the calculation stops as soon as the distance is determined
35// to be greater than maxCost. Therefore, any return value higher than maxCost is a
36// lower bound for the actual distance.
37func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
38 l1, l2 := len(str1), len(str2)
39 // trim common prefix, if any, as it doesn't affect the distance
40 for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
41 if str1[prefixLen] != str2[prefixLen] {
42 break
43 }
44 }
45 str1, str2 = str1[prefixLen:], str2[prefixLen:]
46 l1 -= prefixLen
47 l2 -= prefixLen
48 // trim common suffix, if any, as it doesn't affect the distance
49 for 0 < l1 && 0 < l2 {
50 if str1[l1-1] != str2[l2-1] {
51 str1, str2 = str1[:l1], str2[:l2]
52 break
53 }
54 l1--
55 l2--
56 suffixLen++
57 }
58 // if the first string is empty, the distance is the length of the second string times the cost of insertion
59 if l1 == 0 {
60 dist = l2 * insCost
61 return
62 }
63 // if the second string is empty, the distance is the length of the first string times the cost of deletion
64 if l2 == 0 {
65 dist = l1 * delCost
66 return
67 }
68
69 // variables used in inner "for" loops
70 var y, dy, c, l int
71
72 // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
73 if maxCost > 0 {
74 if subCost < delCost+insCost {
75 if maxCost >= l1*subCost+(l2-l1)*insCost {
76 maxCost = 0
77 }
78 } else {
79 if maxCost >= l1*delCost+l2*insCost {
80 maxCost = 0
81 }
82 }
83 }
84
85 if maxCost > 0 {
86 // prefer the longer string first, to minimize time;
87 // a swap also transposes the meanings of insertion and deletion.
88 if l1 < l2 {
89 str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
90 }
91
92 // the length differential times cost of deletion is a lower bound for the cost;
93 // if it is higher than the maxCost, there is no point going into the main calculation.
94 if dist = (l1 - l2) * delCost; dist > maxCost {
95 return
96 }
97
98 d := make([]int, l1+1)
99
100 // offset and length of d in the current row
101 doff, dlen := 0, 1
102 for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
103 d[y] = dy
104 y++
105 dy = y * delCost
106 }
107 // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
108
109 for x := 0; x < l2; x++ {
110 dy, d[doff] = d[doff], d[doff]+insCost
111 for d[doff] > maxCost && dlen > 0 {
112 if str1[doff] != str2[x] {
113 dy += subCost
114 }
115 doff++
116 dlen--
117 if c = d[doff] + insCost; c < dy {
118 dy = c
119 }
120 dy, d[doff] = d[doff], dy
121 }
122 for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
123 if str1[y] != str2[x] {
124 dy += subCost
125 }
126 if c = d[y] + delCost; c < dy {
127 dy = c
128 }
129 y++
130 if c = d[y] + insCost; c < dy {
131 dy = c
132 }
133 }
134 if y < l1 {
135 if str1[y] != str2[x] {
136 dy += subCost
137 }
138 if c = d[y] + delCost; c < dy {
139 dy = c
140 }
141 for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
142 y++
143 dlen++
144 }
145 }
146 // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
147 if dlen == 0 {
148 dist = maxCost + 1
149 return
150 }
151 }
152 if doff+dlen-1 < l1 {
153 dist = maxCost + 1
154 return
155 }
156 dist = d[l1]
157 } else {
158 // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate whether it is
159 // worth implementing the diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
160 // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
161
162 // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
163 // a swap also transposes the meanings of insertion and deletion.
164 if l1 > l2 {
165 str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
166 }
167 d := make([]int, l1+1)
168
169 for y = 1; y <= l1; y++ {
170 d[y] = y * delCost
171 }
172 for x := 0; x < l2; x++ {
173 dy, d[0] = d[0], d[0]+insCost
174 for y = 0; y < l1; dy, d[y] = d[y], dy {
175 if str1[y] != str2[x] {
176 dy += subCost
177 }
178 if c = d[y] + delCost; c < dy {
179 dy = c
180 }
181 y++
182 if c = d[y] + insCost; c < dy {
183 dy = c
184 }
185 }
186 }
187 dist = d[l1]
188 }
189
190 return
191}
192
193// Distance returns the Levenshtein distance between str1 and str2, using the
194// default or provided cost values. Pass nil for the third argument to use the
195// default cost of 1 for all three operations, with no maximum.
196func Distance(str1, str2 string, p *Params) int {
197 if p == nil {
198 p = defaultParams
199 }
200 dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
201 return dist
202}
203
204// Similarity returns a score in the range of 0..1 for how similar the two strings are.
205// A score of 1 means the strings are identical, and 0 means they have nothing in common.
206//
207// A nil third argument uses the default cost of 1 for all three operations.
208//
209// If a non-zero MinScore value is provided in the parameters, scores lower than it
210// will be returned as 0.
211func Similarity(str1, str2 string, p *Params) float64 {
212 return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
213}
214
215// Match returns a similarity score adjusted by the same method as proposed by Winkler for
216// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
217// similarity score is already over a threshold.
218//
219// The score is in the range of 0..1, with 1 meaning the strings are identical,
220// and 0 meaning they have nothing in common.
221//
222// A nil third argument uses the default cost of 1 for all three operations, maximum length of
223// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
224//
225// If a non-zero MinScore value is provided in the parameters, scores lower than it
226// will be returned as 0.
227func Match(str1, str2 string, p *Params) float64 {
228 s1, s2 := []rune(str1), []rune(str2)
229 l1, l2 := len(s1), len(s2)
230 // two empty strings are identical; shortcut also avoids divByZero issues later on.
231 if l1 == 0 && l2 == 0 {
232 return 1
233 }
234
235 if p == nil {
236 p = defaultParams
237 }
238
239 // a min over 1 can never be satisfied, so the score is 0.
240 if p.minScore > 1 {
241 return 0
242 }
243
244 insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
245 if l1 > l2 {
246 l1, l2, insCost, delCost = l2, l1, delCost, insCost
247 }
248
249 if p.subCost < delCost+insCost {
250 maxDist = l1*p.subCost + (l2-l1)*insCost
251 } else {
252 maxDist = l1*delCost + l2*insCost
253 }
254
255 // a zero min is always satisfied, so no need to set a max cost.
256 if p.minScore > 0 {
257 // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
258 // for the max cost, because a sim score below min cannot receive a bonus.
259 if p.minScore < p.bonusThreshold {
260 // round down the max - a cost equal to a rounded up max would already be under min.
261 max = int((1 - p.minScore) * float64(maxDist))
262 } else {
263 // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
264 // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
265 // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
266 // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
267 // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
268 max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
269 }
270 }
271
272 dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
273 if max > 0 && dist > max {
274 return 0
275 }
276 sim := 1 - float64(dist)/float64(maxDist)
277
278 if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
279 if pl > p.bonusPrefix {
280 pl = p.bonusPrefix
281 }
282 sim += float64(pl) * p.bonusScale * (1 - sim)
283 }
284
285 if sim < p.minScore {
286 return 0
287 }
288
289 return sim
290}
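
For the maxCost shortcut documented on Calculate above, a minimal sketch (the strings and the cap of 3 are illustrative):

```
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// maxCost = 3: the calculation stops once the distance is known to exceed 3,
	// so any returned value above 3 is only a lower bound, not the exact distance.
	dist, prefixLen, suffixLen := levenshtein.Calculate(
		[]rune("distance"), []rune("difference"),
		3, // maxCost
		1, // insCost
		1, // subCost
		1, // delCost
	)
	fmt.Println(dist, prefixLen, suffixLen) // prefixLen=2 ("di"), suffixLen=3 ("nce")
}
```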
diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go
new file mode 100644
index 0000000..a85727b
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/params.go
@@ -0,0 +1,152 @@
1// Copyright 2016 ALRUX Inc.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package levenshtein
16
17// Params represents a set of parameter values for the various formulas involved
18// in the calculation of the Levenshtein string metrics.
19type Params struct {
20 insCost int
21 subCost int
22 delCost int
23 maxCost int
24 minScore float64
25 bonusPrefix int
26 bonusScale float64
27 bonusThreshold float64
28}
29
30var (
31 defaultParams = NewParams()
32)
33
34// NewParams creates a new set of parameters and initializes it with the default values.
35func NewParams() *Params {
36 return &Params{
37 insCost: 1,
38 subCost: 1,
39 delCost: 1,
40 maxCost: 0,
41 minScore: 0,
42 bonusPrefix: 4,
43 bonusScale: .1,
44 bonusThreshold: .7,
45 }
46}
47
48// Clone returns a pointer to a copy of the receiver parameter set, or of a new
49// default parameter set if the receiver is nil.
50func (p *Params) Clone() *Params {
51 if p == nil {
52 return NewParams()
53 }
54 return &Params{
55 insCost: p.insCost,
56 subCost: p.subCost,
57 delCost: p.delCost,
58 maxCost: p.maxCost,
59 minScore: p.minScore,
60 bonusPrefix: p.bonusPrefix,
61 bonusScale: p.bonusScale,
62 bonusThreshold: p.bonusThreshold,
63 }
64}
65
66// InsCost overrides the default value of 1 for the cost of insertion.
67// The new value must be zero or positive.
68func (p *Params) InsCost(v int) *Params {
69 if v >= 0 {
70 p.insCost = v
71 }
72 return p
73}
74
75// SubCost overrides the default value of 1 for the cost of substitution.
76// The new value must be zero or positive.
77func (p *Params) SubCost(v int) *Params {
78 if v >= 0 {
79 p.subCost = v
80 }
81 return p
82}
83
84// DelCost overrides the default value of 1 for the cost of deletion.
85// The new value must be zero or positive.
86func (p *Params) DelCost(v int) *Params {
87 if v >= 0 {
88 p.delCost = v
89 }
90 return p
91}
92
93// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
94// The calculation of Distance() stops when the result is guaranteed to exceed
95// this maximum, returning a lower-bound rather than exact value.
96// The new value must be zero or positive.
97func (p *Params) MaxCost(v int) *Params {
98 if v >= 0 {
99 p.maxCost = v
100 }
101 return p
102}
103
104// MinScore overrides the default value of 0 for the minimum similarity score.
105// Scores below this threshold are returned as 0 by Similarity() and Match().
106// The new value must be zero or positive. Note that a minimum greater than 1
107// can never be satisfied, resulting in a score of 0 for any pair of strings.
108func (p *Params) MinScore(v float64) *Params {
109 if v >= 0 {
110 p.minScore = v
111 }
112 return p
113}
114
115// BonusPrefix overrides the default value for the maximum length of
116// common prefix to be considered for bonus by Match().
117// The new value must be zero or positive.
118func (p *Params) BonusPrefix(v int) *Params {
119 if v >= 0 {
120 p.bonusPrefix = v
121 }
122 return p
123}
124
125// BonusScale overrides the default value for the scaling factor used by Match()
126// in calculating the bonus.
127// The new value must be zero or positive. To guarantee that the similarity score
128// remains in the interval 0..1, this scaling factor is not allowed to exceed
129// 1 / BonusPrefix.
130func (p *Params) BonusScale(v float64) *Params {
131 if v >= 0 {
132 p.bonusScale = v
133 }
134
135 // the bonus cannot exceed (1-sim), or the score may become greater than 1.
136 if float64(p.bonusPrefix)*p.bonusScale > 1 {
137 p.bonusScale = 1 / float64(p.bonusPrefix)
138 }
139
140 return p
141}
142
143// BonusThreshold overrides the default value for the minimum similarity score
144// for which Match() can assign a bonus.
145// The new value must be zero or positive. Note that a threshold greater than 1
146// effectively makes Match() become the equivalent of Similarity().
147func (p *Params) BonusThreshold(v float64) *Params {
148 if v >= 0 {
149 p.bonusThreshold = v
150 }
151 return p
152}
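
The setters above are designed to be chained; a minimal sketch with illustrative cost and threshold values:

```
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	p := levenshtein.NewParams().
		SubCost(2).      // make a substitution as expensive as a delete plus an insert
		BonusPrefix(4).  // consider at most 4 leading runes for the prefix bonus
		BonusScale(0.1). // clamped so that BonusPrefix*BonusScale never exceeds 1
		BonusThreshold(0.7).
		MinScore(0.5) // scores below 0.5 are reported as 0 by Similarity and Match

	fmt.Println(levenshtein.Match("config", "confgi", p))
}
```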
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
index a31cdec..7534473 100644
--- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
@@ -71,8 +71,13 @@ func Host(base *net.IPNet, num int) (net.IP, error) {
71 if numUint64 > maxHostNum { 71 if numUint64 > maxHostNum {
72 return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) 72 return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
73 } 73 }
74 74 var bitlength int
75 return insertNumIntoIP(ip, num, 32), nil 75 if ip.To4() != nil {
76 bitlength = 32
77 } else {
78 bitlength = 128
79 }
80 return insertNumIntoIP(ip, num, bitlength), nil
76} 81}
77 82
78// AddressRange returns the first and last addresses in the given CIDR range. 83// AddressRange returns the first and last addresses in the given CIDR range.
@@ -110,3 +115,96 @@ func AddressCount(network *net.IPNet) uint64 {
110 prefixLen, bits := network.Mask.Size() 115 prefixLen, bits := network.Mask.Size()
111 return 1 << (uint64(bits) - uint64(prefixLen)) 116 return 1 << (uint64(bits) - uint64(prefixLen))
112} 117}
118
119//VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies
120//that none of the subnets overlap and that all subnets are contained in the supernet.
121//It returns an error if either of those conditions is not satisfied.
122func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
123 firstLastIP := make([][]net.IP, len(subnets))
124 for i, s := range subnets {
125 first, last := AddressRange(s)
126 firstLastIP[i] = []net.IP{first, last}
127 }
128 for i, s := range subnets {
129 if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
130 return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
131 }
132 for j := i + 1; j < len(subnets); j++ {
133 first := firstLastIP[j][0]
134 last := firstLastIP[j][1]
135 if s.Contains(first) || s.Contains(last) {
136 return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String())
137 }
138 }
139 }
140 return nil
141}
142
143// PreviousSubnet returns the subnet of the desired mask in the IP space
144// just below the start of the provided IPNet. If the IP space rolls over,
145// then the second return value is true.
146func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
147 startIP := checkIPv4(network.IP)
148 previousIP := make(net.IP, len(startIP))
149 copy(previousIP, startIP)
150 cMask := net.CIDRMask(prefixLen, 8*len(previousIP))
151 previousIP = Dec(previousIP)
152 previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask}
153 if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) {
154 return previous, true
155 }
156 return previous, false
157}
158
159// NextSubnet returns the next available subnet of the desired mask size,
160// starting from the maximum IP of the given subnet.
161// If the IP exceeds the maximum IP, then the second return value is true.
162func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
163 _, currentLast := AddressRange(network)
164 mask := net.CIDRMask(prefixLen, 8*len(currentLast))
165 currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask}
166 _, last := AddressRange(currentSubnet)
167 last = Inc(last)
168 next := &net.IPNet{IP: last.Mask(mask), Mask: mask}
169 if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) {
170 return next, true
171 }
172 return next, false
173}
174
175//Inc increases the IP by one; it returns a new []byte for the IP.
176func Inc(IP net.IP) net.IP {
177 IP = checkIPv4(IP)
178 incIP := make([]byte, len(IP))
179 copy(incIP, IP)
180 for j := len(incIP) - 1; j >= 0; j-- {
181 incIP[j]++
182 if incIP[j] > 0 {
183 break
184 }
185 }
186 return incIP
187}
188
189//Dec decreases the IP by one; it returns a new []byte for the IP.
190func Dec(IP net.IP) net.IP {
191 IP = checkIPv4(IP)
192 decIP := make([]byte, len(IP))
193 copy(decIP, IP)
194 decIP = checkIPv4(decIP)
195 for j := len(decIP) - 1; j >= 0; j-- {
196 decIP[j]--
197 if decIP[j] < 255 {
198 break
199 }
200 }
201 return decIP
202}
203
204func checkIPv4(ip net.IP) net.IP {
205 // Go for some reason allocs IPv6len for IPv4 so we have to correct it
206 if v4 := ip.To4(); v4 != nil {
207 return v4
208 }
209 return ip
210}
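
A minimal sketch of the helpers added in this change (the prefixes and host number are illustrative; 2001:db8::/64 is carved from the IPv6 documentation prefix):

```
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	// The bit-length fix above is aimed at IPv6 prefixes, so Host can be used with one.
	_, v6net, _ := net.ParseCIDR("2001:db8::/64")
	ip, err := cidr.Host(v6net, 5)
	fmt.Println(ip, err)

	// VerifyNoOverlap errors if any subnet overlaps another or escapes the supernet.
	_, super, _ := net.ParseCIDR("10.0.0.0/16")
	_, a, _ := net.ParseCIDR("10.0.0.0/24")
	_, b, _ := net.ParseCIDR("10.0.1.0/24")
	fmt.Println(cidr.VerifyNoOverlap([]*net.IPNet{a, b}, super)) // <nil>

	// NextSubnet returns the /24 immediately after a, plus a rollover flag.
	next, rolledOver := cidr.NextSubnet(a, 24)
	fmt.Println(next, rolledOver) // 10.0.1.0/24 false
}
```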
diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE
new file mode 100644
index 0000000..684b03b
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/LICENSE
@@ -0,0 +1,95 @@
1Copyright (c) 2017 Martin Atkins
2
3Permission is hereby granted, free of charge, to any person obtaining a copy
4of this software and associated documentation files (the "Software"), to deal
5in the Software without restriction, including without limitation the rights
6to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7copies of the Software, and to permit persons to whom the Software is
8furnished to do so, subject to the following conditions:
9
10The above copyright notice and this permission notice shall be included in all
11copies or substantial portions of the Software.
12
13THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19SOFTWARE.
20
21---------
22
23Unicode table generation programs are under a separate copyright and license:
24
25Copyright (c) 2014 Couchbase, Inc.
26Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
27except in compliance with the License. You may obtain a copy of the License at
28
29 http://www.apache.org/licenses/LICENSE-2.0
30
31Unless required by applicable law or agreed to in writing, software distributed under the
32License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
33either express or implied. See the License for the specific language governing permissions
34and limitations under the License.
35
36---------
37
38Grapheme break data is provided as part of the Unicode character database,
39copyright 2016 Unicode, Inc., which is provided with the following license:
40
41Unicode Data Files include all data files under the directories
42http://www.unicode.org/Public/, http://www.unicode.org/reports/,
43http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
44http://www.unicode.org/utility/trac/browser/.
45
46Unicode Data Files do not include PDF online code charts under the
47directory http://www.unicode.org/Public/.
48
49Software includes any source code published in the Unicode Standard
50or under the directories
51http://www.unicode.org/Public/, http://www.unicode.org/reports/,
52http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
53http://www.unicode.org/utility/trac/browser/.
54
55NOTICE TO USER: Carefully read the following legal agreement.
56BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
57DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
58YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
59TERMS AND CONDITIONS OF THIS AGREEMENT.
60IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
61THE DATA FILES OR SOFTWARE.
62
63COPYRIGHT AND PERMISSION NOTICE
64
65Copyright © 1991-2017 Unicode, Inc. All rights reserved.
66Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
67
68Permission is hereby granted, free of charge, to any person obtaining
69a copy of the Unicode data files and any associated documentation
70(the "Data Files") or Unicode software and any associated documentation
71(the "Software") to deal in the Data Files or Software
72without restriction, including without limitation the rights to use,
73copy, modify, merge, publish, distribute, and/or sell copies of
74the Data Files or Software, and to permit persons to whom the Data Files
75or Software are furnished to do so, provided that either
76(a) this copyright and permission notice appear with all copies
77of the Data Files or Software, or
78(b) this copyright and permission notice appear in associated
79Documentation.
80
81THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
82ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
83WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
84NONINFRINGEMENT OF THIRD PARTY RIGHTS.
85IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
86NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
87DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
88DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
89TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
90PERFORMANCE OF THE DATA FILES OR SOFTWARE.
91
92Except as contained in this notice, the name of a copyright holder
93shall not be used in advertising or otherwise to promote the sale,
94use or other dealings in these Data Files or Software without prior
95written authorization of the copyright holder.
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go
new file mode 100644
index 0000000..5752e9e
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go
@@ -0,0 +1,30 @@
1package textseg
2
3import (
4 "bufio"
5 "bytes"
6)
7
8// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
9// all of the recognized tokens in the given buffer.
10func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
11 scanner := bufio.NewScanner(bytes.NewReader(buf))
12 scanner.Split(splitFunc)
13 var ret [][]byte
14 for scanner.Scan() {
15 ret = append(ret, scanner.Bytes())
16 }
17 return ret, scanner.Err()
18}
19
20// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
21// recognized tokens in the given buffer.
22func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
23 scanner := bufio.NewScanner(bytes.NewReader(buf))
24 scanner.Split(splitFunc)
25 var ret int
26 for scanner.Scan() {
27 ret++
28 }
29 return ret, scanner.Err()
30}
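
A minimal sketch of the two helpers above; bufio.ScanWords stands in for any bufio.SplitFunc here (a grapheme-cluster splitter from this package could be passed instead):

```
package main

import (
	"bufio"
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	buf := []byte("grapheme cluster segmentation")

	// AllTokens collects every token the SplitFunc recognizes.
	tokens, err := textseg.AllTokens(buf, bufio.ScanWords)
	fmt.Println(len(tokens), err) // 3 <nil>

	// TokenCount only counts the tokens, without keeping their bytes.
	n, err := textseg.TokenCount(buf, bufio.ScanWords)
	fmt.Println(n, err) // 3 <nil>
}
```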
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go
new file mode 100644
index 0000000..81f3a74
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go
@@ -0,0 +1,7 @@
1package textseg
2
3//go:generate go run make_tables.go -output tables.go
4//go:generate go run make_test_tables.go -output tables_test.go
5//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl
6//go:generate ragel -Z grapheme_clusters.rl
7//go:generate gofmt -w grapheme_clusters.go
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go
new file mode 100644
index 0000000..012bc69
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go
@@ -0,0 +1,5276 @@
1
2// line 1 "grapheme_clusters.rl"
3package textseg
4
5import (
6 "errors"
7 "unicode/utf8"
8)
9
10// Generated from grapheme_clusters.rl. DO NOT EDIT
11
12// line 13 "grapheme_clusters.go"
13var _graphclust_actions []byte = []byte{
14 0, 1, 0, 1, 4, 1, 9, 1, 10,
15 1, 11, 1, 12, 1, 13, 1, 14,
16 1, 15, 1, 16, 1, 17, 1, 18,
17 1, 19, 1, 20, 1, 21, 2, 1,
18 7, 2, 1, 8, 2, 2, 3, 2,
19 5, 1, 3, 0, 1, 8, 3, 5,
20 0, 1, 3, 5, 1, 6,
21}
22
23var _graphclust_key_offsets []int16 = []int16{
24 0, 0, 1, 3, 5, 7, 10, 15,
25 17, 20, 28, 31, 33, 35, 37, 67,
26 75, 77, 81, 84, 89, 94, 104, 116,
27 122, 127, 137, 140, 147, 151, 159, 169,
28 173, 181, 183, 191, 194, 196, 201, 203,
29 210, 212, 220, 221, 242, 246, 252, 257,
30 259, 263, 267, 269, 273, 275, 278, 282,
31 284, 291, 293, 297, 301, 305, 307, 309,
32 318, 322, 327, 329, 335, 337, 338, 340,
33 341, 343, 345, 347, 349, 364, 368, 370,
34 372, 377, 381, 385, 387, 389, 393, 397,
35 399, 403, 410, 415, 419, 422, 423, 427,
36 434, 439, 440, 441, 443, 452, 454, 477,
37 481, 483, 487, 491, 492, 496, 500, 503,
38 505, 510, 523, 525, 527, 529, 531, 535,
39 539, 541, 543, 545, 549, 553, 557, 559,
40 561, 563, 565, 566, 568, 574, 580, 586,
41 588, 592, 596, 601, 604, 614, 616, 618,
42 621, 623, 625, 627, 629, 632, 637, 639,
43 642, 650, 653, 655, 657, 659, 690, 698,
44 700, 704, 711, 723, 730, 744, 750, 768,
45 779, 785, 797, 800, 809, 814, 824, 830,
46 844, 850, 862, 874, 878, 880, 886, 888,
47 895, 898, 906, 907, 928, 937, 945, 951,
48 953, 957, 961, 966, 972, 974, 977, 990,
49 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056,
50 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085,
51 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122,
52 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171,
53 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206,
54 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259,
55 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316,
56 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364,
57 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402,
58 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435,
59 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461,
60 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480,
61 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504,
62 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532,
63 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585,
64 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641,
65 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692,
66 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722,
67 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770,
68 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798,
69 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830,
70 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848,
71 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886,
72 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916,
73 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942,
74 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992,
75 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026,
76 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046,
77 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067,
78 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102,
79 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128,
80 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156,
81 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224,
82 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301,
83 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375,
84 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408,
85 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467,
86 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521,
87 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579,
88 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607,
89 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648,
90 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690,
91 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721,
92 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781,
93 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840,
94 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893,
95 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920,
96 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956,
97 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972,
98 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991,
99 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019,
100 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105,
101 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124,
102 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196,
103 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350,
104 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390,
105 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432,
106 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598,
107 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631,
108 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668,
109 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736,
110 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773,
111 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797,
112 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824,
113 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891,
114 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946,
115 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988,
116 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039,
117 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065,
118 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094,
119 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125,
120 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155,
121 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180,
122 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210,
123 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241,
124 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287,
125 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318,
126 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344,
127 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367,
128 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403,
129 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424,
130 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477,
131 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537,
132 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611,
133 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673,
134 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732,
135 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764,
136 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835,
137 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870,
138 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901,
139 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951,
140 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992,
141 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037,
142 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089,
143 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149,
144 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187,
145 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218,
146 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247,
147 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261,
148 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327,
149 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363,
150 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439,
151 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456,
152 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628,
153 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656,
154 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709,
155 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765,
156 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816,
157 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846,
158 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894,
159 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922,
160 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954,
161 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982,
162 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007,
163 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040,
164 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069,
165 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113,
166 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146,
167 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170,
168 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191,
169 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231,
170 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252,
171 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274,
172 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301,
173 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386,
174 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456,
175 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522,
176 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579,
177 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616,
178 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680,
179 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724,
180 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754,
181 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802,
182 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842,
183 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890,
184 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937,
185 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995,
186 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040,
187 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069,
188 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100,
189 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115,
190 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140,
191 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199,
192 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226,
193 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278,
194 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327,
195 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380,
196 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414,
197 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468,
198 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493,
199 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520,
200 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551,
201 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581,
202 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608,
203 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638,
204 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688,
205 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714,
206 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746,
207 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770,
208 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797,
209 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829,
210 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847,
211 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921,
212 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986,
213 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037,
214 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095,
215 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137,
216 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209,
217 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229,
218 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272,
219 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321,
220 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349,
221 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406,
222 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462,
223 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514,
224 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545,
225 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582,
226 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609,
227 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650,
228 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726,
229 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838,
230 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876,
231 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911,
232 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938,
233 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005,
234 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060,
235 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102,
236 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153,
237 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179,
238 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208,
239 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239,
240 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269,
241 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294,
242 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324,
243 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355,
244 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401,
245 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432,
246 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458,
247 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481,
248 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517,
249 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538,
250 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591,
251 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651,
252 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725,
253 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787,
254 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846,
255 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878,
256 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949,
257 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984,
258 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015,
259 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065,
260 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106,
261 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151,
262 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203,
263 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263,
264 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301,
265 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332,
266 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361,
267 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375,
268 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401,
269 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469,
270 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538,
271 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703,
272 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869,
273 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032,
274 11052, 11072, 11092, 11112,
275}
276
277var _graphclust_trans_keys []byte = []byte{
278 10, 128, 255, 176, 255, 131, 137, 191,
279 145, 189, 135, 129, 130, 132, 133, 144,
280 154, 176, 139, 159, 150, 156, 159, 164,
281 167, 168, 170, 173, 145, 176, 255, 139,
282 255, 166, 176, 171, 179, 160, 161, 163,
283 164, 165, 167, 169, 171, 173, 174, 175,
284 176, 177, 179, 180, 181, 182, 183, 184,
285 185, 186, 187, 188, 189, 190, 191, 166,
286 170, 172, 178, 150, 153, 155, 163, 165,
287 167, 169, 173, 153, 155, 148, 161, 163,
288 255, 189, 132, 185, 144, 152, 161, 164,
289 255, 188, 129, 131, 190, 255, 133, 134,
290 137, 138, 142, 150, 152, 161, 164, 255,
291 131, 134, 137, 138, 142, 144, 146, 175,
292 178, 180, 182, 255, 134, 138, 142, 161,
293 164, 255, 188, 129, 131, 190, 191, 128,
294 132, 135, 136, 139, 141, 150, 151, 162,
295 163, 130, 190, 191, 151, 128, 130, 134,
296 136, 138, 141, 128, 131, 190, 255, 133,
297 137, 142, 148, 151, 161, 164, 255, 128,
298 132, 134, 136, 138, 141, 149, 150, 162,
299 163, 129, 131, 190, 255, 133, 137, 142,
300 150, 152, 161, 164, 255, 130, 131, 138,
301 150, 143, 148, 152, 159, 178, 179, 177,
302 179, 186, 135, 142, 177, 179, 185, 187,
303 188, 136, 141, 181, 183, 185, 152, 153,
304 190, 191, 177, 191, 128, 132, 134, 135,
305 141, 151, 153, 188, 134, 128, 129, 130,
306 141, 156, 157, 158, 159, 160, 162, 164,
307 168, 169, 170, 172, 173, 174, 175, 176,
308 179, 183, 173, 183, 185, 190, 150, 153,
309 158, 160, 177, 180, 130, 141, 157, 132,
310 134, 157, 159, 146, 148, 178, 180, 146,
311 147, 178, 179, 180, 255, 148, 156, 158,
312 255, 139, 141, 169, 133, 134, 160, 171,
313 176, 187, 151, 155, 160, 162, 191, 149,
314 158, 165, 188, 176, 190, 128, 132, 180,
315 255, 133, 170, 180, 255, 128, 130, 161,
316 173, 166, 179, 164, 183, 173, 144, 146,
317 148, 168, 178, 180, 184, 185, 128, 181,
318 187, 191, 128, 131, 179, 181, 183, 140,
319 141, 128, 131, 157, 179, 181, 183, 144,
320 176, 164, 175, 177, 191, 160, 191, 128,
321 130, 170, 175, 153, 154, 153, 154, 155,
322 160, 162, 163, 164, 165, 166, 167, 168,
323 169, 170, 171, 175, 175, 178, 180, 189,
324 158, 159, 176, 177, 130, 134, 139, 163,
325 167, 128, 129, 180, 255, 134, 159, 178,
326 255, 166, 173, 135, 147, 128, 131, 179,
327 255, 129, 164, 166, 255, 169, 182, 131,
328 188, 140, 141, 176, 178, 180, 183, 184,
329 190, 191, 129, 171, 175, 181, 182, 163,
330 170, 172, 173, 172, 184, 190, 158, 128,
331 143, 160, 175, 144, 145, 150, 155, 157,
332 158, 159, 135, 139, 141, 168, 171, 189,
333 160, 182, 186, 191, 129, 131, 133, 134,
334 140, 143, 184, 186, 165, 166, 128, 129,
335 130, 132, 133, 134, 135, 136, 139, 140,
336 141, 144, 145, 146, 147, 150, 151, 152,
337 153, 154, 156, 176, 178, 128, 130, 184,
338 255, 135, 190, 131, 175, 187, 255, 128,
339 130, 167, 180, 179, 128, 130, 179, 255,
340 129, 137, 141, 255, 190, 172, 183, 159,
341 170, 188, 128, 131, 190, 191, 151, 128,
342 132, 135, 136, 139, 141, 162, 163, 166,
343 172, 176, 180, 181, 191, 128, 134, 176,
344 255, 132, 255, 175, 181, 184, 255, 129,
345 155, 158, 255, 129, 255, 171, 183, 157,
346 171, 175, 182, 184, 191, 146, 167, 169,
347 182, 171, 172, 189, 190, 176, 180, 176,
348 182, 145, 190, 143, 146, 178, 157, 158,
349 133, 134, 137, 168, 169, 170, 165, 169,
350 173, 178, 187, 255, 131, 132, 140, 169,
351 174, 255, 130, 132, 128, 182, 187, 255,
352 173, 180, 182, 255, 132, 155, 159, 161,
353 175, 128, 163, 165, 128, 134, 136, 152,
354 155, 161, 163, 164, 166, 170, 144, 150,
355 132, 138, 145, 146, 151, 166, 169, 0,
356 127, 176, 255, 131, 137, 191, 145, 189,
357 135, 129, 130, 132, 133, 144, 154, 176,
358 139, 159, 150, 156, 159, 164, 167, 168,
359 170, 173, 145, 176, 255, 139, 255, 166,
360 176, 171, 179, 160, 161, 163, 164, 165,
361 166, 167, 169, 171, 172, 173, 174, 175,
362 176, 177, 178, 179, 180, 181, 182, 183,
363 184, 185, 186, 187, 188, 189, 190, 191,
364 168, 170, 150, 153, 155, 163, 165, 167,
365 169, 173, 153, 155, 148, 161, 163, 255,
366 131, 187, 189, 132, 185, 190, 255, 141,
367 144, 129, 136, 145, 151, 152, 161, 162,
368 163, 164, 255, 129, 188, 190, 130, 131,
369 191, 255, 141, 151, 129, 132, 133, 134,
370 137, 138, 142, 161, 162, 163, 164, 255,
371 131, 188, 129, 130, 190, 255, 145, 181,
372 129, 130, 131, 134, 135, 136, 137, 138,
373 139, 141, 142, 175, 176, 177, 178, 255,
374 134, 138, 141, 129, 136, 142, 161, 162,
375 163, 164, 255, 129, 188, 130, 131, 190,
376 191, 128, 141, 129, 132, 135, 136, 139,
377 140, 150, 151, 162, 163, 130, 190, 191,
378 128, 141, 151, 129, 130, 134, 136, 138,
379 140, 128, 129, 131, 190, 255, 133, 137,
380 129, 132, 142, 148, 151, 161, 164, 255,
381 129, 188, 190, 191, 130, 131, 130, 134,
382 128, 132, 135, 136, 138, 139, 140, 141,
383 149, 150, 162, 163, 129, 190, 130, 131,
384 191, 255, 133, 137, 141, 151, 129, 132,
385 142, 161, 162, 163, 164, 255, 138, 143,
386 150, 159, 144, 145, 146, 148, 152, 158,
387 178, 179, 177, 179, 180, 186, 135, 142,
388 177, 179, 180, 185, 187, 188, 136, 141,
389 181, 183, 185, 152, 153, 190, 191, 191,
390 177, 190, 128, 132, 134, 135, 141, 151,
391 153, 188, 134, 128, 129, 130, 141, 156,
392 157, 158, 159, 160, 162, 164, 168, 169,
393 170, 172, 173, 174, 175, 176, 179, 183,
394 177, 173, 183, 185, 186, 187, 188, 189,
395 190, 150, 151, 152, 153, 158, 160, 177,
396 180, 130, 132, 141, 157, 133, 134, 157,
397 159, 146, 148, 178, 180, 146, 147, 178,
398 179, 182, 180, 189, 190, 255, 134, 157,
399 137, 147, 148, 255, 139, 141, 169, 133,
400 134, 178, 160, 162, 163, 166, 167, 168,
401 169, 171, 176, 184, 185, 187, 155, 151,
402 152, 153, 154, 150, 160, 162, 191, 149,
403 151, 152, 158, 165, 172, 173, 178, 179,
404 188, 176, 190, 132, 181, 187, 128, 131,
405 180, 188, 189, 255, 130, 133, 170, 171,
406 179, 180, 255, 130, 161, 170, 128, 129,
407 162, 165, 166, 167, 168, 173, 167, 173,
408 166, 169, 170, 174, 175, 177, 178, 179,
409 164, 171, 172, 179, 180, 181, 182, 183,
410 161, 173, 180, 144, 146, 148, 168, 178,
411 179, 184, 185, 128, 181, 187, 191, 128,
412 131, 179, 181, 183, 140, 141, 144, 176,
413 175, 177, 191, 160, 191, 128, 130, 170,
414 175, 153, 154, 153, 154, 155, 160, 162,
415 163, 164, 165, 166, 167, 168, 169, 170,
416 171, 175, 175, 178, 180, 189, 158, 159,
417 176, 177, 130, 134, 139, 167, 163, 164,
418 165, 166, 132, 133, 134, 159, 160, 177,
419 178, 255, 166, 173, 135, 145, 146, 147,
420 131, 179, 188, 128, 130, 180, 181, 182,
421 185, 186, 255, 165, 129, 255, 169, 174,
422 175, 176, 177, 178, 179, 180, 181, 182,
423 131, 140, 141, 188, 176, 178, 180, 183,
424 184, 190, 191, 129, 171, 181, 182, 172,
425 173, 174, 175, 165, 168, 172, 173, 163,
426 170, 172, 184, 190, 158, 128, 143, 160,
427 175, 144, 145, 150, 155, 157, 158, 159,
428 135, 139, 141, 168, 171, 189, 160, 182,
429 186, 191, 129, 131, 133, 134, 140, 143,
430 184, 186, 165, 166, 128, 129, 130, 132,
431 133, 134, 135, 136, 139, 140, 141, 144,
432 145, 146, 147, 150, 151, 152, 153, 154,
433 156, 176, 178, 129, 128, 130, 184, 255,
434 135, 190, 130, 131, 175, 176, 178, 183,
435 184, 187, 255, 172, 128, 130, 167, 180,
436 179, 130, 128, 129, 179, 181, 182, 190,
437 191, 255, 129, 137, 138, 140, 141, 255,
438 180, 190, 172, 174, 175, 177, 178, 181,
439 182, 183, 159, 160, 162, 163, 170, 188,
440 190, 191, 128, 129, 130, 131, 128, 151,
441 129, 132, 135, 136, 139, 141, 162, 163,
442 166, 172, 176, 180, 181, 183, 184, 191,
443 133, 128, 129, 130, 134, 176, 185, 189,
444 177, 178, 179, 186, 187, 190, 191, 255,
445 129, 132, 255, 175, 190, 176, 177, 178,
446 181, 184, 187, 188, 255, 129, 155, 158,
447 255, 189, 176, 178, 179, 186, 187, 190,
448 191, 255, 129, 255, 172, 182, 171, 173,
449 174, 175, 176, 183, 166, 157, 159, 160,
450 161, 162, 171, 175, 190, 176, 182, 184,
451 191, 169, 177, 180, 146, 167, 170, 182,
452 171, 172, 189, 190, 176, 180, 176, 182,
453 143, 146, 178, 157, 158, 133, 134, 137,
454 168, 169, 170, 166, 173, 165, 169, 174,
455 178, 187, 255, 131, 132, 140, 169, 174,
456 255, 130, 132, 128, 182, 187, 255, 173,
457 180, 182, 255, 132, 155, 159, 161, 175,
458 128, 163, 165, 128, 134, 136, 152, 155,
459 161, 163, 164, 166, 170, 144, 150, 132,
460 138, 143, 187, 191, 160, 128, 129, 132,
461 135, 133, 134, 160, 255, 192, 255, 139,
462 168, 160, 128, 129, 132, 135, 133, 134,
463 160, 255, 192, 255, 144, 145, 150, 155,
464 157, 158, 128, 191, 173, 128, 159, 160,
465 191, 156, 128, 133, 134, 191, 0, 127,
466 176, 255, 131, 137, 191, 145, 189, 135,
467 129, 130, 132, 133, 144, 154, 176, 139,
468 159, 150, 156, 159, 164, 167, 168, 170,
469 173, 145, 176, 255, 139, 255, 166, 176,
470 171, 179, 160, 161, 163, 164, 165, 167,
471 169, 171, 173, 174, 175, 176, 177, 179,
472 180, 181, 182, 183, 184, 185, 186, 187,
473 188, 189, 190, 191, 166, 170, 172, 178,
474 150, 153, 155, 163, 165, 167, 169, 173,
475 153, 155, 148, 161, 163, 255, 189, 132,
476 185, 144, 152, 161, 164, 255, 188, 129,
477 131, 190, 255, 133, 134, 137, 138, 142,
478 150, 152, 161, 164, 255, 131, 134, 137,
479 138, 142, 144, 146, 175, 178, 180, 182,
480 255, 134, 138, 142, 161, 164, 255, 188,
481 129, 131, 190, 191, 128, 132, 135, 136,
482 139, 141, 150, 151, 162, 163, 130, 190,
483 191, 151, 128, 130, 134, 136, 138, 141,
484 128, 131, 190, 255, 133, 137, 142, 148,
485 151, 161, 164, 255, 128, 132, 134, 136,
486 138, 141, 149, 150, 162, 163, 129, 131,
487 190, 255, 133, 137, 142, 150, 152, 161,
488 164, 255, 130, 131, 138, 150, 143, 148,
489 152, 159, 178, 179, 177, 179, 186, 135,
490 142, 177, 179, 185, 187, 188, 136, 141,
491 181, 183, 185, 152, 153, 190, 191, 177,
492 191, 128, 132, 134, 135, 141, 151, 153,
493 188, 134, 128, 129, 130, 141, 156, 157,
494 158, 159, 160, 162, 164, 168, 169, 170,
495 172, 173, 174, 175, 176, 179, 183, 173,
496 183, 185, 190, 150, 153, 158, 160, 177,
497 180, 130, 141, 157, 132, 134, 157, 159,
498 146, 148, 178, 180, 146, 147, 178, 179,
499 180, 255, 148, 156, 158, 255, 139, 141,
500 169, 133, 134, 160, 171, 176, 187, 151,
501 155, 160, 162, 191, 149, 158, 165, 188,
502 176, 190, 128, 132, 180, 255, 133, 170,
503 180, 255, 128, 130, 161, 173, 166, 179,
504 164, 183, 173, 144, 146, 148, 168, 178,
505 180, 184, 185, 128, 181, 187, 191, 128,
506 131, 179, 181, 183, 140, 141, 128, 131,
507 157, 179, 181, 183, 144, 176, 164, 175,
508 177, 191, 160, 191, 128, 130, 170, 175,
509 153, 154, 153, 154, 155, 160, 162, 163,
510 164, 165, 166, 167, 168, 169, 170, 171,
511 175, 175, 178, 180, 189, 158, 159, 176,
512 177, 130, 134, 139, 163, 167, 128, 129,
513 180, 255, 134, 159, 178, 255, 166, 173,
514 135, 147, 128, 131, 179, 255, 129, 164,
515 166, 255, 169, 182, 131, 188, 140, 141,
516 176, 178, 180, 183, 184, 190, 191, 129,
517 171, 175, 181, 182, 163, 170, 172, 173,
518 172, 184, 190, 158, 128, 143, 160, 175,
519 144, 145, 150, 155, 157, 158, 159, 135,
520 139, 141, 168, 171, 189, 160, 182, 186,
521 191, 129, 131, 133, 134, 140, 143, 184,
522 186, 165, 166, 128, 129, 130, 132, 133,
523 134, 135, 136, 139, 140, 141, 144, 145,
524 146, 147, 150, 151, 152, 153, 154, 156,
525 176, 178, 128, 130, 184, 255, 135, 190,
526 131, 175, 187, 255, 128, 130, 167, 180,
527 179, 128, 130, 179, 255, 129, 137, 141,
528 255, 190, 172, 183, 159, 170, 188, 128,
529 131, 190, 191, 151, 128, 132, 135, 136,
530 139, 141, 162, 163, 166, 172, 176, 180,
531 181, 191, 128, 134, 176, 255, 132, 255,
532 175, 181, 184, 255, 129, 155, 158, 255,
533 129, 255, 171, 183, 157, 171, 175, 182,
534 184, 191, 146, 167, 169, 182, 171, 172,
535 189, 190, 176, 180, 176, 182, 145, 190,
536 143, 146, 178, 157, 158, 133, 134, 137,
537 168, 169, 170, 165, 169, 173, 178, 187,
538 255, 131, 132, 140, 169, 174, 255, 130,
539 132, 128, 182, 187, 255, 173, 180, 182,
540 255, 132, 155, 159, 161, 175, 128, 163,
541 165, 128, 134, 136, 152, 155, 161, 163,
542 164, 166, 170, 144, 150, 132, 138, 145,
543 146, 151, 166, 169, 128, 255, 176, 255,
544 131, 137, 191, 145, 189, 135, 129, 130,
545 132, 133, 144, 154, 176, 139, 159, 150,
546 156, 159, 164, 167, 168, 170, 173, 145,
547 176, 255, 139, 255, 166, 176, 171, 179,
548 160, 161, 163, 164, 165, 166, 167, 169,
549 171, 172, 173, 174, 175, 176, 177, 178,
550 179, 180, 181, 182, 183, 184, 185, 186,
551 187, 188, 189, 190, 191, 168, 170, 150,
552 153, 155, 163, 165, 167, 169, 173, 153,
553 155, 148, 161, 163, 255, 131, 187, 189,
554 132, 185, 190, 255, 141, 144, 129, 136,
555 145, 151, 152, 161, 162, 163, 164, 255,
556 129, 188, 190, 130, 131, 191, 255, 141,
557 151, 129, 132, 133, 134, 137, 138, 142,
558 161, 162, 163, 164, 255, 131, 188, 129,
559 130, 190, 255, 145, 181, 129, 130, 131,
560 134, 135, 136, 137, 138, 139, 141, 142,
561 175, 176, 177, 178, 255, 134, 138, 141,
562 129, 136, 142, 161, 162, 163, 164, 255,
563 129, 188, 130, 131, 190, 191, 128, 141,
564 129, 132, 135, 136, 139, 140, 150, 151,
565 162, 163, 130, 190, 191, 128, 141, 151,
566 129, 130, 134, 136, 138, 140, 128, 129,
567 131, 190, 255, 133, 137, 129, 132, 142,
568 148, 151, 161, 164, 255, 129, 188, 190,
569 191, 130, 131, 130, 134, 128, 132, 135,
570 136, 138, 139, 140, 141, 149, 150, 162,
571 163, 129, 190, 130, 131, 191, 255, 133,
572 137, 141, 151, 129, 132, 142, 161, 162,
573 163, 164, 255, 138, 143, 150, 159, 144,
574 145, 146, 148, 152, 158, 178, 179, 177,
575 179, 180, 186, 135, 142, 177, 179, 180,
576 185, 187, 188, 136, 141, 181, 183, 185,
577 152, 153, 190, 191, 191, 177, 190, 128,
578 132, 134, 135, 141, 151, 153, 188, 134,
579 128, 129, 130, 141, 156, 157, 158, 159,
580 160, 162, 164, 168, 169, 170, 172, 173,
581 174, 175, 176, 179, 183, 177, 173, 183,
582 185, 186, 187, 188, 189, 190, 150, 151,
583 152, 153, 158, 160, 177, 180, 130, 132,
584 141, 157, 133, 134, 157, 159, 146, 148,
585 178, 180, 146, 147, 178, 179, 182, 180,
586 189, 190, 255, 134, 157, 137, 147, 148,
587 255, 139, 141, 169, 133, 134, 178, 160,
588 162, 163, 166, 167, 168, 169, 171, 176,
589 184, 185, 187, 155, 151, 152, 153, 154,
590 150, 160, 162, 191, 149, 151, 152, 158,
591 165, 172, 173, 178, 179, 188, 176, 190,
592 132, 181, 187, 128, 131, 180, 188, 189,
593 255, 130, 133, 170, 171, 179, 180, 255,
594 130, 161, 170, 128, 129, 162, 165, 166,
595 167, 168, 173, 167, 173, 166, 169, 170,
596 174, 175, 177, 178, 179, 164, 171, 172,
597 179, 180, 181, 182, 183, 161, 173, 180,
598 144, 146, 148, 168, 178, 179, 184, 185,
599 128, 181, 187, 191, 128, 131, 179, 181,
600 183, 140, 141, 144, 176, 175, 177, 191,
601 160, 191, 128, 130, 170, 175, 153, 154,
602 153, 154, 155, 160, 162, 163, 164, 165,
603 166, 167, 168, 169, 170, 171, 175, 175,
604 178, 180, 189, 158, 159, 176, 177, 130,
605 134, 139, 167, 163, 164, 165, 166, 132,
606 133, 134, 159, 160, 177, 178, 255, 166,
607 173, 135, 145, 146, 147, 131, 179, 188,
608 128, 130, 180, 181, 182, 185, 186, 255,
609 165, 129, 255, 169, 174, 175, 176, 177,
610 178, 179, 180, 181, 182, 131, 140, 141,
611 188, 176, 178, 180, 183, 184, 190, 191,
612 129, 171, 181, 182, 172, 173, 174, 175,
613 165, 168, 172, 173, 163, 170, 172, 184,
614 190, 158, 128, 143, 160, 175, 144, 145,
615 150, 155, 157, 158, 159, 135, 139, 141,
616 168, 171, 189, 160, 182, 186, 191, 129,
617 131, 133, 134, 140, 143, 184, 186, 165,
618 166, 128, 129, 130, 132, 133, 134, 135,
619 136, 139, 140, 141, 144, 145, 146, 147,
620 150, 151, 152, 153, 154, 156, 176, 178,
621 129, 128, 130, 184, 255, 135, 190, 130,
622 131, 175, 176, 178, 183, 184, 187, 255,
623 172, 128, 130, 167, 180, 179, 130, 128,
624 129, 179, 181, 182, 190, 191, 255, 129,
625 137, 138, 140, 141, 255, 180, 190, 172,
626 174, 175, 177, 178, 181, 182, 183, 159,
627 160, 162, 163, 170, 188, 190, 191, 128,
628 129, 130, 131, 128, 151, 129, 132, 135,
629 136, 139, 141, 162, 163, 166, 172, 176,
630 180, 181, 183, 184, 191, 133, 128, 129,
631 130, 134, 176, 185, 189, 177, 178, 179,
632 186, 187, 190, 191, 255, 129, 132, 255,
633 175, 190, 176, 177, 178, 181, 184, 187,
634 188, 255, 129, 155, 158, 255, 189, 176,
635 178, 179, 186, 187, 190, 191, 255, 129,
636 255, 172, 182, 171, 173, 174, 175, 176,
637 183, 166, 157, 159, 160, 161, 162, 171,
638 175, 190, 176, 182, 184, 191, 169, 177,
639 180, 146, 167, 170, 182, 171, 172, 189,
640 190, 176, 180, 176, 182, 143, 146, 178,
641 157, 158, 133, 134, 137, 168, 169, 170,
642 166, 173, 165, 169, 174, 178, 187, 255,
643 131, 132, 140, 169, 174, 255, 130, 132,
644 128, 182, 187, 255, 173, 180, 182, 255,
645 132, 155, 159, 161, 175, 128, 163, 165,
646 128, 134, 136, 152, 155, 161, 163, 164,
647 166, 170, 144, 150, 132, 138, 143, 187,
648 191, 160, 128, 129, 132, 135, 133, 134,
649 160, 255, 192, 255, 139, 168, 160, 128,
650 129, 132, 135, 133, 134, 160, 255, 192,
651 255, 144, 145, 150, 155, 157, 158, 128,
652 191, 160, 172, 174, 191, 128, 133, 134,
653 155, 157, 191, 157, 128, 191, 143, 128,
654 191, 163, 181, 128, 191, 162, 128, 191,
655 142, 128, 191, 132, 133, 134, 135, 160,
656 128, 191, 128, 255, 128, 129, 130, 132,
657 133, 134, 141, 156, 157, 158, 159, 160,
658 162, 164, 168, 169, 170, 172, 173, 174,
659 175, 176, 179, 183, 160, 255, 128, 129,
660 130, 133, 134, 135, 141, 156, 157, 158,
661 159, 160, 162, 164, 168, 169, 170, 172,
662 173, 174, 175, 176, 179, 183, 160, 255,
663 168, 255, 128, 129, 130, 134, 135, 141,
664 156, 157, 158, 159, 160, 162, 164, 168,
665 169, 170, 172, 173, 174, 175, 176, 179,
666 183, 168, 255, 192, 255, 159, 139, 187,
667 158, 159, 176, 255, 135, 138, 139, 187,
668 188, 255, 168, 255, 153, 154, 155, 160,
669 162, 163, 164, 165, 166, 167, 168, 169,
670 170, 171, 175, 177, 178, 179, 180, 181,
671 182, 184, 185, 186, 187, 188, 189, 191,
672 176, 190, 192, 255, 135, 147, 160, 188,
673 128, 156, 184, 129, 255, 128, 129, 130,
674 133, 134, 141, 156, 157, 158, 159, 160,
675 162, 164, 168, 169, 170, 172, 173, 174,
676 175, 176, 179, 183, 158, 159, 135, 255,
677 148, 176, 140, 168, 132, 160, 188, 152,
678 180, 144, 172, 136, 164, 192, 255, 129,
679 130, 131, 132, 133, 134, 136, 137, 138,
680 139, 140, 141, 143, 144, 145, 146, 147,
681 148, 150, 151, 152, 153, 154, 155, 157,
682 158, 159, 160, 161, 162, 164, 165, 166,
683 167, 168, 169, 171, 172, 173, 174, 175,
684 176, 178, 179, 180, 181, 182, 183, 185,
685 186, 187, 188, 189, 190, 128, 191, 129,
686 130, 131, 132, 133, 134, 136, 137, 138,
687 139, 140, 141, 143, 144, 145, 146, 147,
688 148, 150, 151, 152, 153, 154, 155, 157,
689 158, 159, 160, 161, 162, 164, 165, 166,
690 167, 168, 169, 171, 172, 173, 174, 175,
691 176, 178, 179, 180, 181, 182, 183, 185,
692 186, 187, 188, 189, 190, 128, 191, 129,
693 130, 131, 132, 133, 134, 136, 137, 138,
694 139, 140, 141, 143, 144, 145, 146, 147,
695 148, 150, 151, 152, 153, 154, 155, 157,
696 158, 159, 128, 156, 160, 255, 136, 164,
697 175, 176, 255, 128, 141, 143, 191, 128,
698 129, 152, 155, 156, 130, 191, 140, 141,
699 128, 138, 144, 167, 175, 191, 128, 159,
700 176, 191, 157, 128, 191, 185, 128, 191,
701 128, 137, 138, 141, 142, 191, 128, 191,
702 165, 177, 178, 179, 180, 181, 182, 184,
703 185, 186, 187, 188, 189, 191, 128, 175,
704 176, 190, 192, 255, 128, 159, 160, 188,
705 189, 191, 128, 156, 184, 129, 255, 148,
706 176, 140, 168, 132, 160, 188, 152, 180,
707 144, 172, 136, 164, 192, 255, 129, 130,
708 131, 132, 133, 134, 136, 137, 138, 139,
709 140, 141, 143, 144, 145, 146, 147, 148,
710 150, 151, 152, 153, 154, 155, 157, 158,
711 159, 160, 161, 162, 164, 165, 166, 167,
712 168, 169, 171, 172, 173, 174, 175, 176,
713 178, 179, 180, 181, 182, 183, 185, 186,
714 187, 188, 189, 190, 128, 191, 129, 130,
715 131, 132, 133, 134, 136, 137, 138, 139,
716 140, 141, 143, 144, 145, 146, 147, 148,
717 150, 151, 152, 153, 154, 155, 157, 158,
718 159, 160, 161, 162, 164, 165, 166, 167,
719 168, 169, 171, 172, 173, 174, 175, 176,
720 178, 179, 180, 181, 182, 183, 185, 186,
721 187, 188, 189, 190, 128, 191, 129, 130,
722 131, 132, 133, 134, 136, 137, 138, 139,
723 140, 141, 143, 144, 145, 146, 147, 148,
724 150, 151, 152, 153, 154, 155, 157, 158,
725 159, 128, 156, 160, 255, 136, 164, 175,
726 176, 255, 135, 138, 139, 187, 188, 191,
727 192, 255, 187, 191, 128, 190, 128, 190,
728 188, 128, 175, 190, 191, 145, 155, 157,
729 159, 128, 191, 130, 135, 128, 191, 189,
730 128, 191, 128, 129, 130, 131, 132, 191,
731 178, 128, 191, 128, 159, 164, 191, 133,
732 128, 191, 128, 178, 187, 191, 135, 142,
733 143, 145, 146, 149, 150, 153, 154, 155,
734 164, 128, 191, 128, 165, 166, 191, 144,
735 145, 150, 155, 157, 158, 159, 135, 166,
736 191, 133, 128, 191, 128, 130, 131, 132,
737 133, 137, 138, 139, 140, 191, 174, 188,
738 128, 129, 130, 131, 132, 133, 134, 144,
739 145, 165, 166, 169, 170, 175, 176, 184,
740 185, 191, 128, 132, 170, 129, 135, 136,
741 191, 181, 186, 128, 191, 144, 128, 148,
742 149, 150, 151, 191, 128, 132, 133, 135,
743 136, 138, 139, 143, 144, 191, 163, 128,
744 179, 180, 182, 183, 191, 128, 129, 191,
745 166, 176, 191, 128, 151, 152, 158, 159,
746 178, 179, 185, 186, 187, 188, 190, 128,
747 191, 160, 128, 191, 128, 129, 135, 132,
748 134, 128, 175, 157, 128, 191, 143, 128,
749 191, 163, 181, 128, 191, 162, 128, 191,
750 142, 128, 191, 132, 133, 134, 135, 160,
751 128, 191, 0, 127, 128, 255, 176, 255,
752 131, 137, 191, 145, 189, 135, 129, 130,
753 132, 133, 144, 154, 176, 139, 159, 150,
754 156, 159, 164, 167, 168, 170, 173, 145,
755 176, 255, 139, 255, 166, 176, 171, 179,
756 160, 161, 163, 164, 165, 167, 169, 171,
757 173, 174, 175, 176, 177, 179, 180, 181,
758 182, 183, 184, 185, 186, 187, 188, 189,
759 190, 191, 166, 170, 172, 178, 150, 153,
760 155, 163, 165, 167, 169, 173, 153, 155,
761 148, 161, 163, 255, 189, 132, 185, 144,
762 152, 161, 164, 255, 188, 129, 131, 190,
763 255, 133, 134, 137, 138, 142, 150, 152,
764 161, 164, 255, 131, 134, 137, 138, 142,
765 144, 146, 175, 178, 180, 182, 255, 134,
766 138, 142, 161, 164, 255, 188, 129, 131,
767 190, 191, 128, 132, 135, 136, 139, 141,
768 150, 151, 162, 163, 130, 190, 191, 151,
769 128, 130, 134, 136, 138, 141, 128, 131,
770 190, 255, 133, 137, 142, 148, 151, 161,
771 164, 255, 128, 132, 134, 136, 138, 141,
772 149, 150, 162, 163, 129, 131, 190, 255,
773 133, 137, 142, 150, 152, 161, 164, 255,
774 130, 131, 138, 150, 143, 148, 152, 159,
775 178, 179, 177, 179, 186, 135, 142, 177,
776 179, 185, 187, 188, 136, 141, 181, 183,
777 185, 152, 153, 190, 191, 177, 191, 128,
778 132, 134, 135, 141, 151, 153, 188, 134,
779 128, 129, 130, 141, 156, 157, 158, 159,
780 160, 162, 164, 168, 169, 170, 172, 173,
781 174, 175, 176, 179, 183, 173, 183, 185,
782 190, 150, 153, 158, 160, 177, 180, 130,
783 141, 157, 132, 134, 157, 159, 146, 148,
784 178, 180, 146, 147, 178, 179, 180, 255,
785 148, 156, 158, 255, 139, 141, 169, 133,
786 134, 160, 171, 176, 187, 151, 155, 160,
787 162, 191, 149, 158, 165, 188, 176, 190,
788 128, 132, 180, 255, 133, 170, 180, 255,
789 128, 130, 161, 173, 166, 179, 164, 183,
790 173, 144, 146, 148, 168, 178, 180, 184,
791 185, 128, 181, 187, 191, 128, 131, 179,
792 181, 183, 140, 141, 128, 131, 157, 179,
793 181, 183, 144, 176, 164, 175, 177, 191,
794 160, 191, 128, 130, 170, 175, 153, 154,
795 153, 154, 155, 160, 162, 163, 164, 165,
796 166, 167, 168, 169, 170, 171, 175, 175,
797 178, 180, 189, 158, 159, 176, 177, 130,
798 134, 139, 163, 167, 128, 129, 180, 255,
799 134, 159, 178, 255, 166, 173, 135, 147,
800 128, 131, 179, 255, 129, 164, 166, 255,
801 169, 182, 131, 188, 140, 141, 176, 178,
802 180, 183, 184, 190, 191, 129, 171, 175,
803 181, 182, 163, 170, 172, 173, 172, 184,
804 190, 158, 128, 143, 160, 175, 144, 145,
805 150, 155, 157, 158, 159, 135, 139, 141,
806 168, 171, 189, 160, 182, 186, 191, 129,
807 131, 133, 134, 140, 143, 184, 186, 165,
808 166, 128, 129, 130, 132, 133, 134, 135,
809 136, 139, 140, 141, 144, 145, 146, 147,
810 150, 151, 152, 153, 154, 156, 176, 178,
811 128, 130, 184, 255, 135, 190, 131, 175,
812 187, 255, 128, 130, 167, 180, 179, 128,
813 130, 179, 255, 129, 137, 141, 255, 190,
814 172, 183, 159, 170, 188, 128, 131, 190,
815 191, 151, 128, 132, 135, 136, 139, 141,
816 162, 163, 166, 172, 176, 180, 181, 191,
817 128, 134, 176, 255, 132, 255, 175, 181,
818 184, 255, 129, 155, 158, 255, 129, 255,
819 171, 183, 157, 171, 175, 182, 184, 191,
820 146, 167, 169, 182, 171, 172, 189, 190,
821 176, 180, 176, 182, 145, 190, 143, 146,
822 178, 157, 158, 133, 134, 137, 168, 169,
823 170, 165, 169, 173, 178, 187, 255, 131,
824 132, 140, 169, 174, 255, 130, 132, 128,
825 182, 187, 255, 173, 180, 182, 255, 132,
826 155, 159, 161, 175, 128, 163, 165, 128,
827 134, 136, 152, 155, 161, 163, 164, 166,
828 170, 144, 150, 132, 138, 145, 146, 151,
829 166, 169, 128, 255, 176, 255, 131, 137,
830 191, 145, 189, 135, 129, 130, 132, 133,
831 144, 154, 176, 139, 159, 150, 156, 159,
832 164, 167, 168, 170, 173, 145, 176, 255,
833 139, 255, 166, 176, 171, 179, 160, 161,
834 163, 164, 165, 166, 167, 169, 171, 172,
835 173, 174, 175, 176, 177, 178, 179, 180,
836 181, 182, 183, 184, 185, 186, 187, 188,
837 189, 190, 191, 168, 170, 150, 153, 155,
838 163, 165, 167, 169, 173, 153, 155, 148,
839 161, 163, 255, 131, 187, 189, 132, 185,
840 190, 255, 141, 144, 129, 136, 145, 151,
841 152, 161, 162, 163, 164, 255, 129, 188,
842 190, 130, 131, 191, 255, 141, 151, 129,
843 132, 133, 134, 137, 138, 142, 161, 162,
844 163, 164, 255, 131, 188, 129, 130, 190,
845 255, 145, 181, 129, 130, 131, 134, 135,
846 136, 137, 138, 139, 141, 142, 175, 176,
847 177, 178, 255, 134, 138, 141, 129, 136,
848 142, 161, 162, 163, 164, 255, 129, 188,
849 130, 131, 190, 191, 128, 141, 129, 132,
850 135, 136, 139, 140, 150, 151, 162, 163,
851 130, 190, 191, 128, 141, 151, 129, 130,
852 134, 136, 138, 140, 128, 129, 131, 190,
853 255, 133, 137, 129, 132, 142, 148, 151,
854 161, 164, 255, 129, 188, 190, 191, 130,
855 131, 130, 134, 128, 132, 135, 136, 138,
856 139, 140, 141, 149, 150, 162, 163, 129,
857 190, 130, 131, 191, 255, 133, 137, 141,
858 151, 129, 132, 142, 161, 162, 163, 164,
859 255, 138, 143, 150, 159, 144, 145, 146,
860 148, 152, 158, 178, 179, 177, 179, 180,
861 186, 135, 142, 177, 179, 180, 185, 187,
862 188, 136, 141, 181, 183, 185, 152, 153,
863 190, 191, 191, 177, 190, 128, 132, 134,
864 135, 141, 151, 153, 188, 134, 128, 129,
865 130, 141, 156, 157, 158, 159, 160, 162,
866 164, 168, 169, 170, 172, 173, 174, 175,
867 176, 179, 183, 177, 173, 183, 185, 186,
868 187, 188, 189, 190, 150, 151, 152, 153,
869 158, 160, 177, 180, 130, 132, 141, 157,
870 133, 134, 157, 159, 146, 148, 178, 180,
871 146, 147, 178, 179, 182, 180, 189, 190,
872 255, 134, 157, 137, 147, 148, 255, 139,
873 141, 169, 133, 134, 178, 160, 162, 163,
874 166, 167, 168, 169, 171, 176, 184, 185,
875 187, 155, 151, 152, 153, 154, 150, 160,
876 162, 191, 149, 151, 152, 158, 165, 172,
877 173, 178, 179, 188, 176, 190, 132, 181,
878 187, 128, 131, 180, 188, 189, 255, 130,
879 133, 170, 171, 179, 180, 255, 130, 161,
880 170, 128, 129, 162, 165, 166, 167, 168,
881 173, 167, 173, 166, 169, 170, 174, 175,
882 177, 178, 179, 164, 171, 172, 179, 180,
883 181, 182, 183, 161, 173, 180, 144, 146,
884 148, 168, 178, 179, 184, 185, 128, 181,
885 187, 191, 128, 131, 179, 181, 183, 140,
886 141, 144, 176, 175, 177, 191, 160, 191,
887 128, 130, 170, 175, 153, 154, 153, 154,
888 155, 160, 162, 163, 164, 165, 166, 167,
889 168, 169, 170, 171, 175, 175, 178, 180,
890 189, 158, 159, 176, 177, 130, 134, 139,
891 167, 163, 164, 165, 166, 132, 133, 134,
892 159, 160, 177, 178, 255, 166, 173, 135,
893 145, 146, 147, 131, 179, 188, 128, 130,
894 180, 181, 182, 185, 186, 255, 165, 129,
895 255, 169, 174, 175, 176, 177, 178, 179,
896 180, 181, 182, 131, 140, 141, 188, 176,
897 178, 180, 183, 184, 190, 191, 129, 171,
898 181, 182, 172, 173, 174, 175, 165, 168,
899 172, 173, 163, 170, 172, 184, 190, 158,
900 128, 143, 160, 175, 144, 145, 150, 155,
901 157, 158, 159, 135, 139, 141, 168, 171,
902 189, 160, 182, 186, 191, 129, 131, 133,
903 134, 140, 143, 184, 186, 165, 166, 128,
904 129, 130, 132, 133, 134, 135, 136, 139,
905 140, 141, 144, 145, 146, 147, 150, 151,
906 152, 153, 154, 156, 176, 178, 129, 128,
907 130, 184, 255, 135, 190, 130, 131, 175,
908 176, 178, 183, 184, 187, 255, 172, 128,
909 130, 167, 180, 179, 130, 128, 129, 179,
910 181, 182, 190, 191, 255, 129, 137, 138,
911 140, 141, 255, 180, 190, 172, 174, 175,
912 177, 178, 181, 182, 183, 159, 160, 162,
913 163, 170, 188, 190, 191, 128, 129, 130,
914 131, 128, 151, 129, 132, 135, 136, 139,
915 141, 162, 163, 166, 172, 176, 180, 181,
916 183, 184, 191, 133, 128, 129, 130, 134,
917 176, 185, 189, 177, 178, 179, 186, 187,
918 190, 191, 255, 129, 132, 255, 175, 190,
919 176, 177, 178, 181, 184, 187, 188, 255,
920 129, 155, 158, 255, 189, 176, 178, 179,
921 186, 187, 190, 191, 255, 129, 255, 172,
922 182, 171, 173, 174, 175, 176, 183, 166,
923 157, 159, 160, 161, 162, 171, 175, 190,
924 176, 182, 184, 191, 169, 177, 180, 146,
925 167, 170, 182, 171, 172, 189, 190, 176,
926 180, 176, 182, 143, 146, 178, 157, 158,
927 133, 134, 137, 168, 169, 170, 166, 173,
928 165, 169, 174, 178, 187, 255, 131, 132,
929 140, 169, 174, 255, 130, 132, 128, 182,
930 187, 255, 173, 180, 182, 255, 132, 155,
931 159, 161, 175, 128, 163, 165, 128, 134,
932 136, 152, 155, 161, 163, 164, 166, 170,
933 144, 150, 132, 138, 143, 187, 191, 160,
934 128, 129, 132, 135, 133, 134, 160, 255,
935 192, 255, 139, 168, 160, 128, 129, 132,
936 135, 133, 134, 160, 255, 192, 255, 144,
937 145, 150, 155, 157, 158, 128, 129, 130,
938 132, 133, 134, 141, 156, 157, 158, 159,
939 160, 162, 164, 168, 169, 170, 172, 173,
940 174, 175, 176, 179, 183, 160, 255, 128,
941 129, 130, 133, 134, 135, 141, 156, 157,
942 158, 159, 160, 162, 164, 168, 169, 170,
943 172, 173, 174, 175, 176, 179, 183, 160,
944 255, 168, 255, 128, 129, 130, 134, 135,
945 141, 156, 157, 158, 159, 160, 162, 164,
946 168, 169, 170, 172, 173, 174, 175, 176,
947 179, 183, 168, 255, 192, 255, 159, 139,
948 187, 158, 159, 176, 255, 135, 138, 139,
949 187, 188, 255, 168, 255, 153, 154, 155,
950 160, 162, 163, 164, 165, 166, 167, 168,
951 169, 170, 171, 175, 177, 178, 179, 180,
952 181, 182, 184, 185, 186, 187, 188, 189,
953 191, 176, 190, 192, 255, 135, 147, 160,
954 188, 128, 156, 184, 129, 255, 128, 129,
955 130, 133, 134, 141, 156, 157, 158, 159,
956 160, 162, 164, 168, 169, 170, 172, 173,
957 174, 175, 176, 179, 183, 158, 159, 135,
958 255, 148, 176, 140, 168, 132, 160, 188,
959 152, 180, 144, 172, 136, 164, 192, 255,
960 129, 130, 131, 132, 133, 134, 136, 137,
961 138, 139, 140, 141, 143, 144, 145, 146,
962 147, 148, 150, 151, 152, 153, 154, 155,
963 157, 158, 159, 160, 161, 162, 164, 165,
964 166, 167, 168, 169, 171, 172, 173, 174,
965 175, 176, 178, 179, 180, 181, 182, 183,
966 185, 186, 187, 188, 189, 190, 128, 191,
967 129, 130, 131, 132, 133, 134, 136, 137,
968 138, 139, 140, 141, 143, 144, 145, 146,
969 147, 148, 150, 151, 152, 153, 154, 155,
970 157, 158, 159, 160, 161, 162, 164, 165,
971 166, 167, 168, 169, 171, 172, 173, 174,
972 175, 176, 178, 179, 180, 181, 182, 183,
973 185, 186, 187, 188, 189, 190, 128, 191,
974 129, 130, 131, 132, 133, 134, 136, 137,
975 138, 139, 140, 141, 143, 144, 145, 146,
976 147, 148, 150, 151, 152, 153, 154, 155,
977 157, 158, 159, 128, 156, 160, 255, 136,
978 164, 175, 176, 255, 142, 128, 191, 128,
979 129, 152, 155, 156, 130, 191, 139, 141,
980 128, 140, 142, 143, 144, 167, 168, 174,
981 175, 191, 128, 255, 176, 255, 131, 137,
982 191, 145, 189, 135, 129, 130, 132, 133,
983 144, 154, 176, 139, 159, 150, 156, 159,
984 164, 167, 168, 170, 173, 145, 176, 255,
985 139, 255, 166, 176, 171, 179, 160, 161,
986 163, 164, 165, 167, 169, 171, 173, 174,
987 175, 176, 177, 179, 180, 181, 182, 183,
988 184, 185, 186, 187, 188, 189, 190, 191,
989 166, 170, 172, 178, 150, 153, 155, 163,
990 165, 167, 169, 173, 153, 155, 148, 161,
991 163, 255, 189, 132, 185, 144, 152, 161,
992 164, 255, 188, 129, 131, 190, 255, 133,
993 134, 137, 138, 142, 150, 152, 161, 164,
994 255, 131, 134, 137, 138, 142, 144, 146,
995 175, 178, 180, 182, 255, 134, 138, 142,
996 161, 164, 255, 188, 129, 131, 190, 191,
997 128, 132, 135, 136, 139, 141, 150, 151,
998 162, 163, 130, 190, 191, 151, 128, 130,
999 134, 136, 138, 141, 128, 131, 190, 255,
1000 133, 137, 142, 148, 151, 161, 164, 255,
1001 128, 132, 134, 136, 138, 141, 149, 150,
1002 162, 163, 129, 131, 190, 255, 133, 137,
1003 142, 150, 152, 161, 164, 255, 130, 131,
1004 138, 150, 143, 148, 152, 159, 178, 179,
1005 177, 179, 186, 135, 142, 177, 179, 185,
1006 187, 188, 136, 141, 181, 183, 185, 152,
1007 153, 190, 191, 177, 191, 128, 132, 134,
1008 135, 141, 151, 153, 188, 134, 128, 129,
1009 130, 141, 156, 157, 158, 159, 160, 162,
1010 164, 168, 169, 170, 172, 173, 174, 175,
1011 176, 179, 183, 173, 183, 185, 190, 150,
1012 153, 158, 160, 177, 180, 130, 141, 157,
1013 132, 134, 157, 159, 146, 148, 178, 180,
1014 146, 147, 178, 179, 180, 255, 148, 156,
1015 158, 255, 139, 141, 169, 133, 134, 160,
1016 171, 176, 187, 151, 155, 160, 162, 191,
1017 149, 158, 165, 188, 176, 190, 128, 132,
1018 180, 255, 133, 170, 180, 255, 128, 130,
1019 161, 173, 166, 179, 164, 183, 173, 144,
1020 146, 148, 168, 178, 180, 184, 185, 128,
1021 181, 187, 191, 128, 131, 179, 181, 183,
1022 140, 141, 144, 176, 175, 177, 191, 160,
1023 191, 128, 130, 170, 175, 153, 154, 153,
1024 154, 155, 160, 162, 163, 164, 165, 166,
1025 167, 168, 169, 170, 171, 175, 175, 178,
1026 180, 189, 158, 159, 176, 177, 130, 134,
1027 139, 163, 167, 128, 129, 180, 255, 134,
1028 159, 178, 255, 166, 173, 135, 147, 128,
1029 131, 179, 255, 129, 164, 166, 255, 169,
1030 182, 131, 188, 140, 141, 176, 178, 180,
1031 183, 184, 190, 191, 129, 171, 175, 181,
1032 182, 163, 170, 172, 173, 172, 184, 190,
1033 158, 128, 143, 160, 175, 144, 145, 150,
1034 155, 157, 158, 135, 139, 141, 168, 171,
1035 189, 160, 182, 186, 191, 129, 131, 133,
1036 134, 140, 143, 184, 186, 165, 166, 128,
1037 129, 130, 132, 133, 134, 135, 136, 139,
1038 140, 141, 144, 145, 146, 147, 150, 151,
1039 152, 153, 154, 156, 176, 178, 128, 130,
1040 184, 255, 135, 190, 131, 175, 187, 255,
1041 128, 130, 167, 180, 179, 128, 130, 179,
1042 255, 129, 137, 141, 255, 190, 172, 183,
1043 159, 170, 188, 128, 131, 190, 191, 151,
1044 128, 132, 135, 136, 139, 141, 162, 163,
1045 166, 172, 176, 180, 181, 191, 128, 134,
1046 176, 255, 132, 255, 175, 181, 184, 255,
1047 129, 155, 158, 255, 129, 255, 171, 183,
1048 157, 171, 175, 182, 184, 191, 146, 167,
1049 169, 182, 171, 172, 189, 190, 176, 180,
1050 176, 182, 145, 190, 143, 146, 178, 157,
1051 158, 133, 134, 137, 168, 169, 170, 165,
1052 169, 173, 178, 187, 255, 131, 132, 140,
1053 169, 174, 255, 130, 132, 128, 182, 187,
1054 255, 173, 180, 182, 255, 132, 155, 159,
1055 161, 175, 128, 163, 165, 128, 134, 136,
1056 152, 155, 161, 163, 164, 166, 170, 144,
1057 150, 132, 138, 160, 128, 129, 132, 135,
1058 133, 134, 160, 255, 192, 255, 128, 131,
1059 157, 179, 181, 183, 164, 144, 145, 150,
1060 155, 157, 158, 159, 145, 146, 151, 166,
1061 169, 128, 255, 176, 255, 131, 137, 191,
1062 145, 189, 135, 129, 130, 132, 133, 144,
1063 154, 176, 139, 159, 150, 156, 159, 164,
1064 167, 168, 170, 173, 145, 176, 255, 139,
1065 255, 166, 176, 171, 179, 160, 161, 163,
1066 164, 165, 166, 167, 169, 171, 172, 173,
1067 174, 175, 176, 177, 178, 179, 180, 181,
1068 182, 183, 184, 185, 186, 187, 188, 189,
1069 190, 191, 168, 170, 150, 153, 155, 163,
1070 165, 167, 169, 173, 153, 155, 148, 161,
1071 163, 255, 131, 187, 189, 132, 185, 190,
1072 255, 141, 144, 129, 136, 145, 151, 152,
1073 161, 162, 163, 164, 255, 129, 188, 190,
1074 130, 131, 191, 255, 141, 151, 129, 132,
1075 133, 134, 137, 138, 142, 161, 162, 163,
1076 164, 255, 131, 188, 129, 130, 190, 255,
1077 145, 181, 129, 130, 131, 134, 135, 136,
1078 137, 138, 139, 141, 142, 175, 176, 177,
1079 178, 255, 134, 138, 141, 129, 136, 142,
1080 161, 162, 163, 164, 255, 129, 188, 130,
1081 131, 190, 191, 128, 141, 129, 132, 135,
1082 136, 139, 140, 150, 151, 162, 163, 130,
1083 190, 191, 128, 141, 151, 129, 130, 134,
1084 136, 138, 140, 128, 129, 131, 190, 255,
1085 133, 137, 129, 132, 142, 148, 151, 161,
1086 164, 255, 129, 188, 190, 191, 130, 131,
1087 130, 134, 128, 132, 135, 136, 138, 139,
1088 140, 141, 149, 150, 162, 163, 129, 190,
1089 130, 131, 191, 255, 133, 137, 141, 151,
1090 129, 132, 142, 161, 162, 163, 164, 255,
1091 138, 143, 150, 159, 144, 145, 146, 148,
1092 152, 158, 178, 179, 177, 179, 180, 186,
1093 135, 142, 177, 179, 180, 185, 187, 188,
1094 136, 141, 181, 183, 185, 152, 153, 190,
1095 191, 191, 177, 190, 128, 132, 134, 135,
1096 141, 151, 153, 188, 134, 128, 129, 130,
1097 141, 156, 157, 158, 159, 160, 162, 164,
1098 168, 169, 170, 172, 173, 174, 175, 176,
1099 179, 183, 177, 173, 183, 185, 186, 187,
1100 188, 189, 190, 150, 151, 152, 153, 158,
1101 160, 177, 180, 130, 132, 141, 157, 133,
1102 134, 157, 159, 146, 148, 178, 180, 146,
1103 147, 178, 179, 182, 180, 189, 190, 255,
1104 134, 157, 137, 147, 148, 255, 139, 141,
1105 169, 133, 134, 178, 160, 162, 163, 166,
1106 167, 168, 169, 171, 176, 184, 185, 187,
1107 155, 151, 152, 153, 154, 150, 160, 162,
1108 191, 149, 151, 152, 158, 165, 172, 173,
1109 178, 179, 188, 176, 190, 132, 181, 187,
1110 128, 131, 180, 188, 189, 255, 130, 133,
1111 170, 171, 179, 180, 255, 130, 161, 170,
1112 128, 129, 162, 165, 166, 167, 168, 173,
1113 167, 173, 166, 169, 170, 174, 175, 177,
1114 178, 179, 164, 171, 172, 179, 180, 181,
1115 182, 183, 161, 173, 180, 144, 146, 148,
1116 168, 178, 179, 184, 185, 128, 181, 187,
1117 191, 128, 131, 179, 181, 183, 140, 141,
1118 144, 176, 175, 177, 191, 160, 191, 128,
1119 130, 170, 175, 153, 154, 153, 154, 155,
1120 160, 162, 163, 164, 165, 166, 167, 168,
1121 169, 170, 171, 175, 175, 178, 180, 189,
1122 158, 159, 176, 177, 130, 134, 139, 167,
1123 163, 164, 165, 166, 132, 133, 134, 159,
1124 160, 177, 178, 255, 166, 173, 135, 145,
1125 146, 147, 131, 179, 188, 128, 130, 180,
1126 181, 182, 185, 186, 255, 165, 129, 255,
1127 169, 174, 175, 176, 177, 178, 179, 180,
1128 181, 182, 131, 140, 141, 188, 176, 178,
1129 180, 183, 184, 190, 191, 129, 171, 181,
1130 182, 172, 173, 174, 175, 165, 168, 172,
1131 173, 163, 170, 172, 184, 190, 158, 128,
1132 143, 160, 175, 144, 145, 150, 155, 157,
1133 158, 159, 135, 139, 141, 168, 171, 189,
1134 160, 182, 186, 191, 129, 131, 133, 134,
1135 140, 143, 184, 186, 165, 166, 128, 129,
1136 130, 132, 133, 134, 135, 136, 139, 140,
1137 141, 144, 145, 146, 147, 150, 151, 152,
1138 153, 154, 156, 176, 178, 129, 128, 130,
1139 184, 255, 135, 190, 130, 131, 175, 176,
1140 178, 183, 184, 187, 255, 172, 128, 130,
1141 167, 180, 179, 130, 128, 129, 179, 181,
1142 182, 190, 191, 255, 129, 137, 138, 140,
1143 141, 255, 180, 190, 172, 174, 175, 177,
1144 178, 181, 182, 183, 159, 160, 162, 163,
1145 170, 188, 190, 191, 128, 129, 130, 131,
1146 128, 151, 129, 132, 135, 136, 139, 141,
1147 162, 163, 166, 172, 176, 180, 181, 183,
1148 184, 191, 133, 128, 129, 130, 134, 176,
1149 185, 189, 177, 178, 179, 186, 187, 190,
1150 191, 255, 129, 132, 255, 175, 190, 176,
1151 177, 178, 181, 184, 187, 188, 255, 129,
1152 155, 158, 255, 189, 176, 178, 179, 186,
1153 187, 190, 191, 255, 129, 255, 172, 182,
1154 171, 173, 174, 175, 176, 183, 166, 157,
1155 159, 160, 161, 162, 171, 175, 190, 176,
1156 182, 184, 191, 169, 177, 180, 146, 167,
1157 170, 182, 171, 172, 189, 190, 176, 180,
1158 176, 182, 143, 146, 178, 157, 158, 133,
1159 134, 137, 168, 169, 170, 166, 173, 165,
1160 169, 174, 178, 187, 255, 131, 132, 140,
1161 169, 174, 255, 130, 132, 128, 182, 187,
1162 255, 173, 180, 182, 255, 132, 155, 159,
1163 161, 175, 128, 163, 165, 128, 134, 136,
1164 152, 155, 161, 163, 164, 166, 170, 144,
1165 150, 132, 138, 143, 187, 191, 160, 128,
1166 129, 132, 135, 133, 134, 160, 255, 192,
1167 255, 139, 168, 128, 159, 160, 175, 176,
1168 191, 157, 128, 191, 128, 255, 176, 255,
1169 131, 137, 191, 145, 189, 135, 129, 130,
1170 132, 133, 144, 154, 176, 139, 159, 150,
1171 156, 159, 164, 167, 168, 170, 173, 145,
1172 176, 255, 139, 255, 166, 176, 171, 179,
1173 160, 161, 163, 164, 165, 166, 167, 169,
1174 171, 172, 173, 174, 175, 176, 177, 178,
1175 179, 180, 181, 182, 183, 184, 185, 186,
1176 187, 188, 189, 190, 191, 168, 170, 150,
1177 153, 155, 163, 165, 167, 169, 173, 153,
1178 155, 148, 161, 163, 255, 131, 187, 189,
1179 132, 185, 190, 255, 128, 255, 176, 255,
1180 131, 137, 191, 145, 189, 135, 129, 130,
1181 132, 133, 144, 154, 176, 139, 159, 150,
1182 156, 159, 164, 167, 168, 170, 173, 145,
1183 176, 255, 139, 255, 166, 176, 171, 179,
1184 160, 161, 163, 164, 165, 167, 169, 171,
1185 173, 174, 175, 176, 177, 179, 180, 181,
1186 182, 183, 184, 185, 186, 187, 188, 189,
1187 190, 191, 166, 170, 172, 178, 150, 153,
1188 155, 163, 165, 167, 169, 173, 153, 155,
1189 148, 161, 163, 255, 189, 132, 185, 144,
1190 152, 161, 164, 255, 188, 129, 131, 190,
1191 255, 133, 134, 137, 138, 142, 150, 152,
1192 161, 164, 255, 131, 134, 137, 138, 142,
1193 144, 146, 175, 178, 180, 182, 255, 134,
1194 138, 142, 161, 164, 255, 188, 129, 131,
1195 190, 191, 128, 132, 135, 136, 139, 141,
1196 150, 151, 162, 163, 130, 190, 191, 151,
1197 128, 130, 134, 136, 138, 141, 128, 131,
1198 190, 255, 133, 137, 142, 148, 151, 161,
1199 164, 255, 128, 132, 134, 136, 138, 141,
1200 149, 150, 162, 163, 129, 131, 190, 255,
1201 133, 137, 142, 150, 152, 161, 164, 255,
1202 130, 131, 138, 150, 143, 148, 152, 159,
1203 178, 179, 177, 179, 186, 135, 142, 177,
1204 179, 185, 187, 188, 136, 141, 181, 183,
1205 185, 152, 153, 190, 191, 177, 191, 128,
1206 132, 134, 135, 141, 151, 153, 188, 134,
1207 128, 129, 130, 141, 156, 157, 158, 159,
1208 160, 162, 164, 168, 169, 170, 172, 173,
1209 174, 175, 176, 179, 183, 173, 183, 185,
1210 190, 150, 153, 158, 160, 177, 180, 130,
1211 141, 157, 132, 134, 157, 159, 146, 148,
1212 178, 180, 146, 147, 178, 179, 180, 255,
1213 148, 156, 158, 255, 139, 141, 169, 133,
1214 134, 160, 171, 176, 187, 151, 155, 160,
1215 162, 191, 149, 158, 165, 188, 176, 190,
1216 128, 132, 180, 255, 133, 170, 180, 255,
1217 128, 130, 161, 173, 166, 179, 164, 183,
1218 173, 144, 146, 148, 168, 178, 180, 184,
1219 185, 128, 181, 187, 191, 128, 131, 179,
1220 181, 183, 140, 141, 128, 131, 157, 179,
1221 181, 183, 144, 176, 164, 175, 177, 191,
1222 160, 191, 128, 130, 170, 175, 153, 154,
1223 153, 154, 155, 160, 162, 163, 164, 165,
1224 166, 167, 168, 169, 170, 171, 175, 175,
1225 178, 180, 189, 158, 159, 176, 177, 130,
1226 134, 139, 163, 167, 128, 129, 180, 255,
1227 134, 159, 178, 255, 166, 173, 135, 147,
1228 128, 131, 179, 255, 129, 164, 166, 255,
1229 169, 182, 131, 188, 140, 141, 176, 178,
1230 180, 183, 184, 190, 191, 129, 171, 175,
1231 181, 182, 163, 170, 172, 173, 172, 184,
1232 190, 158, 128, 143, 160, 175, 144, 145,
1233 150, 155, 157, 158, 159, 135, 139, 141,
1234 168, 171, 189, 160, 182, 186, 191, 129,
1235 131, 133, 134, 140, 143, 184, 186, 165,
1236 166, 128, 129, 130, 132, 133, 134, 135,
1237 136, 139, 140, 141, 144, 145, 146, 147,
1238 150, 151, 152, 153, 154, 156, 176, 178,
1239 128, 130, 184, 255, 135, 190, 131, 175,
1240 187, 255, 128, 130, 167, 180, 179, 128,
1241 130, 179, 255, 129, 137, 141, 255, 190,
1242 172, 183, 159, 170, 188, 128, 131, 190,
1243 191, 151, 128, 132, 135, 136, 139, 141,
1244 162, 163, 166, 172, 176, 180, 181, 191,
1245 128, 134, 176, 255, 132, 255, 175, 181,
1246 184, 255, 129, 155, 158, 255, 129, 255,
1247 171, 183, 157, 171, 175, 182, 184, 191,
1248 146, 167, 169, 182, 171, 172, 189, 190,
1249 176, 180, 176, 182, 145, 190, 143, 146,
1250 178, 157, 158, 133, 134, 137, 168, 169,
1251 170, 165, 169, 173, 178, 187, 255, 131,
1252 132, 140, 169, 174, 255, 130, 132, 128,
1253 182, 187, 255, 173, 180, 182, 255, 132,
1254 155, 159, 161, 175, 128, 163, 165, 128,
1255 134, 136, 152, 155, 161, 163, 164, 166,
1256 170, 144, 150, 132, 138, 145, 146, 151,
1257 166, 169, 139, 168, 160, 128, 129, 132,
1258 135, 133, 134, 160, 255, 192, 255, 144,
1259 145, 150, 155, 157, 158, 141, 144, 129,
1260 136, 145, 151, 152, 161, 162, 163, 164,
1261 255, 129, 188, 190, 130, 131, 191, 255,
1262 141, 151, 129, 132, 133, 134, 137, 138,
1263 142, 161, 162, 163, 164, 255, 131, 188,
1264 129, 130, 190, 255, 145, 181, 129, 130,
1265 131, 134, 135, 136, 137, 138, 139, 141,
1266 142, 175, 176, 177, 178, 255, 134, 138,
1267 141, 129, 136, 142, 161, 162, 163, 164,
1268 255, 129, 188, 130, 131, 190, 191, 128,
1269 141, 129, 132, 135, 136, 139, 140, 150,
1270 151, 162, 163, 130, 190, 191, 128, 141,
1271 151, 129, 130, 134, 136, 138, 140, 128,
1272 129, 131, 190, 255, 133, 137, 129, 132,
1273 142, 148, 151, 161, 164, 255, 129, 188,
1274 190, 191, 130, 131, 130, 134, 128, 132,
1275 135, 136, 138, 139, 140, 141, 149, 150,
1276 162, 163, 129, 190, 130, 131, 191, 255,
1277 133, 137, 141, 151, 129, 132, 142, 161,
1278 162, 163, 164, 255, 138, 143, 150, 159,
1279 144, 145, 146, 148, 152, 158, 178, 179,
1280 177, 179, 180, 186, 135, 142, 177, 179,
1281 180, 185, 187, 188, 136, 141, 181, 183,
1282 185, 152, 153, 190, 191, 191, 177, 190,
1283 128, 132, 134, 135, 141, 151, 153, 188,
1284 134, 128, 129, 130, 141, 156, 157, 158,
1285 159, 160, 162, 164, 168, 169, 170, 172,
1286 173, 174, 175, 176, 179, 183, 177, 173,
1287 183, 185, 186, 187, 188, 189, 190, 150,
1288 151, 152, 153, 158, 160, 177, 180, 130,
1289 132, 141, 157, 133, 134, 157, 159, 146,
1290 148, 178, 180, 146, 147, 178, 179, 182,
1291 180, 189, 190, 255, 134, 157, 137, 147,
1292 148, 255, 139, 141, 169, 133, 134, 178,
1293 160, 162, 163, 166, 167, 168, 169, 171,
1294 176, 184, 185, 187, 155, 151, 152, 153,
1295 154, 150, 160, 162, 191, 149, 151, 152,
1296 158, 165, 172, 173, 178, 179, 188, 176,
1297 190, 132, 181, 187, 128, 131, 180, 188,
1298 189, 255, 130, 133, 170, 171, 179, 180,
1299 255, 130, 161, 170, 128, 129, 162, 165,
1300 166, 167, 168, 173, 167, 173, 166, 169,
1301 170, 174, 175, 177, 178, 179, 164, 171,
1302 172, 179, 180, 181, 182, 183, 161, 173,
1303 180, 144, 146, 148, 168, 178, 179, 184,
1304 185, 128, 181, 187, 191, 128, 131, 179,
1305 181, 183, 140, 141, 144, 176, 175, 177,
1306 191, 160, 191, 128, 130, 170, 175, 153,
1307 154, 153, 154, 155, 160, 162, 163, 164,
1308 165, 166, 167, 168, 169, 170, 171, 175,
1309 175, 178, 180, 189, 158, 159, 176, 177,
1310 130, 134, 139, 167, 163, 164, 165, 166,
1311 132, 133, 134, 159, 160, 177, 178, 255,
1312 166, 173, 135, 145, 146, 147, 131, 179,
1313 188, 128, 130, 180, 181, 182, 185, 186,
1314 255, 165, 129, 255, 169, 174, 175, 176,
1315 177, 178, 179, 180, 181, 182, 131, 140,
1316 141, 188, 176, 178, 180, 183, 184, 190,
1317 191, 129, 171, 181, 182, 172, 173, 174,
1318 175, 165, 168, 172, 173, 163, 170, 172,
1319 184, 190, 158, 128, 143, 160, 175, 144,
1320 145, 150, 155, 157, 158, 159, 135, 139,
1321 141, 168, 171, 189, 160, 182, 186, 191,
1322 129, 131, 133, 134, 140, 143, 184, 186,
1323 165, 166, 128, 129, 130, 132, 133, 134,
1324 135, 136, 139, 140, 141, 144, 145, 146,
1325 147, 150, 151, 152, 153, 154, 156, 176,
1326 178, 129, 128, 130, 184, 255, 135, 190,
1327 130, 131, 175, 176, 178, 183, 184, 187,
1328 255, 172, 128, 130, 167, 180, 179, 130,
1329 128, 129, 179, 181, 182, 190, 191, 255,
1330 129, 137, 138, 140, 141, 255, 180, 190,
1331 172, 174, 175, 177, 178, 181, 182, 183,
1332 159, 160, 162, 163, 170, 188, 190, 191,
1333 128, 129, 130, 131, 128, 151, 129, 132,
1334 135, 136, 139, 141, 162, 163, 166, 172,
1335 176, 180, 181, 183, 184, 191, 133, 128,
1336 129, 130, 134, 176, 185, 189, 177, 178,
1337 179, 186, 187, 190, 191, 255, 129, 132,
1338 255, 175, 190, 176, 177, 178, 181, 184,
1339 187, 188, 255, 129, 155, 158, 255, 189,
1340 176, 178, 179, 186, 187, 190, 191, 255,
1341 129, 255, 172, 182, 171, 173, 174, 175,
1342 176, 183, 166, 157, 159, 160, 161, 162,
1343 171, 175, 190, 176, 182, 184, 191, 169,
1344 177, 180, 146, 167, 170, 182, 171, 172,
1345 189, 190, 176, 180, 176, 182, 143, 146,
1346 178, 157, 158, 133, 134, 137, 168, 169,
1347 170, 166, 173, 165, 169, 174, 178, 187,
1348 255, 131, 132, 140, 169, 174, 255, 130,
1349 132, 128, 182, 187, 255, 173, 180, 182,
1350 255, 132, 155, 159, 161, 175, 128, 163,
1351 165, 128, 134, 136, 152, 155, 161, 163,
1352 164, 166, 170, 144, 150, 132, 138, 143,
1353 187, 191, 160, 128, 129, 132, 135, 133,
1354 134, 160, 255, 192, 255, 185, 128, 191,
1355 128, 137, 138, 141, 142, 191, 128, 191,
1356 165, 177, 178, 179, 180, 181, 182, 184,
1357 185, 186, 187, 188, 189, 191, 128, 175,
1358 176, 190, 192, 255, 128, 159, 160, 188,
1359 189, 191, 128, 156, 184, 129, 255, 148,
1360 176, 140, 168, 132, 160, 188, 152, 180,
1361 144, 172, 136, 164, 192, 255, 129, 130,
1362 131, 132, 133, 134, 136, 137, 138, 139,
1363 140, 141, 143, 144, 145, 146, 147, 148,
1364 150, 151, 152, 153, 154, 155, 157, 158,
1365 159, 160, 161, 162, 164, 165, 166, 167,
1366 168, 169, 171, 172, 173, 174, 175, 176,
1367 178, 179, 180, 181, 182, 183, 185, 186,
1368 187, 188, 189, 190, 128, 191, 129, 130,
1369 131, 132, 133, 134, 136, 137, 138, 139,
1370 140, 141, 143, 144, 145, 146, 147, 148,
1371 150, 151, 152, 153, 154, 155, 157, 158,
1372 159, 160, 161, 162, 164, 165, 166, 167,
1373 168, 169, 171, 172, 173, 174, 175, 176,
1374 178, 179, 180, 181, 182, 183, 185, 186,
1375 187, 188, 189, 190, 128, 191, 129, 130,
1376 131, 132, 133, 134, 136, 137, 138, 139,
1377 140, 141, 143, 144, 145, 146, 147, 148,
1378 150, 151, 152, 153, 154, 155, 157, 158,
1379 159, 160, 191, 128, 156, 161, 190, 192,
1380 255, 136, 164, 175, 176, 255, 135, 138,
1381 139, 187, 188, 191, 192, 255, 0, 127,
1382 192, 255, 187, 191, 128, 190, 191, 128,
1383 190, 188, 128, 175, 176, 189, 190, 191,
1384 145, 155, 157, 159, 128, 191, 130, 135,
1385 128, 191, 189, 128, 191, 128, 129, 130,
1386 131, 132, 191, 178, 128, 191, 128, 159,
1387 160, 163, 164, 191, 133, 128, 191, 128,
1388 178, 179, 186, 187, 191, 135, 142, 143,
1389 145, 146, 149, 150, 153, 154, 155, 164,
1390 128, 191, 128, 165, 166, 191, 128, 255,
1391 176, 255, 131, 137, 191, 145, 189, 135,
1392 129, 130, 132, 133, 144, 154, 176, 139,
1393 159, 150, 156, 159, 164, 167, 168, 170,
1394 173, 145, 176, 255, 139, 255, 166, 176,
1395 171, 179, 160, 161, 163, 164, 165, 167,
1396 169, 171, 173, 174, 175, 176, 177, 179,
1397 180, 181, 182, 183, 184, 185, 186, 187,
1398 188, 189, 190, 191, 166, 170, 172, 178,
1399 150, 153, 155, 163, 165, 167, 169, 173,
1400 153, 155, 148, 161, 163, 255, 189, 132,
1401 185, 144, 152, 161, 164, 255, 188, 129,
1402 131, 190, 255, 133, 134, 137, 138, 142,
1403 150, 152, 161, 164, 255, 131, 134, 137,
1404 138, 142, 144, 146, 175, 178, 180, 182,
1405 255, 134, 138, 142, 161, 164, 255, 188,
1406 129, 131, 190, 191, 128, 132, 135, 136,
1407 139, 141, 150, 151, 162, 163, 130, 190,
1408 191, 151, 128, 130, 134, 136, 138, 141,
1409 128, 131, 190, 255, 133, 137, 142, 148,
1410 151, 161, 164, 255, 128, 132, 134, 136,
1411 138, 141, 149, 150, 162, 163, 129, 131,
1412 190, 255, 133, 137, 142, 150, 152, 161,
1413 164, 255, 130, 131, 138, 150, 143, 148,
1414 152, 159, 178, 179, 177, 179, 186, 135,
1415 142, 177, 179, 185, 187, 188, 136, 141,
1416 181, 183, 185, 152, 153, 190, 191, 177,
1417 191, 128, 132, 134, 135, 141, 151, 153,
1418 188, 134, 128, 129, 130, 141, 156, 157,
1419 158, 159, 160, 162, 164, 168, 169, 170,
1420 172, 173, 174, 175, 176, 179, 183, 173,
1421 183, 185, 190, 150, 153, 158, 160, 177,
1422 180, 130, 141, 157, 132, 134, 157, 159,
1423 146, 148, 178, 180, 146, 147, 178, 179,
1424 180, 255, 148, 156, 158, 255, 139, 141,
1425 169, 133, 134, 160, 171, 176, 187, 151,
1426 155, 160, 162, 191, 149, 158, 165, 188,
1427 176, 190, 128, 132, 180, 255, 133, 170,
1428 180, 255, 128, 130, 161, 173, 166, 179,
1429 164, 183, 173, 144, 146, 148, 168, 178,
1430 180, 184, 185, 128, 181, 187, 191, 128,
1431 131, 179, 181, 183, 140, 141, 128, 131,
1432 157, 179, 181, 183, 144, 176, 164, 175,
1433 177, 191, 160, 191, 128, 130, 170, 175,
1434 153, 154, 153, 154, 155, 160, 162, 163,
1435 164, 165, 166, 167, 168, 169, 170, 171,
1436 175, 175, 178, 180, 189, 158, 159, 176,
1437 177, 130, 134, 139, 163, 167, 128, 129,
1438 180, 255, 134, 159, 178, 255, 166, 173,
1439 135, 147, 128, 131, 179, 255, 129, 164,
1440 166, 255, 169, 182, 131, 188, 140, 141,
1441 176, 178, 180, 183, 184, 190, 191, 129,
1442 171, 175, 181, 182, 163, 170, 172, 173,
1443 172, 184, 190, 158, 128, 143, 160, 175,
1444 144, 145, 150, 155, 157, 158, 159, 135,
1445 139, 141, 168, 171, 189, 160, 182, 186,
1446 191, 129, 131, 133, 134, 140, 143, 184,
1447 186, 165, 166, 128, 129, 130, 132, 133,
1448 134, 135, 136, 139, 140, 141, 144, 145,
1449 146, 147, 150, 151, 152, 153, 154, 156,
1450 176, 178, 128, 130, 184, 255, 135, 190,
1451 131, 175, 187, 255, 128, 130, 167, 180,
1452 179, 128, 130, 179, 255, 129, 137, 141,
1453 255, 190, 172, 183, 159, 170, 188, 128,
1454 131, 190, 191, 151, 128, 132, 135, 136,
1455 139, 141, 162, 163, 166, 172, 176, 180,
1456 181, 191, 128, 134, 176, 255, 132, 255,
1457 175, 181, 184, 255, 129, 155, 158, 255,
1458 129, 255, 171, 183, 157, 171, 175, 182,
1459 184, 191, 146, 167, 169, 182, 171, 172,
1460 189, 190, 176, 180, 176, 182, 145, 190,
1461 143, 146, 178, 157, 158, 133, 134, 137,
1462 168, 169, 170, 165, 169, 173, 178, 187,
1463 255, 131, 132, 140, 169, 174, 255, 130,
1464 132, 128, 182, 187, 255, 173, 180, 182,
1465 255, 132, 155, 159, 161, 175, 128, 163,
1466 165, 128, 134, 136, 152, 155, 161, 163,
1467 164, 166, 170, 144, 150, 132, 138, 145,
1468 146, 151, 166, 169, 128, 255, 176, 255,
1469 131, 137, 191, 145, 189, 135, 129, 130,
1470 132, 133, 144, 154, 176, 139, 159, 150,
1471 156, 159, 164, 167, 168, 170, 173, 145,
1472 176, 255, 139, 255, 166, 176, 171, 179,
1473 160, 161, 163, 164, 165, 166, 167, 169,
1474 171, 172, 173, 174, 175, 176, 177, 178,
1475 179, 180, 181, 182, 183, 184, 185, 186,
1476 187, 188, 189, 190, 191, 168, 170, 150,
1477 153, 155, 163, 165, 167, 169, 173, 153,
1478 155, 148, 161, 163, 255, 131, 187, 189,
1479 132, 185, 190, 255, 141, 144, 129, 136,
1480 145, 151, 152, 161, 162, 163, 164, 255,
1481 129, 188, 190, 130, 131, 191, 255, 141,
1482 151, 129, 132, 133, 134, 137, 138, 142,
1483 161, 162, 163, 164, 255, 131, 188, 129,
1484 130, 190, 255, 145, 181, 129, 130, 131,
1485 134, 135, 136, 137, 138, 139, 141, 142,
1486 175, 176, 177, 178, 255, 134, 138, 141,
1487 129, 136, 142, 161, 162, 163, 164, 255,
1488 129, 188, 130, 131, 190, 191, 128, 141,
1489 129, 132, 135, 136, 139, 140, 150, 151,
1490 162, 163, 130, 190, 191, 128, 141, 151,
1491 129, 130, 134, 136, 138, 140, 128, 129,
1492 131, 190, 255, 133, 137, 129, 132, 142,
1493 148, 151, 161, 164, 255, 129, 188, 190,
1494 191, 130, 131, 130, 134, 128, 132, 135,
1495 136, 138, 139, 140, 141, 149, 150, 162,
1496 163, 129, 190, 130, 131, 191, 255, 133,
1497 137, 141, 151, 129, 132, 142, 161, 162,
1498 163, 164, 255, 138, 143, 150, 159, 144,
1499 145, 146, 148, 152, 158, 178, 179, 177,
1500 179, 180, 186, 135, 142, 177, 179, 180,
1501 185, 187, 188, 136, 141, 181, 183, 185,
1502 152, 153, 190, 191, 191, 177, 190, 128,
1503 132, 134, 135, 141, 151, 153, 188, 134,
1504 128, 129, 130, 141, 156, 157, 158, 159,
1505 160, 162, 164, 168, 169, 170, 172, 173,
1506 174, 175, 176, 179, 183, 177, 173, 183,
1507 185, 186, 187, 188, 189, 190, 150, 151,
1508 152, 153, 158, 160, 177, 180, 130, 132,
1509 141, 157, 133, 134, 157, 159, 146, 148,
1510 178, 180, 146, 147, 178, 179, 182, 180,
1511 189, 190, 255, 134, 157, 137, 147, 148,
1512 255, 139, 141, 169, 133, 134, 178, 160,
1513 162, 163, 166, 167, 168, 169, 171, 176,
1514 184, 185, 187, 155, 151, 152, 153, 154,
1515 150, 160, 162, 191, 149, 151, 152, 158,
1516 165, 172, 173, 178, 179, 188, 176, 190,
1517 132, 181, 187, 128, 131, 180, 188, 189,
1518 255, 130, 133, 170, 171, 179, 180, 255,
1519 130, 161, 170, 128, 129, 162, 165, 166,
1520 167, 168, 173, 167, 173, 166, 169, 170,
1521 174, 175, 177, 178, 179, 164, 171, 172,
1522 179, 180, 181, 182, 183, 161, 173, 180,
1523 144, 146, 148, 168, 178, 179, 184, 185,
1524 128, 181, 187, 191, 128, 131, 179, 181,
1525 183, 140, 141, 144, 176, 175, 177, 191,
1526 160, 191, 128, 130, 170, 175, 153, 154,
1527 153, 154, 155, 160, 162, 163, 164, 165,
1528 166, 167, 168, 169, 170, 171, 175, 175,
1529 178, 180, 189, 158, 159, 176, 177, 130,
1530 134, 139, 167, 163, 164, 165, 166, 132,
1531 133, 134, 159, 160, 177, 178, 255, 166,
1532 173, 135, 145, 146, 147, 131, 179, 188,
1533 128, 130, 180, 181, 182, 185, 186, 255,
1534 165, 129, 255, 169, 174, 175, 176, 177,
1535 178, 179, 180, 181, 182, 131, 140, 141,
1536 188, 176, 178, 180, 183, 184, 190, 191,
1537 129, 171, 181, 182, 172, 173, 174, 175,
1538 165, 168, 172, 173, 163, 170, 172, 184,
1539 190, 158, 128, 143, 160, 175, 144, 145,
1540 150, 155, 157, 158, 159, 135, 139, 141,
1541 168, 171, 189, 160, 182, 186, 191, 129,
1542 131, 133, 134, 140, 143, 184, 186, 165,
1543 166, 128, 129, 130, 132, 133, 134, 135,
1544 136, 139, 140, 141, 144, 145, 146, 147,
1545 150, 151, 152, 153, 154, 156, 176, 178,
1546 129, 128, 130, 184, 255, 135, 190, 130,
1547 131, 175, 176, 178, 183, 184, 187, 255,
1548 172, 128, 130, 167, 180, 179, 130, 128,
1549 129, 179, 181, 182, 190, 191, 255, 129,
1550 137, 138, 140, 141, 255, 180, 190, 172,
1551 174, 175, 177, 178, 181, 182, 183, 159,
1552 160, 162, 163, 170, 188, 190, 191, 128,
1553 129, 130, 131, 128, 151, 129, 132, 135,
1554 136, 139, 141, 162, 163, 166, 172, 176,
1555 180, 181, 183, 184, 191, 133, 128, 129,
1556 130, 134, 176, 185, 189, 177, 178, 179,
1557 186, 187, 190, 191, 255, 129, 132, 255,
1558 175, 190, 176, 177, 178, 181, 184, 187,
1559 188, 255, 129, 155, 158, 255, 189, 176,
1560 178, 179, 186, 187, 190, 191, 255, 129,
1561 255, 172, 182, 171, 173, 174, 175, 176,
1562 183, 166, 157, 159, 160, 161, 162, 171,
1563 175, 190, 176, 182, 184, 191, 169, 177,
1564 180, 146, 167, 170, 182, 171, 172, 189,
1565 190, 176, 180, 176, 182, 143, 146, 178,
1566 157, 158, 133, 134, 137, 168, 169, 170,
1567 166, 173, 165, 169, 174, 178, 187, 255,
1568 131, 132, 140, 169, 174, 255, 130, 132,
1569 128, 182, 187, 255, 173, 180, 182, 255,
1570 132, 155, 159, 161, 175, 128, 163, 165,
1571 128, 134, 136, 152, 155, 161, 163, 164,
1572 166, 170, 144, 150, 132, 138, 143, 187,
1573 191, 160, 128, 129, 132, 135, 133, 134,
1574 160, 255, 192, 255, 139, 168, 160, 128,
1575 129, 132, 135, 133, 134, 160, 255, 192,
1576 255, 144, 145, 150, 155, 157, 158, 144,
1577 145, 150, 155, 157, 158, 159, 135, 166,
1578 191, 133, 128, 191, 128, 130, 131, 132,
1579 133, 137, 138, 139, 140, 191, 174, 188,
1580 128, 129, 130, 131, 132, 133, 134, 144,
1581 145, 165, 166, 169, 170, 175, 176, 184,
1582 185, 191, 128, 132, 170, 129, 135, 136,
1583 191, 181, 186, 128, 191, 144, 128, 148,
1584 149, 150, 151, 191, 128, 132, 133, 135,
1585 136, 138, 139, 143, 144, 191, 163, 128,
1586 179, 180, 182, 183, 191, 128, 129, 191,
1587 166, 176, 191, 128, 151, 152, 158, 159,
1588 178, 179, 185, 186, 187, 188, 190, 128,
1589 191, 160, 128, 191, 128, 130, 131, 135,
1590 191, 129, 134, 136, 190, 128, 159, 160,
1591 191, 128, 175, 176, 255, 10, 13, 127,
1592 194, 216, 219, 220, 224, 225, 226, 234,
1593 235, 236, 237, 239, 240, 243, 0, 31,
1594 128, 191, 192, 223, 227, 238, 241, 247,
1595 248, 255, 204, 205, 210, 214, 215, 216,
1596 217, 219, 220, 221, 222, 223, 224, 225,
1597 226, 227, 234, 239, 240, 243, 204, 205,
1598 210, 214, 215, 216, 217, 219, 220, 221,
1599 222, 223, 224, 225, 226, 227, 234, 239,
1600 240, 243, 204, 205, 210, 214, 215, 216,
1601 217, 219, 220, 221, 222, 223, 224, 225,
1602 226, 227, 234, 239, 240, 243, 194, 216,
1603 219, 220, 224, 225, 226, 234, 235, 236,
1604 237, 239, 240, 243, 32, 126, 192, 223,
1605 227, 238, 241, 247, 204, 205, 210, 214,
1606 215, 216, 217, 219, 220, 221, 222, 223,
1607 224, 225, 226, 227, 234, 239, 240, 243,
1608 204, 205, 210, 214, 215, 216, 217, 219,
1609 220, 221, 222, 223, 224, 225, 226, 227,
1610 234, 239, 240, 243, 204, 205, 210, 214,
1611 215, 216, 217, 219, 220, 221, 222, 223,
1612 224, 225, 226, 227, 234, 239, 240, 243,
1613 204, 205, 210, 214, 215, 216, 217, 219,
1614 220, 221, 222, 223, 224, 225, 226, 227,
1615 234, 235, 236, 237, 239, 240, 243, 204,
1616 205, 210, 214, 215, 216, 217, 219, 220,
1617 221, 222, 223, 224, 225, 226, 227, 234,
1618 237, 239, 240, 243, 204, 205, 210, 214,
1619 215, 216, 217, 219, 220, 221, 222, 223,
1620 224, 225, 226, 227, 234, 237, 239, 240,
1621 243, 204, 205, 210, 214, 215, 216, 217,
1622 219, 220, 221, 222, 223, 224, 225, 226,
1623 227, 234, 237, 239, 240, 243, 204, 205,
1624 210, 214, 215, 216, 217, 219, 220, 221,
1625 222, 223, 224, 225, 226, 227, 234, 239,
1626 240, 243, 204, 205, 210, 214, 215, 216,
1627 217, 219, 220, 221, 222, 223, 224, 225,
1628 226, 227, 234, 235, 236, 237, 239, 240,
1629 243, 204, 205, 210, 214, 215, 216, 217,
1630 219, 220, 221, 222, 223, 224, 225, 226,
1631 227, 234, 239, 240, 243, 204, 205, 210,
1632 214, 215, 216, 217, 219, 220, 221, 222,
1633 223, 224, 225, 226, 227, 234, 239, 240,
1634 243, 204, 205, 210, 214, 215, 216, 217,
1635 219, 220, 221, 222, 223, 224, 225, 226,
1636 227, 234, 239, 240, 243, 204, 205, 210,
1637 214, 215, 216, 217, 219, 220, 221, 222,
1638 223, 224, 225, 226, 227, 234, 237, 239,
1639 240, 243, 204, 205, 210, 214, 215, 216,
1640 217, 219, 220, 221, 222, 223, 224, 225,
1641 226, 227, 234, 237, 239, 240, 243, 204,
1642 205, 210, 214, 215, 216, 217, 219, 220,
1643 221, 222, 223, 224, 225, 226, 227, 234,
1644 237, 239, 240, 243, 204, 205, 210, 214,
1645 215, 216, 217, 219, 220, 221, 222, 223,
1646 224, 225, 226, 227, 234, 239, 240, 243,
1647 204, 205, 210, 214, 215, 216, 217, 219,
1648 220, 221, 222, 223, 224, 225, 226, 227,
1649 234, 239, 240, 243, 204, 205, 210, 214,
1650 215, 216, 217, 219, 220, 221, 222, 223,
1651 224, 225, 226, 227, 234, 239, 240, 243,
1652 204, 205, 210, 214, 215, 216, 217, 219,
1653 220, 221, 222, 223, 224, 225, 226, 227,
1654 234, 239, 240, 243, 204, 205, 210, 214,
1655 215, 216, 217, 219, 220, 221, 222, 223,
1656 224, 225, 226, 227, 234, 239, 240, 243,
1657 204, 205, 210, 214, 215, 216, 217, 219,
1658 220, 221, 222, 223, 224, 225, 226, 227,
1659 234, 239, 240, 243, 204, 205, 210, 214,
1660 215, 216, 217, 219, 220, 221, 222, 223,
1661 224, 225, 226, 227, 234, 239, 240, 243,
1662 204, 205, 210, 214, 215, 216, 217, 219,
1663 220, 221, 222, 223, 224, 225, 226, 227,
1664 234, 239, 240, 243, 204, 205, 210, 214,
1665 215, 216, 217, 219, 220, 221, 222, 223,
1666 224, 225, 226, 227, 234, 239, 240, 243,
1667 204, 205, 210, 214, 215, 216, 217, 219,
1668 220, 221, 222, 223, 224, 225, 226, 227,
1669 234, 239, 240, 243,
1670}
1671
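// Note on the generated tables (assumption, based on Ragel's conventional
// table-driven output rather than anything stated in this file):
// _graphclust_single_lengths presumably records, for each state, how many
// single-key transitions are listed for that state in _graphclust_trans_keys
// before that state's key ranges begin; the scanner would use it together
// with _graphclust_range_lengths to locate a state's slice of transition keys.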
1672var _graphclust_single_lengths []byte = []byte{
1673 0, 1, 0, 0, 0, 1, 1, 0,
1674 1, 0, 1, 0, 0, 0, 26, 0,
1675 0, 0, 1, 1, 1, 0, 0, 2,
1676 1, 0, 1, 1, 0, 2, 0, 0,
1677 2, 0, 2, 1, 0, 1, 0, 3,
1678 0, 0, 1, 21, 0, 0, 3, 0,
1679 0, 0, 0, 0, 0, 1, 0, 0,
1680 3, 0, 0, 0, 0, 0, 0, 1,
1681 0, 5, 2, 6, 0, 1, 0, 1,
1682 0, 2, 0, 0, 15, 0, 0, 0,
1683 3, 0, 0, 0, 0, 0, 0, 0,
1684 2, 1, 1, 0, 3, 1, 0, 7,
1685 5, 1, 1, 0, 1, 0, 23, 0,
1686 0, 0, 0, 1, 0, 0, 1, 0,
1687 1, 1, 0, 0, 0, 0, 0, 0,
1688 0, 0, 0, 0, 0, 4, 0, 0,
1689 0, 0, 1, 0, 6, 0, 0, 0,
1690 0, 0, 1, 3, 0, 0, 0, 3,
1691 0, 0, 0, 0, 1, 1, 0, 1,
1692 0, 1, 0, 0, 0, 29, 0, 0,
1693 0, 3, 2, 3, 2, 2, 2, 3,
1694 2, 2, 3, 3, 1, 2, 4, 2,
1695 2, 4, 4, 2, 0, 2, 0, 3,
1696 1, 0, 1, 21, 1, 0, 4, 0,
1697 0, 0, 1, 2, 0, 1, 1, 1,
1698 4, 0, 3, 1, 3, 2, 0, 3,
1699 0, 5, 2, 0, 0, 1, 0, 2,
1700 0, 0, 15, 0, 0, 0, 4, 0,
1701 0, 0, 3, 1, 0, 4, 1, 4,
1702 4, 3, 1, 0, 7, 5, 1, 1,
1703 0, 1, 0, 23, 1, 0, 1, 1,
1704 1, 1, 0, 2, 1, 3, 2, 0,
1705 1, 3, 1, 2, 0, 1, 0, 2,
1706 1, 2, 3, 4, 0, 0, 0, 1,
1707 0, 6, 2, 0, 0, 0, 0, 1,
1708 3, 0, 0, 0, 1, 0, 1, 4,
1709 0, 0, 0, 1, 1, 1, 4, 0,
1710 0, 0, 6, 0, 1, 1, 0, 0,
1711 0, 1, 1, 0, 1, 0, 1, 0,
1712 0, 0, 26, 0, 0, 0, 1, 1,
1713 1, 0, 0, 2, 1, 0, 1, 1,
1714 0, 2, 0, 0, 2, 0, 2, 1,
1715 0, 1, 0, 3, 0, 0, 1, 21,
1716 0, 0, 3, 0, 0, 0, 0, 0,
1717 0, 1, 0, 0, 3, 0, 0, 0,
1718 0, 0, 0, 1, 0, 5, 2, 6,
1719 0, 1, 0, 1, 0, 2, 0, 0,
1720 15, 0, 0, 0, 3, 0, 0, 0,
1721 0, 0, 0, 0, 2, 1, 1, 0,
1722 3, 1, 0, 7, 5, 1, 1, 0,
1723 1, 0, 23, 0, 0, 0, 0, 1,
1724 0, 0, 1, 0, 1, 1, 0, 0,
1725 0, 0, 0, 0, 0, 0, 0, 0,
1726 0, 4, 0, 0, 0, 0, 1, 0,
1727 6, 0, 0, 0, 0, 0, 1, 3,
1728 0, 0, 0, 3, 0, 0, 0, 0,
1729 1, 1, 0, 1, 0, 1, 0, 0,
1730 0, 29, 0, 0, 0, 3, 2, 3,
1731 2, 2, 2, 3, 2, 2, 3, 3,
1732 1, 2, 4, 2, 2, 4, 4, 2,
1733 0, 2, 0, 3, 1, 0, 1, 21,
1734 1, 0, 4, 0, 0, 0, 1, 2,
1735 0, 1, 1, 1, 4, 0, 3, 1,
1736 3, 2, 0, 3, 0, 5, 2, 0,
1737 0, 1, 0, 2, 0, 0, 15, 0,
1738 0, 0, 4, 0, 0, 0, 3, 1,
1739 0, 4, 1, 4, 4, 3, 1, 0,
1740 7, 5, 1, 1, 0, 1, 0, 23,
1741 1, 0, 1, 1, 1, 1, 0, 2,
1742 1, 3, 2, 0, 1, 3, 1, 2,
1743 0, 1, 0, 2, 1, 2, 3, 4,
1744 0, 0, 0, 1, 0, 6, 2, 0,
1745 0, 0, 0, 1, 3, 0, 0, 0,
1746 1, 0, 1, 4, 0, 0, 0, 1,
1747 1, 1, 4, 0, 0, 0, 6, 0,
1748 0, 0, 1, 1, 2, 1, 1, 5,
1749 0, 24, 0, 24, 0, 0, 23, 0,
1750 0, 1, 0, 2, 0, 0, 0, 28,
1751 0, 3, 23, 2, 0, 2, 2, 3,
1752 2, 2, 2, 0, 54, 54, 27, 1,
1753 0, 5, 2, 0, 1, 1, 0, 0,
1754 14, 0, 3, 2, 2, 3, 2, 2,
1755 2, 54, 54, 27, 1, 0, 2, 0,
1756 1, 4, 2, 1, 0, 1, 0, 1,
1757 0, 11, 0, 7, 1, 0, 1, 0,
1758 2, 3, 2, 1, 0, 1, 1, 3,
1759 0, 1, 3, 0, 1, 1, 2, 1,
1760 1, 5, 0, 0, 0, 0, 1, 1,
1761 0, 1, 0, 1, 0, 0, 0, 26,
1762 0, 0, 0, 1, 1, 1, 0, 0,
1763 2, 1, 0, 1, 1, 0, 2, 0,
1764 0, 2, 0, 2, 1, 0, 1, 0,
1765 3, 0, 0, 1, 21, 0, 0, 3,
1766 0, 0, 0, 0, 0, 0, 1, 0,
1767 0, 3, 0, 0, 0, 0, 0, 0,
1768 1, 0, 5, 2, 6, 0, 1, 0,
1769 1, 0, 2, 0, 0, 15, 0, 0,
1770 0, 3, 0, 0, 0, 0, 0, 0,
1771 0, 2, 1, 1, 0, 3, 1, 0,
1772 7, 5, 1, 1, 0, 1, 0, 23,
1773 0, 0, 0, 0, 1, 0, 0, 1,
1774 0, 1, 1, 0, 0, 0, 0, 0,
1775 0, 0, 0, 0, 0, 0, 4, 0,
1776 0, 0, 0, 1, 0, 6, 0, 0,
1777 0, 0, 0, 1, 3, 0, 0, 0,
1778 3, 0, 0, 0, 0, 1, 1, 0,
1779 1, 0, 1, 0, 0, 0, 29, 0,
1780 0, 0, 3, 2, 3, 2, 2, 2,
1781 3, 2, 2, 3, 3, 1, 2, 4,
1782 2, 2, 4, 4, 2, 0, 2, 0,
1783 3, 1, 0, 1, 21, 1, 0, 4,
1784 0, 0, 0, 1, 2, 0, 1, 1,
1785 1, 4, 0, 3, 1, 3, 2, 0,
1786 3, 0, 5, 2, 0, 0, 1, 0,
1787 2, 0, 0, 15, 0, 0, 0, 4,
1788 0, 0, 0, 3, 1, 0, 4, 1,
1789 4, 4, 3, 1, 0, 7, 5, 1,
1790 1, 0, 1, 0, 23, 1, 0, 1,
1791 1, 1, 1, 0, 2, 1, 3, 2,
1792 0, 1, 3, 1, 2, 0, 1, 0,
1793 2, 1, 2, 3, 4, 0, 0, 0,
1794 1, 0, 6, 2, 0, 0, 0, 0,
1795 1, 3, 0, 0, 0, 1, 0, 1,
1796 4, 0, 0, 0, 1, 1, 1, 4,
1797 0, 0, 0, 6, 24, 0, 24, 0,
1798 0, 23, 0, 0, 1, 0, 2, 0,
1799 0, 0, 28, 0, 3, 23, 2, 0,
1800 2, 2, 3, 2, 2, 2, 0, 54,
1801 54, 27, 1, 1, 5, 2, 0, 0,
1802 0, 1, 1, 0, 1, 0, 1, 0,
1803 0, 0, 26, 0, 0, 0, 1, 1,
1804 1, 0, 0, 2, 1, 0, 1, 1,
1805 0, 2, 0, 0, 2, 0, 2, 1,
1806 0, 1, 0, 3, 0, 0, 1, 21,
1807 0, 0, 3, 0, 0, 0, 0, 0,
1808 0, 1, 0, 0, 3, 0, 0, 0,
1809 0, 0, 0, 1, 0, 5, 2, 0,
1810 0, 1, 0, 2, 0, 0, 15, 0,
1811 0, 0, 3, 0, 0, 0, 0, 0,
1812 0, 0, 2, 1, 1, 0, 3, 1,
1813 0, 6, 5, 1, 1, 0, 1, 0,
1814 23, 0, 0, 0, 0, 1, 0, 0,
1815 1, 0, 1, 1, 0, 0, 0, 0,
1816 0, 0, 0, 0, 0, 0, 0, 4,
1817 0, 0, 0, 0, 1, 0, 6, 0,
1818 0, 0, 0, 0, 1, 3, 0, 0,
1819 0, 1, 4, 0, 0, 0, 6, 1,
1820 7, 3, 0, 0, 0, 0, 1, 1,
1821 0, 1, 0, 1, 0, 0, 0, 29,
1822 0, 0, 0, 3, 2, 3, 2, 2,
1823 2, 3, 2, 2, 3, 3, 1, 2,
1824 4, 2, 2, 4, 4, 2, 0, 2,
1825 0, 3, 1, 0, 1, 21, 1, 0,
1826 4, 0, 0, 0, 1, 2, 0, 1,
1827 1, 1, 4, 0, 3, 1, 3, 2,
1828 0, 3, 0, 5, 2, 0, 0, 1,
1829 0, 2, 0, 0, 15, 0, 0, 0,
1830 4, 0, 0, 0, 3, 1, 0, 4,
1831 1, 4, 4, 3, 1, 0, 7, 5,
1832 1, 1, 0, 1, 0, 23, 1, 0,
1833 1, 1, 1, 1, 0, 2, 1, 3,
1834 2, 0, 1, 3, 1, 2, 0, 1,
1835 0, 2, 1, 2, 3, 4, 0, 0,
1836 0, 1, 0, 6, 2, 0, 0, 0,
1837 0, 1, 3, 0, 0, 0, 1, 0,
1838 1, 4, 0, 0, 0, 1, 1, 0,
1839 1, 0, 0, 0, 1, 1, 0, 1,
1840 0, 1, 0, 0, 0, 29, 0, 0,
1841 0, 3, 0, 0, 0, 1, 1, 0,
1842 1, 0, 1, 0, 0, 0, 26, 0,
1843 0, 0, 1, 1, 1, 0, 0, 2,
1844 1, 0, 1, 1, 0, 2, 0, 0,
1845 2, 0, 2, 1, 0, 1, 0, 3,
1846 0, 0, 1, 21, 0, 0, 3, 0,
1847 0, 0, 0, 0, 0, 1, 0, 0,
1848 3, 0, 0, 0, 0, 0, 0, 1,
1849 0, 5, 2, 6, 0, 1, 0, 1,
1850 0, 2, 0, 0, 15, 0, 0, 0,
1851 3, 0, 0, 0, 0, 0, 0, 0,
1852 2, 1, 1, 0, 3, 1, 0, 7,
1853 5, 1, 1, 0, 1, 0, 23, 0,
1854 0, 0, 0, 1, 0, 0, 1, 0,
1855 1, 1, 0, 0, 0, 0, 0, 0,
1856 0, 0, 0, 0, 0, 4, 0, 0,
1857 0, 0, 1, 0, 6, 0, 0, 0,
1858 0, 0, 1, 3, 0, 0, 0, 3,
1859 0, 1, 1, 1, 4, 0, 0, 0,
1860 6, 2, 3, 2, 2, 2, 3, 2,
1861 2, 3, 3, 1, 2, 4, 2, 2,
1862 4, 4, 2, 0, 2, 0, 3, 1,
1863 0, 1, 21, 1, 0, 4, 0, 0,
1864 0, 1, 2, 0, 1, 1, 1, 4,
1865 0, 3, 1, 3, 2, 0, 3, 0,
1866 5, 2, 0, 0, 1, 0, 2, 0,
1867 0, 15, 0, 0, 0, 4, 0, 0,
1868 0, 3, 1, 0, 4, 1, 4, 4,
1869 3, 1, 0, 7, 5, 1, 1, 0,
1870 1, 0, 23, 1, 0, 1, 1, 1,
1871 1, 0, 2, 1, 3, 2, 0, 1,
1872 3, 1, 2, 0, 1, 0, 2, 1,
1873 2, 3, 4, 0, 0, 0, 1, 0,
1874 6, 2, 0, 0, 0, 0, 1, 3,
1875 0, 0, 0, 1, 0, 1, 4, 0,
1876 0, 0, 1, 0, 0, 14, 0, 3,
1877 2, 2, 3, 2, 2, 2, 54, 54,
1878 29, 1, 0, 0, 0, 0, 2, 1,
1879 1, 4, 2, 1, 0, 1, 0, 1,
1880 0, 11, 0, 0, 0, 0, 1, 1,
1881 0, 1, 0, 1, 0, 0, 0, 26,
1882 0, 0, 0, 1, 1, 1, 0, 0,
1883 2, 1, 0, 1, 1, 0, 2, 0,
1884 0, 2, 0, 2, 1, 0, 1, 0,
1885 3, 0, 0, 1, 21, 0, 0, 3,
1886 0, 0, 0, 0, 0, 0, 1, 0,
1887 0, 3, 0, 0, 0, 0, 0, 0,
1888 1, 0, 5, 2, 6, 0, 1, 0,
1889 1, 0, 2, 0, 0, 15, 0, 0,
1890 0, 3, 0, 0, 0, 0, 0, 0,
1891 0, 2, 1, 1, 0, 3, 1, 0,
1892 7, 5, 1, 1, 0, 1, 0, 23,
1893 0, 0, 0, 0, 1, 0, 0, 1,
1894 0, 1, 1, 0, 0, 0, 0, 0,
1895 0, 0, 0, 0, 0, 0, 4, 0,
1896 0, 0, 0, 1, 0, 6, 0, 0,
1897 0, 0, 0, 1, 3, 0, 0, 0,
1898 3, 0, 0, 0, 0, 1, 1, 0,
1899 1, 0, 1, 0, 0, 0, 29, 0,
1900 0, 0, 3, 2, 3, 2, 2, 2,
1901 3, 2, 2, 3, 3, 1, 2, 4,
1902 2, 2, 4, 4, 2, 0, 2, 0,
1903 3, 1, 0, 1, 21, 1, 0, 4,
1904 0, 0, 0, 1, 2, 0, 1, 1,
1905 1, 4, 0, 3, 1, 3, 2, 0,
1906 3, 0, 5, 2, 0, 0, 1, 0,
1907 2, 0, 0, 15, 0, 0, 0, 4,
1908 0, 0, 0, 3, 1, 0, 4, 1,
1909 4, 4, 3, 1, 0, 7, 5, 1,
1910 1, 0, 1, 0, 23, 1, 0, 1,
1911 1, 1, 1, 0, 2, 1, 3, 2,
1912 0, 1, 3, 1, 2, 0, 1, 0,
1913 2, 1, 2, 3, 4, 0, 0, 0,
1914 1, 0, 6, 2, 0, 0, 0, 0,
1915 1, 3, 0, 0, 0, 1, 0, 1,
1916 4, 0, 0, 0, 1, 1, 1, 4,
1917 0, 0, 0, 6, 7, 1, 0, 1,
1918 0, 2, 3, 2, 1, 0, 1, 1,
1919 3, 0, 1, 5, 0, 0, 17, 20,
1920 20, 20, 14, 20, 20, 20, 23, 21,
1921 21, 21, 20, 23, 20, 20, 20, 21,
1922 21, 21, 20, 20, 20, 20, 20, 20,
1923 20, 20, 20, 20,
1924}
1925
1926var _graphclust_range_lengths []byte = []byte{
1927 0, 0, 1, 1, 1, 1, 2, 1,
1928 1, 4, 1, 1, 1, 1, 2, 4,
1929 1, 2, 1, 2, 2, 5, 6, 2,
1930 2, 5, 1, 3, 2, 3, 5, 2,
1931 3, 1, 3, 1, 1, 2, 1, 2,
1932 1, 4, 0, 0, 2, 3, 1, 1,
1933 2, 2, 1, 2, 1, 1, 2, 1,
1934 2, 1, 2, 2, 2, 1, 1, 4,
1935 2, 0, 0, 0, 1, 0, 1, 0,
1936 1, 0, 1, 1, 0, 2, 1, 1,
1937 1, 2, 2, 1, 1, 2, 2, 1,
1938 1, 3, 2, 2, 0, 0, 2, 0,
1939 0, 0, 0, 1, 4, 1, 0, 2,
1940 1, 2, 2, 0, 2, 2, 1, 1,
1941 2, 6, 1, 1, 1, 1, 2, 2,
1942 1, 1, 1, 2, 2, 0, 1, 1,
1943 1, 1, 0, 1, 0, 3, 3, 1,
1944 2, 2, 2, 0, 5, 1, 1, 0,
1945 1, 1, 1, 1, 1, 2, 1, 1,
1946 4, 1, 1, 1, 1, 1, 4, 1,
1947 2, 2, 5, 2, 6, 2, 8, 4,
1948 2, 5, 0, 3, 2, 4, 1, 6,
1949 2, 4, 4, 1, 1, 2, 1, 2,
1950 1, 4, 0, 0, 4, 4, 1, 1,
1951 2, 2, 2, 2, 1, 1, 6, 2,
1952 5, 1, 3, 3, 4, 4, 4, 4,
1953 2, 0, 0, 1, 1, 0, 1, 0,
1954 1, 1, 0, 2, 1, 1, 2, 4,
1955 1, 2, 4, 1, 5, 0, 3, 2,
1956 1, 0, 0, 2, 0, 0, 0, 0,
1957 1, 4, 1, 0, 2, 1, 4, 2,
1958 0, 4, 3, 4, 2, 2, 6, 2,
1959 2, 4, 1, 4, 2, 4, 1, 3,
1960 3, 2, 2, 0, 1, 1, 1, 0,
1961 1, 0, 3, 3, 1, 2, 2, 2,
1962 0, 5, 1, 1, 0, 1, 0, 1,
1963 1, 1, 0, 0, 0, 0, 1, 1,
1964 1, 0, 0, 1, 2, 2, 1, 1,
1965 1, 1, 2, 1, 1, 4, 1, 1,
1966 1, 1, 2, 4, 1, 2, 1, 2,
1967 2, 5, 6, 2, 2, 5, 1, 3,
1968 2, 3, 5, 2, 3, 1, 3, 1,
1969 1, 2, 1, 2, 1, 4, 0, 0,
1970 2, 3, 1, 1, 2, 2, 1, 2,
1971 1, 1, 2, 1, 2, 1, 2, 2,
1972 2, 1, 1, 4, 2, 0, 0, 0,
1973 1, 0, 1, 0, 1, 0, 1, 1,
1974 0, 2, 1, 1, 1, 2, 2, 1,
1975 1, 2, 2, 1, 1, 3, 2, 2,
1976 0, 0, 2, 0, 0, 0, 0, 1,
1977 4, 1, 0, 2, 1, 2, 2, 0,
1978 2, 2, 1, 1, 2, 6, 1, 1,
1979 1, 1, 2, 2, 1, 1, 1, 2,
1980 2, 0, 1, 1, 1, 1, 0, 1,
1981 0, 3, 3, 1, 2, 2, 2, 0,
1982 5, 1, 1, 0, 1, 1, 1, 1,
1983 1, 2, 1, 1, 4, 1, 1, 1,
1984 1, 1, 4, 1, 2, 2, 5, 2,
1985 6, 2, 8, 4, 2, 5, 0, 3,
1986 2, 4, 1, 6, 2, 4, 4, 1,
1987 1, 2, 1, 2, 1, 4, 0, 0,
1988 4, 4, 1, 1, 2, 2, 2, 2,
1989 1, 1, 6, 2, 5, 1, 3, 3,
1990 4, 4, 4, 4, 2, 0, 0, 1,
1991 1, 0, 1, 0, 1, 1, 0, 2,
1992 1, 1, 2, 4, 1, 2, 4, 1,
1993 5, 0, 3, 2, 1, 0, 0, 2,
1994 0, 0, 0, 0, 1, 4, 1, 0,
1995 2, 1, 4, 2, 0, 4, 3, 4,
1996 2, 2, 6, 2, 2, 4, 1, 4,
1997 2, 4, 1, 3, 3, 2, 2, 0,
1998 1, 1, 1, 0, 1, 0, 3, 3,
1999 1, 2, 2, 2, 0, 5, 1, 1,
2000 0, 1, 0, 1, 1, 1, 0, 0,
2001 0, 0, 1, 1, 1, 0, 0, 1,
2002 2, 3, 1, 1, 1, 1, 1, 1,
2003 1, 0, 1, 0, 1, 1, 0, 1,
2004 1, 0, 1, 0, 1, 3, 1, 2,
2005 2, 1, 0, 0, 1, 0, 0, 0,
2006 0, 0, 1, 0, 1, 1, 2, 2,
2007 2, 1, 3, 2, 1, 1, 3, 1,
2008 3, 3, 1, 0, 0, 0, 0, 0,
2009 1, 1, 1, 2, 2, 4, 1, 1,
2010 2, 1, 1, 1, 3, 1, 2, 1,
2011 2, 1, 2, 0, 0, 1, 1, 5,
2012 9, 2, 1, 3, 5, 3, 1, 6,
2013 1, 1, 1, 1, 1, 1, 1, 1,
2014 1, 1, 1, 1, 1, 1, 1, 2,
2015 1, 1, 4, 1, 1, 1, 1, 2,
2016 4, 1, 2, 1, 2, 2, 5, 6,
2017 2, 2, 5, 1, 3, 2, 3, 5,
2018 2, 3, 1, 3, 1, 1, 2, 1,
2019 2, 1, 4, 0, 0, 2, 3, 1,
2020 1, 2, 2, 1, 2, 1, 1, 2,
2021 1, 2, 1, 2, 2, 2, 1, 1,
2022 4, 2, 0, 0, 0, 1, 0, 1,
2023 0, 1, 0, 1, 1, 0, 2, 1,
2024 1, 1, 2, 2, 1, 1, 2, 2,
2025 1, 1, 3, 2, 2, 0, 0, 2,
2026 0, 0, 0, 0, 1, 4, 1, 0,
2027 2, 1, 2, 2, 0, 2, 2, 1,
2028 1, 2, 6, 1, 1, 1, 1, 2,
2029 2, 1, 1, 1, 2, 2, 0, 1,
2030 1, 1, 1, 0, 1, 0, 3, 3,
2031 1, 2, 2, 2, 0, 5, 1, 1,
2032 0, 1, 1, 1, 1, 1, 2, 1,
2033 1, 4, 1, 1, 1, 1, 1, 4,
2034 1, 2, 2, 5, 2, 6, 2, 8,
2035 4, 2, 5, 0, 3, 2, 4, 1,
2036 6, 2, 4, 4, 1, 1, 2, 1,
2037 2, 1, 4, 0, 0, 4, 4, 1,
2038 1, 2, 2, 2, 2, 1, 1, 6,
2039 2, 5, 1, 3, 3, 4, 4, 4,
2040 4, 2, 0, 0, 1, 1, 0, 1,
2041 0, 1, 1, 0, 2, 1, 1, 2,
2042 4, 1, 2, 4, 1, 5, 0, 3,
2043 2, 1, 0, 0, 2, 0, 0, 0,
2044 0, 1, 4, 1, 0, 2, 1, 4,
2045 2, 0, 4, 3, 4, 2, 2, 6,
2046 2, 2, 4, 1, 4, 2, 4, 1,
2047 3, 3, 2, 2, 0, 1, 1, 1,
2048 0, 1, 0, 3, 3, 1, 2, 2,
2049 2, 0, 5, 1, 1, 0, 1, 0,
2050 1, 1, 1, 0, 0, 0, 0, 1,
2051 1, 1, 0, 0, 0, 1, 0, 1,
2052 1, 0, 1, 1, 0, 1, 0, 1,
2053 3, 1, 2, 2, 1, 0, 0, 1,
2054 0, 0, 0, 0, 0, 1, 0, 1,
2055 1, 2, 2, 1, 1, 5, 1, 1,
2056 1, 1, 2, 1, 1, 4, 1, 1,
2057 1, 1, 2, 4, 1, 2, 1, 2,
2058 2, 5, 6, 2, 2, 5, 1, 3,
2059 2, 3, 5, 2, 3, 1, 3, 1,
2060 1, 2, 1, 2, 1, 4, 0, 0,
2061 2, 3, 1, 1, 2, 2, 1, 2,
2062 1, 1, 2, 1, 2, 1, 2, 2,
2063 2, 1, 1, 4, 2, 0, 0, 1,
2064 1, 0, 1, 0, 1, 1, 0, 2,
2065 1, 1, 1, 2, 2, 1, 1, 2,
2066 2, 1, 1, 3, 2, 2, 0, 0,
2067 2, 0, 0, 0, 0, 1, 4, 1,
2068 0, 2, 1, 2, 2, 0, 2, 2,
2069 1, 1, 2, 6, 1, 1, 1, 1,
2070 2, 2, 1, 1, 1, 2, 2, 0,
2071 1, 1, 1, 1, 0, 1, 0, 3,
2072 3, 1, 2, 2, 2, 0, 5, 1,
2073 1, 0, 1, 1, 1, 0, 0, 0,
2074 0, 0, 1, 1, 1, 1, 1, 2,
2075 1, 1, 4, 1, 1, 1, 1, 1,
2076 4, 1, 2, 2, 5, 2, 6, 2,
2077 8, 4, 2, 5, 0, 3, 2, 4,
2078 1, 6, 2, 4, 4, 1, 1, 2,
2079 1, 2, 1, 4, 0, 0, 4, 4,
2080 1, 1, 2, 2, 2, 2, 1, 1,
2081 6, 2, 5, 1, 3, 3, 4, 4,
2082 4, 4, 2, 0, 0, 1, 1, 0,
2083 1, 0, 1, 1, 0, 2, 1, 1,
2084 2, 4, 1, 2, 4, 1, 5, 0,
2085 3, 2, 1, 0, 0, 2, 0, 0,
2086 0, 0, 1, 4, 1, 0, 2, 1,
2087 4, 2, 0, 4, 3, 4, 2, 2,
2088 6, 2, 2, 4, 1, 4, 2, 4,
2089 1, 3, 3, 2, 2, 0, 1, 1,
2090 1, 0, 1, 0, 3, 3, 1, 2,
2091 2, 2, 0, 5, 1, 1, 0, 1,
2092 0, 1, 1, 1, 0, 0, 0, 3,
2093 1, 1, 1, 1, 1, 2, 1, 1,
2094 4, 1, 1, 1, 1, 1, 4, 1,
2095 2, 2, 1, 1, 1, 1, 2, 1,
2096 1, 4, 1, 1, 1, 1, 2, 4,
2097 1, 2, 1, 2, 2, 5, 6, 2,
2098 2, 5, 1, 3, 2, 3, 5, 2,
2099 3, 1, 3, 1, 1, 2, 1, 2,
2100 1, 4, 0, 0, 2, 3, 1, 1,
2101 2, 2, 1, 2, 1, 1, 2, 1,
2102 2, 1, 2, 2, 2, 1, 1, 4,
2103 2, 0, 0, 0, 1, 0, 1, 0,
2104 1, 0, 1, 1, 0, 2, 1, 1,
2105 1, 2, 2, 1, 1, 2, 2, 1,
2106 1, 3, 2, 2, 0, 0, 2, 0,
2107 0, 0, 0, 1, 4, 1, 0, 2,
2108 1, 2, 2, 0, 2, 2, 1, 1,
2109 2, 6, 1, 1, 1, 1, 2, 2,
2110 1, 1, 1, 2, 2, 0, 1, 1,
2111 1, 1, 0, 1, 0, 3, 3, 1,
2112 2, 2, 2, 0, 5, 1, 1, 0,
2113 1, 0, 0, 0, 1, 1, 1, 0,
2114 0, 5, 2, 6, 2, 8, 4, 2,
2115 5, 0, 3, 2, 4, 1, 6, 2,
2116 4, 4, 1, 1, 2, 1, 2, 1,
2117 4, 0, 0, 4, 4, 1, 1, 2,
2118 2, 2, 2, 1, 1, 6, 2, 5,
2119 1, 3, 3, 4, 4, 4, 4, 2,
2120 0, 0, 1, 1, 0, 1, 0, 1,
2121 1, 0, 2, 1, 1, 2, 4, 1,
2122 2, 4, 1, 5, 0, 3, 2, 1,
2123 0, 0, 2, 0, 0, 0, 0, 1,
2124 4, 1, 0, 2, 1, 4, 2, 0,
2125 4, 3, 4, 2, 2, 6, 2, 2,
2126 4, 1, 4, 2, 4, 1, 3, 3,
2127 2, 2, 0, 1, 1, 1, 0, 1,
2128 0, 3, 3, 1, 2, 2, 2, 0,
2129 5, 1, 1, 0, 1, 0, 1, 1,
2130 1, 0, 1, 3, 1, 3, 3, 1,
2131 0, 0, 0, 0, 0, 1, 1, 1,
2132 3, 2, 4, 1, 0, 1, 1, 1,
2133 3, 1, 1, 1, 3, 1, 3, 1,
2134 3, 1, 2, 1, 1, 1, 1, 2,
2135 1, 1, 4, 1, 1, 1, 1, 2,
2136 4, 1, 2, 1, 2, 2, 5, 6,
2137 2, 2, 5, 1, 3, 2, 3, 5,
2138 2, 3, 1, 3, 1, 1, 2, 1,
2139 2, 1, 4, 0, 0, 2, 3, 1,
2140 1, 2, 2, 1, 2, 1, 1, 2,
2141 1, 2, 1, 2, 2, 2, 1, 1,
2142 4, 2, 0, 0, 0, 1, 0, 1,
2143 0, 1, 0, 1, 1, 0, 2, 1,
2144 1, 1, 2, 2, 1, 1, 2, 2,
2145 1, 1, 3, 2, 2, 0, 0, 2,
2146 0, 0, 0, 0, 1, 4, 1, 0,
2147 2, 1, 2, 2, 0, 2, 2, 1,
2148 1, 2, 6, 1, 1, 1, 1, 2,
2149 2, 1, 1, 1, 2, 2, 0, 1,
2150 1, 1, 1, 0, 1, 0, 3, 3,
2151 1, 2, 2, 2, 0, 5, 1, 1,
2152 0, 1, 1, 1, 1, 1, 2, 1,
2153 1, 4, 1, 1, 1, 1, 1, 4,
2154 1, 2, 2, 5, 2, 6, 2, 8,
2155 4, 2, 5, 0, 3, 2, 4, 1,
2156 6, 2, 4, 4, 1, 1, 2, 1,
2157 2, 1, 4, 0, 0, 4, 4, 1,
2158 1, 2, 2, 2, 2, 1, 1, 6,
2159 2, 5, 1, 3, 3, 4, 4, 4,
2160 4, 2, 0, 0, 1, 1, 0, 1,
2161 0, 1, 1, 0, 2, 1, 1, 2,
2162 4, 1, 2, 4, 1, 5, 0, 3,
2163 2, 1, 0, 0, 2, 0, 0, 0,
2164 0, 1, 4, 1, 0, 2, 1, 4,
2165 2, 0, 4, 3, 4, 2, 2, 6,
2166 2, 2, 4, 1, 4, 2, 4, 1,
2167 3, 3, 2, 2, 0, 1, 1, 1,
2168 0, 1, 0, 3, 3, 1, 2, 2,
2169 2, 0, 5, 1, 1, 0, 1, 0,
2170 1, 1, 1, 0, 0, 0, 0, 1,
2171 1, 1, 0, 0, 0, 0, 1, 1,
2172 5, 9, 2, 1, 3, 5, 3, 1,
2173 6, 1, 1, 2, 2, 2, 6, 0,
2174 0, 0, 4, 0, 0, 0, 0, 0,
2175 0, 0, 0, 0, 0, 0, 0, 0,
2176 0, 0, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0,
2178}
2179
2180var _graphclust_index_offsets []int16 = []int16{
2181 0, 0, 2, 4, 6, 8, 11, 15,
2182 17, 20, 25, 28, 30, 32, 34, 63,
2183 68, 70, 73, 76, 80, 84, 90, 97,
2184 102, 106, 112, 115, 120, 123, 129, 135,
2185 138, 144, 146, 152, 155, 157, 161, 163,
2186 169, 171, 176, 178, 200, 203, 207, 212,
2187 214, 217, 220, 222, 225, 227, 230, 233,
2188 235, 241, 243, 246, 249, 252, 254, 256,
2189 262, 265, 271, 274, 281, 283, 285, 287,
2190 289, 291, 294, 296, 298, 314, 317, 319,
2191 321, 326, 329, 332, 334, 336, 339, 342,
2192 344, 348, 353, 357, 360, 364, 366, 369,
2193 377, 383, 385, 387, 389, 395, 397, 421,
2194 424, 426, 429, 432, 434, 437, 440, 443,
2195 445, 449, 457, 459, 461, 463, 465, 468,
2196 471, 473, 475, 477, 480, 483, 488, 490,
2197 492, 494, 496, 498, 500, 507, 511, 515,
2198 517, 520, 523, 527, 531, 537, 539, 541,
2199 545, 547, 549, 551, 553, 556, 560, 562,
2200 565, 570, 573, 575, 577, 579, 610, 615,
2201 617, 620, 626, 634, 640, 649, 654, 665,
2202 673, 678, 686, 690, 697, 701, 708, 714,
2203 723, 728, 737, 746, 750, 752, 757, 759,
2204 765, 768, 773, 775, 797, 803, 808, 814,
2205 816, 819, 822, 826, 831, 833, 836, 844,
2206 848, 858, 860, 867, 872, 880, 887, 892,
2207 900, 903, 909, 912, 914, 916, 918, 920,
2208 923, 925, 927, 943, 946, 948, 950, 957,
2209 962, 964, 967, 975, 978, 984, 989, 994,
2210 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032,
2211 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080,
2212 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122,
2213 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158,
2214 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191,
2215 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220,
2216 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244,
2217 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267,
2218 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291,
2219 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315,
2220 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363,
2221 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402,
2222 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439,
2223 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465,
2224 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509,
2225 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533,
2226 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561,
2227 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583,
2228 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619,
2229 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644,
2230 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674,
2231 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719,
2232 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746,
2233 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764,
2234 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785,
2235 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814,
2236 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838,
2237 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862,
2238 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921,
2239 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977,
2240 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033,
2241 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062,
2242 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113,
2243 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154,
2244 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199,
2245 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230,
2246 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262,
2247 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300,
2248 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331,
2249 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383,
2250 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427,
2251 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467,
2252 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495,
2253 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523,
2254 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542,
2255 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566,
2256 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591,
2257 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680,
2258 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699,
2259 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773,
2260 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930,
2261 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963,
2262 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005,
2263 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167,
2264 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196,
2265 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233,
2266 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280,
2267 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312,
2268 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336,
2269 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359,
2270 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415,
2271 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454,
2272 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486,
2273 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532,
2274 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555,
2275 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579,
2276 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610,
2277 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642,
2278 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664,
2279 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691,
2280 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722,
2281 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765,
2282 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790,
2283 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813,
2284 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836,
2285 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864,
2286 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885,
2287 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935,
2288 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979,
2289 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033,
2290 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082,
2291 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133,
2292 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161,
2293 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212,
2294 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243,
2295 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275,
2296 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314,
2297 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355,
2298 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399,
2299 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438,
2300 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481,
2301 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514,
2302 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542,
2303 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567,
2304 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586,
2305 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656,
2306 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695,
2307 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769,
2308 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792,
2309 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958,
2310 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982,
2311 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030,
2312 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069,
2313 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106,
2314 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132,
2315 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176,
2316 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200,
2317 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228,
2318 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259,
2319 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281,
2320 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309,
2321 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339,
2322 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381,
2323 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407,
2324 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427,
2325 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451,
2326 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481,
2327 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505,
2328 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530,
2329 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553,
2330 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623,
2331 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675,
2332 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726,
2333 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777,
2334 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807,
2335 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854,
2336 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890,
2337 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922,
2338 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958,
2339 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998,
2340 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046,
2341 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081,
2342 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124,
2343 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161,
2344 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188,
2345 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214,
2346 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233,
2347 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255,
2348 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308,
2349 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332,
2350 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380,
2351 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414,
2352 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452,
2353 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480,
2354 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529,
2355 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550,
2356 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573,
2357 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604,
2358 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636,
2359 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659,
2360 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686,
2361 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738,
2362 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760,
2363 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785,
2364 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807,
2365 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832,
2366 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858,
2367 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880,
2368 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935,
2369 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985,
2370 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027,
2371 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078,
2372 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110,
2373 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162,
2374 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185,
2375 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224,
2376 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263,
2377 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296,
2378 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346,
2379 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387,
2380 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426,
2381 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455,
2382 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486,
2383 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512,
2384 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548,
2385 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629,
2386 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736,
2387 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768,
2388 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800,
2389 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823,
2390 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879,
2391 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918,
2392 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950,
2393 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996,
2394 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019,
2395 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043,
2396 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074,
2397 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106,
2398 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128,
2399 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155,
2400 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186,
2401 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229,
2402 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254,
2403 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277,
2404 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300,
2405 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328,
2406 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349,
2407 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399,
2408 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443,
2409 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497,
2410 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546,
2411 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597,
2412 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625,
2413 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676,
2414 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707,
2415 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739,
2416 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778,
2417 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819,
2418 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863,
2419 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902,
2420 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945,
2421 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978,
2422 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006,
2423 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031,
2424 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050,
2425 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080,
2426 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127,
2427 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183,
2428 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352,
2429 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526,
2430 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697,
2431 9718, 9739, 9760, 9781,
2432}
2433
2434var _graphclust_indicies []int16 = []int16{
2435 0, 1, 3, 2, 2, 3, 3, 2,
2436 3, 3, 2, 3, 3, 3, 2, 3,
2437 2, 3, 3, 2, 3, 3, 3, 3,
2438 2, 3, 3, 2, 2, 3, 3, 2,
2439 3, 2, 4, 5, 6, 7, 8, 10,
2440 11, 12, 14, 15, 16, 17, 18, 19,
2441 20, 21, 22, 23, 24, 25, 26, 27,
2442 28, 29, 30, 31, 9, 13, 2, 3,
2443 3, 3, 3, 2, 3, 2, 3, 3,
2444 2, 2, 2, 3, 2, 2, 2, 3,
2445 3, 3, 3, 2, 2, 2, 2, 2,
2446 2, 3, 2, 2, 2, 2, 2, 2,
2447 3, 2, 2, 2, 2, 3, 3, 3,
2448 3, 2, 3, 3, 3, 3, 3, 2,
2449 3, 3, 2, 3, 3, 3, 3, 2,
2450 3, 3, 2, 2, 2, 2, 2, 2,
2451 3, 3, 3, 3, 3, 3, 2, 3,
2452 3, 2, 2, 2, 2, 2, 2, 3,
2453 3, 2, 3, 3, 3, 3, 3, 2,
2454 3, 3, 2, 3, 2, 3, 3, 3,
2455 2, 3, 2, 3, 3, 3, 3, 3,
2456 2, 3, 2, 3, 3, 3, 3, 2,
2457 3, 2, 32, 33, 34, 35, 36, 37,
2458 38, 39, 40, 41, 42, 43, 44, 45,
2459 46, 47, 48, 49, 50, 51, 52, 2,
2460 3, 3, 2, 3, 3, 3, 2, 3,
2461 3, 3, 3, 2, 3, 2, 3, 3,
2462 2, 3, 3, 2, 3, 2, 2, 2,
2463 3, 3, 2, 3, 3, 2, 3, 3,
2464 2, 3, 2, 3, 3, 3, 3, 3,
2465 2, 3, 2, 3, 3, 2, 2, 2,
2466 3, 3, 3, 2, 3, 2, 3, 2,
2467 3, 3, 3, 3, 3, 2, 3, 3,
2468 2, 53, 54, 55, 56, 57, 2, 3,
2469 58, 2, 53, 54, 59, 55, 56, 57,
2470 2, 3, 2, 3, 2, 3, 2, 3,
2471 2, 3, 2, 60, 61, 2, 3, 2,
2472 3, 2, 62, 63, 64, 65, 66, 67,
2473 68, 69, 70, 71, 72, 73, 74, 75,
2474 76, 2, 3, 3, 2, 3, 2, 3,
2475 2, 3, 3, 3, 3, 2, 3, 3,
2476 2, 2, 2, 3, 3, 2, 3, 2,
2477 3, 3, 2, 2, 2, 3, 3, 2,
2478 3, 3, 3, 2, 3, 3, 3, 3,
2479 2, 3, 3, 3, 2, 3, 3, 2,
2480 77, 78, 63, 2, 3, 2, 3, 3,
2481 2, 79, 80, 81, 82, 83, 84, 85,
2482 2, 86, 87, 88, 89, 90, 2, 3,
2483 2, 3, 2, 3, 2, 3, 3, 3,
2484 3, 3, 2, 3, 2, 91, 92, 93,
2485 94, 95, 96, 97, 98, 99, 100, 101,
2486 102, 103, 104, 105, 106, 107, 104, 108,
2487 109, 110, 111, 112, 2, 3, 3, 2,
2488 2, 3, 2, 2, 3, 3, 3, 2,
2489 3, 2, 3, 3, 2, 2, 2, 3,
2490 3, 3, 2, 3, 2, 3, 3, 3,
2491 2, 3, 3, 3, 3, 3, 3, 3,
2492 2, 3, 2, 3, 2, 3, 2, 2,
2493 3, 3, 3, 2, 2, 2, 3, 2,
2494 3, 3, 2, 3, 2, 3, 3, 2,
2495 3, 3, 2, 113, 114, 115, 116, 2,
2496 3, 2, 3, 2, 3, 2, 3, 2,
2497 117, 2, 3, 2, 118, 119, 120, 121,
2498 122, 123, 2, 3, 3, 3, 2, 2,
2499 2, 2, 3, 3, 2, 3, 3, 2,
2500 2, 2, 3, 3, 3, 3, 2, 124,
2501 125, 126, 2, 3, 3, 3, 3, 3,
2502 2, 3, 2, 3, 2, 127, 128, 129,
2503 2, 130, 2, 2, 130, 2, 130, 130,
2504 2, 130, 130, 2, 130, 130, 130, 2,
2505 130, 2, 130, 130, 2, 130, 130, 130,
2506 130, 2, 130, 130, 2, 2, 130, 130,
2507 2, 130, 2, 131, 132, 133, 134, 135,
2508 136, 137, 139, 140, 141, 142, 143, 144,
2509 145, 146, 147, 148, 149, 150, 22, 151,
2510 152, 153, 154, 155, 156, 157, 158, 159,
2511 138, 2, 130, 130, 130, 130, 2, 130,
2512 2, 130, 130, 2, 3, 3, 2, 2,
2513 3, 130, 130, 2, 130, 130, 2, 130,
2514 2, 3, 130, 130, 130, 3, 3, 2,
2515 130, 130, 130, 2, 2, 2, 130, 2,
2516 3, 3, 130, 130, 3, 2, 130, 130,
2517 130, 2, 130, 2, 130, 2, 130, 2,
2518 3, 2, 2, 130, 130, 2, 130, 2,
2519 3, 130, 130, 3, 130, 2, 3, 130,
2520 130, 3, 3, 130, 130, 2, 130, 130,
2521 3, 2, 130, 130, 130, 3, 3, 3,
2522 2, 130, 3, 130, 2, 2, 2, 3,
2523 2, 2, 2, 130, 130, 130, 3, 130,
2524 3, 2, 130, 130, 3, 3, 3, 130,
2525 130, 130, 2, 130, 130, 3, 3, 2,
2526 2, 2, 130, 130, 130, 2, 130, 2,
2527 3, 130, 130, 130, 130, 3, 130, 3,
2528 3, 2, 130, 3, 130, 2, 130, 2,
2529 130, 3, 130, 130, 2, 130, 2, 130,
2530 130, 130, 130, 3, 2, 3, 130, 2,
2531 130, 130, 130, 130, 2, 130, 2, 160,
2532 161, 162, 163, 164, 165, 166, 167, 168,
2533 169, 170, 171, 172, 173, 174, 175, 176,
2534 177, 178, 179, 180, 2, 3, 130, 130,
2535 3, 130, 2, 3, 130, 130, 130, 2,
2536 130, 3, 130, 130, 130, 2, 130, 2,
2537 130, 130, 2, 130, 130, 2, 3, 130,
2538 3, 2, 130, 130, 130, 2, 3, 130,
2539 2, 130, 130, 2, 130, 130, 3, 130,
2540 3, 3, 130, 2, 130, 130, 3, 2,
2541 130, 130, 130, 130, 3, 130, 130, 3,
2542 130, 2, 130, 2, 3, 3, 3, 130,
2543 130, 3, 2, 130, 2, 130, 2, 3,
2544 3, 3, 3, 130, 130, 3, 130, 2,
2545 3, 130, 130, 3, 130, 3, 2, 3,
2546 130, 3, 130, 2, 3, 130, 130, 130,
2547 130, 3, 130, 2, 130, 130, 2, 181,
2548 182, 183, 184, 185, 2, 130, 58, 2,
2549 130, 2, 130, 2, 130, 2, 130, 2,
2550 186, 187, 2, 130, 2, 130, 2, 188,
2551 189, 190, 191, 66, 192, 193, 194, 195,
2552 196, 197, 198, 199, 200, 201, 2, 130,
2553 130, 2, 130, 2, 130, 2, 130, 130,
2554 130, 3, 3, 130, 2, 130, 2, 130,
2555 2, 3, 130, 2, 130, 3, 2, 3,
2556 130, 130, 130, 3, 130, 3, 2, 130,
2557 2, 3, 130, 3, 130, 3, 130, 2,
2558 130, 130, 3, 130, 2, 130, 130, 130,
2559 130, 2, 130, 3, 3, 130, 130, 3,
2560 2, 130, 130, 3, 130, 3, 2, 202,
2561 203, 189, 2, 130, 2, 130, 130, 2,
2562 204, 205, 206, 207, 208, 209, 210, 2,
2563 211, 212, 213, 214, 215, 2, 130, 2,
2564 130, 2, 130, 2, 130, 130, 130, 130,
2565 130, 2, 130, 2, 216, 217, 218, 219,
2566 220, 221, 222, 223, 224, 225, 226, 227,
2567 228, 229, 230, 231, 232, 233, 234, 235,
2568 236, 237, 238, 2, 130, 3, 130, 2,
2569 2, 130, 3, 2, 3, 3, 2, 130,
2570 3, 130, 130, 2, 130, 2, 3, 130,
2571 3, 130, 3, 2, 2, 130, 2, 3,
2572 130, 130, 3, 130, 3, 130, 2, 130,
2573 3, 130, 2, 130, 130, 3, 130, 3,
2574 2, 130, 130, 3, 3, 3, 3, 130,
2575 130, 2, 3, 130, 2, 3, 3, 130,
2576 2, 130, 3, 130, 3, 130, 3, 130,
2577 2, 3, 2, 130, 130, 3, 3, 130,
2578 3, 130, 2, 2, 2, 130, 130, 3,
2579 130, 3, 130, 2, 2, 130, 3, 3,
2580 130, 3, 130, 2, 3, 130, 3, 130,
2581 2, 3, 3, 130, 130, 2, 3, 3,
2582 3, 130, 130, 2, 239, 240, 115, 241,
2583 2, 130, 2, 130, 2, 130, 2, 242,
2584 2, 130, 2, 243, 244, 245, 246, 247,
2585 248, 2, 3, 3, 130, 130, 130, 2,
2586 2, 2, 2, 130, 130, 2, 130, 130,
2587 2, 2, 2, 130, 130, 130, 130, 2,
2588 249, 250, 251, 2, 130, 130, 130, 130,
2589 130, 2, 130, 2, 130, 2, 252, 2,
2590 3, 2, 253, 2, 254, 255, 256, 258,
2591 257, 2, 130, 2, 2, 130, 130, 3,
2592 2, 3, 2, 259, 2, 260, 261, 262,
2593 264, 263, 2, 3, 2, 2, 3, 3,
2594 79, 80, 81, 82, 83, 84, 2, 3,
2595 1, 265, 265, 3, 1, 265, 266, 3,
2596 1, 267, 268, 267, 268, 268, 267, 268,
2597 268, 267, 268, 268, 268, 267, 268, 267,
2598 268, 268, 267, 268, 268, 268, 268, 267,
2599 268, 268, 267, 267, 268, 268, 267, 268,
2600 267, 269, 270, 271, 272, 273, 275, 276,
2601 277, 279, 280, 281, 282, 283, 284, 285,
2602 286, 287, 288, 289, 290, 291, 292, 293,
2603 294, 295, 296, 274, 278, 267, 268, 268,
2604 268, 268, 267, 268, 267, 268, 268, 267,
2605 267, 267, 268, 267, 267, 267, 268, 268,
2606 268, 268, 267, 267, 267, 267, 267, 267,
2607 268, 267, 267, 267, 267, 267, 267, 268,
2608 267, 267, 267, 267, 268, 268, 268, 268,
2609 267, 268, 268, 268, 268, 268, 267, 268,
2610 268, 267, 268, 268, 268, 268, 267, 268,
2611 268, 267, 267, 267, 267, 267, 267, 268,
2612 268, 268, 268, 268, 268, 267, 268, 268,
2613 267, 267, 267, 267, 267, 267, 268, 268,
2614 267, 268, 268, 268, 268, 268, 267, 268,
2615 268, 267, 268, 267, 268, 268, 268, 267,
2616 268, 267, 268, 268, 268, 268, 268, 267,
2617 268, 267, 268, 268, 268, 268, 267, 268,
2618 267, 297, 298, 299, 300, 301, 302, 303,
2619 304, 305, 306, 307, 308, 309, 310, 311,
2620 312, 313, 314, 315, 316, 317, 267, 268,
2621 268, 267, 268, 268, 268, 267, 268, 268,
2622 268, 268, 267, 268, 267, 268, 268, 267,
2623 268, 268, 267, 268, 267, 267, 267, 268,
2624 268, 267, 268, 268, 267, 268, 268, 267,
2625 268, 267, 268, 268, 268, 268, 268, 267,
2626 268, 267, 268, 268, 267, 267, 267, 268,
2627 268, 268, 267, 268, 267, 268, 267, 268,
2628 268, 268, 268, 268, 267, 268, 268, 267,
2629 318, 319, 320, 321, 322, 267, 268, 323,
2630 267, 318, 319, 324, 320, 321, 322, 267,
2631 268, 267, 268, 267, 268, 267, 268, 267,
2632 268, 267, 325, 326, 267, 268, 267, 268,
2633 267, 327, 328, 329, 330, 331, 332, 333,
2634 334, 335, 336, 337, 338, 339, 340, 341,
2635 267, 268, 268, 267, 268, 267, 268, 267,
2636 268, 268, 268, 268, 267, 268, 268, 267,
2637 267, 267, 268, 268, 267, 268, 267, 268,
2638 268, 267, 267, 267, 268, 268, 267, 268,
2639 268, 268, 267, 268, 268, 268, 268, 267,
2640 268, 268, 268, 267, 268, 268, 267, 342,
2641 343, 328, 267, 268, 267, 268, 268, 267,
2642 344, 345, 346, 347, 348, 349, 350, 267,
2643 351, 352, 353, 354, 355, 267, 268, 267,
2644 268, 267, 268, 267, 268, 268, 268, 268,
2645 268, 267, 268, 267, 356, 357, 358, 359,
2646 360, 361, 362, 363, 364, 365, 366, 367,
2647 368, 369, 370, 371, 372, 369, 373, 374,
2648 375, 376, 377, 267, 268, 268, 267, 267,
2649 268, 267, 267, 268, 268, 268, 267, 268,
2650 267, 268, 268, 267, 267, 267, 268, 268,
2651 268, 267, 268, 267, 268, 268, 268, 267,
2652 268, 268, 268, 268, 268, 268, 268, 267,
2653 268, 267, 268, 267, 268, 267, 267, 268,
2654 268, 268, 267, 267, 267, 268, 267, 268,
2655 268, 267, 268, 267, 268, 268, 267, 268,
2656 268, 267, 378, 379, 380, 381, 267, 268,
2657 267, 268, 267, 268, 267, 268, 267, 382,
2658 267, 268, 267, 383, 384, 385, 386, 387,
2659 388, 267, 268, 268, 268, 267, 267, 267,
2660 267, 268, 268, 267, 268, 268, 267, 267,
2661 267, 268, 268, 268, 268, 267, 389, 390,
2662 391, 267, 268, 268, 268, 268, 268, 267,
2663 268, 267, 268, 267, 392, 393, 394, 267,
2664 395, 267, 395, 267, 267, 395, 395, 267,
2665 395, 395, 267, 395, 395, 395, 267, 395,
2666 267, 395, 395, 267, 395, 395, 395, 395,
2667 267, 395, 395, 267, 267, 395, 395, 267,
2668 395, 267, 396, 397, 398, 399, 400, 401,
2669 402, 404, 405, 406, 407, 408, 409, 410,
2670 411, 412, 413, 414, 415, 287, 416, 417,
2671 418, 419, 420, 421, 422, 423, 424, 403,
2672 267, 395, 395, 395, 395, 267, 395, 267,
2673 395, 395, 267, 268, 268, 267, 267, 268,
2674 395, 395, 267, 395, 395, 267, 395, 267,
2675 268, 395, 395, 395, 268, 268, 267, 395,
2676 395, 395, 267, 267, 267, 395, 267, 268,
2677 268, 395, 395, 268, 267, 395, 395, 395,
2678 267, 395, 267, 395, 267, 395, 267, 268,
2679 267, 267, 395, 395, 267, 395, 267, 268,
2680 395, 395, 268, 395, 267, 268, 395, 395,
2681 268, 268, 395, 395, 267, 395, 395, 268,
2682 267, 395, 395, 395, 268, 268, 268, 267,
2683 395, 268, 395, 267, 267, 267, 268, 267,
2684 267, 267, 395, 395, 395, 268, 395, 268,
2685 267, 395, 395, 268, 268, 268, 395, 395,
2686 395, 267, 395, 395, 268, 268, 267, 267,
2687 267, 395, 395, 395, 267, 395, 267, 268,
2688 395, 395, 395, 395, 268, 395, 268, 268,
2689 267, 395, 268, 395, 267, 395, 267, 395,
2690 268, 395, 395, 267, 395, 267, 395, 395,
2691 395, 395, 268, 267, 268, 395, 267, 395,
2692 395, 395, 395, 267, 395, 267, 425, 426,
2693 427, 428, 429, 430, 431, 432, 433, 434,
2694 435, 436, 437, 438, 439, 440, 441, 442,
2695 443, 444, 445, 267, 268, 395, 395, 268,
2696 395, 267, 268, 395, 395, 395, 267, 395,
2697 268, 395, 395, 395, 267, 395, 267, 395,
2698 395, 267, 395, 395, 267, 268, 395, 268,
2699 267, 395, 395, 395, 267, 268, 395, 267,
2700 395, 395, 267, 395, 395, 268, 395, 268,
2701 268, 395, 267, 395, 395, 268, 267, 395,
2702 395, 395, 395, 268, 395, 395, 268, 395,
2703 267, 395, 267, 268, 268, 268, 395, 395,
2704 268, 267, 395, 267, 395, 267, 268, 268,
2705 268, 268, 395, 395, 268, 395, 267, 268,
2706 395, 395, 268, 395, 268, 267, 268, 395,
2707 268, 395, 267, 268, 395, 395, 395, 395,
2708 268, 395, 267, 395, 395, 267, 446, 447,
2709 448, 449, 450, 267, 395, 323, 267, 395,
2710 267, 395, 267, 395, 267, 395, 267, 451,
2711 452, 267, 395, 267, 395, 267, 453, 454,
2712 455, 456, 331, 457, 458, 459, 460, 461,
2713 462, 463, 464, 465, 466, 267, 395, 395,
2714 267, 395, 267, 395, 267, 395, 395, 395,
2715 268, 268, 395, 267, 395, 267, 395, 267,
2716 268, 395, 267, 395, 268, 267, 268, 395,
2717 395, 395, 268, 395, 268, 267, 395, 267,
2718 268, 395, 268, 395, 268, 395, 267, 395,
2719 395, 268, 395, 267, 395, 395, 395, 395,
2720 267, 395, 268, 268, 395, 395, 268, 267,
2721 395, 395, 268, 395, 268, 267, 467, 468,
2722 454, 267, 395, 267, 395, 395, 267, 469,
2723 470, 471, 472, 473, 474, 475, 267, 476,
2724 477, 478, 479, 480, 267, 395, 267, 395,
2725 267, 395, 267, 395, 395, 395, 395, 395,
2726 267, 395, 267, 481, 482, 483, 484, 485,
2727 486, 487, 488, 489, 490, 491, 492, 493,
2728 494, 495, 496, 497, 498, 499, 500, 501,
2729 502, 503, 267, 395, 268, 395, 267, 267,
2730 395, 268, 267, 268, 268, 267, 395, 268,
2731 395, 395, 267, 395, 267, 268, 395, 268,
2732 395, 268, 267, 267, 395, 267, 268, 395,
2733 395, 268, 395, 268, 395, 267, 395, 268,
2734 395, 267, 395, 395, 268, 395, 268, 267,
2735 395, 395, 268, 268, 268, 268, 395, 395,
2736 267, 268, 395, 267, 268, 268, 395, 267,
2737 395, 268, 395, 268, 395, 268, 395, 267,
2738 268, 267, 395, 395, 268, 268, 395, 268,
2739 395, 267, 267, 267, 395, 395, 268, 395,
2740 268, 395, 267, 267, 395, 268, 268, 395,
2741 268, 395, 267, 268, 395, 268, 395, 267,
2742 268, 268, 395, 395, 267, 268, 268, 268,
2743 395, 395, 267, 504, 505, 380, 506, 267,
2744 395, 267, 395, 267, 395, 267, 507, 267,
2745 395, 267, 508, 509, 510, 511, 512, 513,
2746 267, 268, 268, 395, 395, 395, 267, 267,
2747 267, 267, 395, 395, 267, 395, 395, 267,
2748 267, 267, 395, 395, 395, 395, 267, 514,
2749 515, 516, 267, 395, 395, 395, 395, 395,
2750 267, 395, 267, 395, 267, 517, 267, 268,
2751 267, 518, 267, 519, 520, 521, 523, 522,
2752 267, 395, 267, 267, 395, 395, 268, 267,
2753 268, 267, 524, 267, 525, 526, 527, 529,
2754 528, 267, 268, 267, 267, 268, 268, 344,
2755 345, 346, 347, 348, 349, 267, 268, 267,
2756 268, 268, 267, 266, 268, 268, 267, 266,
2757 268, 267, 266, 268, 267, 531, 532, 530,
2758 267, 266, 268, 267, 266, 268, 267, 533,
2759 534, 535, 536, 537, 530, 267, 538, 267,
2760 297, 298, 299, 533, 534, 539, 300, 301,
2761 302, 303, 304, 305, 306, 307, 308, 309,
2762 310, 311, 312, 313, 314, 315, 316, 317,
2763 267, 540, 538, 297, 298, 299, 541, 535,
2764 536, 300, 301, 302, 303, 304, 305, 306,
2765 307, 308, 309, 310, 311, 312, 313, 314,
2766 315, 316, 317, 267, 540, 267, 542, 540,
2767 297, 298, 299, 543, 536, 300, 301, 302,
2768 303, 304, 305, 306, 307, 308, 309, 310,
2769 311, 312, 313, 314, 315, 316, 317, 267,
2770 542, 267, 267, 542, 544, 267, 542, 267,
2771 545, 546, 267, 540, 267, 267, 542, 267,
2772 540, 267, 540, 327, 328, 329, 330, 331,
2773 332, 333, 547, 335, 336, 337, 338, 339,
2774 340, 341, 549, 550, 551, 552, 553, 554,
2775 549, 550, 551, 552, 553, 554, 549, 548,
2776 555, 267, 268, 538, 267, 556, 556, 556,
2777 542, 267, 297, 298, 299, 541, 539, 300,
2778 301, 302, 303, 304, 305, 306, 307, 308,
2779 309, 310, 311, 312, 313, 314, 315, 316,
2780 317, 267, 545, 557, 267, 267, 540, 556,
2781 556, 542, 556, 556, 542, 556, 556, 556,
2782 542, 556, 556, 542, 556, 556, 542, 556,
2783 556, 267, 542, 542, 551, 552, 553, 554,
2784 548, 549, 551, 552, 553, 554, 548, 549,
2785 551, 552, 553, 554, 548, 549, 551, 552,
2786 553, 554, 548, 549, 551, 552, 553, 554,
2787 548, 549, 551, 552, 553, 554, 548, 549,
2788 551, 552, 553, 554, 548, 549, 551, 552,
2789 553, 554, 548, 549, 551, 552, 553, 554,
2790 548, 549, 550, 555, 552, 553, 554, 548,
2791 549, 550, 552, 553, 554, 548, 549, 550,
2792 552, 553, 554, 548, 549, 550, 552, 553,
2793 554, 548, 549, 550, 552, 553, 554, 548,
2794 549, 550, 552, 553, 554, 548, 549, 550,
2795 552, 553, 554, 548, 549, 550, 552, 553,
2796 554, 548, 549, 550, 552, 553, 554, 548,
2797 549, 550, 551, 555, 553, 554, 548, 549,
2798 550, 551, 553, 554, 548, 549, 550, 551,
2799 553, 554, 548, 549, 550, 551, 553, 554,
2800 548, 549, 550, 551, 553, 558, 557, 552,
2801 267, 555, 556, 267, 540, 542, 268, 268,
2802 267, 559, 560, 561, 562, 563, 530, 267,
2803 268, 323, 268, 268, 268, 267, 268, 268,
2804 267, 395, 268, 267, 395, 268, 267, 268,
2805 395, 268, 267, 530, 267, 564, 566, 567,
2806 568, 569, 570, 571, 566, 567, 568, 569,
2807 570, 571, 566, 530, 565, 555, 267, 268,
2808 538, 268, 267, 540, 540, 540, 542, 267,
2809 540, 540, 542, 540, 540, 542, 540, 540,
2810 540, 542, 540, 540, 542, 540, 540, 542,
2811 540, 540, 267, 542, 568, 569, 570, 571,
2812 565, 566, 568, 569, 570, 571, 565, 566,
2813 568, 569, 570, 571, 565, 566, 568, 569,
2814 570, 571, 565, 566, 568, 569, 570, 571,
2815 565, 566, 568, 569, 570, 571, 565, 566,
2816 568, 569, 570, 571, 565, 566, 568, 569,
2817 570, 571, 565, 566, 568, 569, 570, 571,
2818 565, 566, 567, 555, 569, 570, 571, 565,
2819 566, 567, 569, 570, 571, 565, 566, 567,
2820 569, 570, 571, 565, 566, 567, 569, 570,
2821 571, 565, 566, 567, 569, 570, 571, 565,
2822 566, 567, 569, 570, 571, 565, 566, 567,
2823 569, 570, 571, 565, 566, 567, 569, 570,
2824 571, 565, 566, 567, 569, 570, 571, 565,
2825 566, 567, 568, 555, 570, 571, 565, 566,
2826 567, 568, 570, 571, 565, 566, 567, 568,
2827 570, 571, 565, 566, 567, 568, 570, 571,
2828 565, 566, 567, 568, 570, 572, 573, 569,
2829 267, 555, 540, 268, 540, 542, 268, 542,
2830 268, 267, 540, 574, 575, 530, 267, 268,
2831 267, 268, 268, 268, 267, 577, 578, 579,
2832 580, 576, 267, 581, 582, 530, 267, 266,
2833 268, 267, 268, 266, 268, 267, 583, 530,
2834 267, 268, 268, 267, 584, 530, 267, 268,
2835 268, 267, 585, 586, 587, 588, 589, 590,
2836 591, 592, 593, 594, 595, 530, 267, 268,
2837 596, 267, 344, 345, 346, 347, 348, 349,
2838 597, 267, 598, 267, 268, 267, 395, 268,
2839 267, 268, 395, 268, 395, 268, 267, 395,
2840 395, 268, 395, 268, 395, 268, 395, 268,
2841 395, 268, 267, 268, 268, 395, 395, 268,
2842 267, 395, 395, 268, 267, 395, 268, 395,
2843 268, 267, 268, 395, 268, 395, 268, 267,
2844 395, 268, 395, 268, 267, 395, 268, 267,
2845 395, 395, 268, 268, 395, 268, 395, 268,
2846 395, 267, 576, 267, 599, 576, 267, 322,
2847 530, 600, 530, 267, 268, 267, 266, 3,
2848 1, 266, 3, 1, 602, 603, 601, 1,
2849 266, 3, 1, 266, 3, 1, 604, 605,
2850 606, 607, 608, 601, 1, 609, 610, 612,
2851 611, 611, 612, 612, 611, 612, 612, 611,
2852 612, 612, 612, 611, 612, 611, 612, 612,
2853 611, 612, 612, 612, 612, 611, 612, 612,
2854 611, 611, 612, 612, 611, 612, 611, 613,
2855 614, 615, 616, 617, 619, 620, 621, 623,
2856 624, 625, 626, 627, 628, 629, 630, 631,
2857 632, 633, 634, 635, 636, 637, 638, 639,
2858 640, 618, 622, 611, 612, 612, 612, 612,
2859 611, 612, 611, 612, 612, 611, 611, 611,
2860 612, 611, 611, 611, 612, 612, 612, 612,
2861 611, 611, 611, 611, 611, 611, 612, 611,
2862 611, 611, 611, 611, 611, 612, 611, 611,
2863 611, 611, 612, 612, 612, 612, 611, 612,
2864 612, 612, 612, 612, 611, 612, 612, 611,
2865 612, 612, 612, 612, 611, 612, 612, 611,
2866 611, 611, 611, 611, 611, 612, 612, 612,
2867 612, 612, 612, 611, 612, 612, 611, 611,
2868 611, 611, 611, 611, 612, 612, 611, 612,
2869 612, 612, 612, 612, 611, 612, 612, 611,
2870 612, 611, 612, 612, 612, 611, 612, 611,
2871 612, 612, 612, 612, 612, 611, 612, 611,
2872 612, 612, 612, 612, 611, 612, 611, 641,
2873 642, 643, 644, 645, 646, 647, 648, 649,
2874 650, 651, 652, 653, 654, 655, 656, 657,
2875 658, 659, 660, 661, 611, 612, 612, 611,
2876 612, 612, 612, 611, 612, 612, 612, 612,
2877 611, 612, 611, 612, 612, 611, 612, 612,
2878 611, 612, 611, 611, 611, 612, 612, 611,
2879 612, 612, 611, 612, 612, 611, 612, 611,
2880 612, 612, 612, 612, 612, 611, 612, 611,
2881 612, 612, 611, 611, 611, 612, 612, 612,
2882 611, 612, 611, 612, 611, 612, 612, 612,
2883 612, 612, 611, 612, 612, 611, 662, 663,
2884 664, 665, 666, 611, 612, 667, 611, 662,
2885 663, 668, 664, 665, 666, 611, 612, 611,
2886 612, 611, 612, 611, 612, 611, 612, 611,
2887 669, 670, 611, 612, 611, 612, 611, 671,
2888 672, 673, 674, 675, 676, 677, 678, 679,
2889 680, 681, 682, 683, 684, 685, 611, 612,
2890 612, 611, 612, 611, 612, 611, 612, 612,
2891 612, 612, 611, 612, 612, 611, 611, 611,
2892 612, 612, 611, 612, 611, 612, 612, 611,
2893 611, 611, 612, 612, 611, 612, 612, 612,
2894 611, 612, 612, 612, 612, 611, 612, 612,
2895 612, 611, 612, 612, 611, 686, 687, 672,
2896 611, 612, 611, 612, 612, 611, 688, 689,
2897 690, 691, 692, 693, 694, 611, 695, 696,
2898 697, 698, 699, 611, 612, 611, 612, 611,
2899 612, 611, 612, 612, 612, 612, 612, 611,
2900 612, 611, 700, 701, 702, 703, 704, 705,
2901 706, 707, 708, 709, 710, 711, 712, 713,
2902 714, 715, 716, 713, 717, 718, 719, 720,
2903 721, 611, 612, 612, 611, 611, 612, 611,
2904 611, 612, 612, 612, 611, 612, 611, 612,
2905 612, 611, 611, 611, 612, 612, 612, 611,
2906 612, 611, 612, 612, 612, 611, 612, 612,
2907 612, 612, 612, 612, 612, 611, 612, 611,
2908 612, 611, 612, 611, 611, 612, 612, 612,
2909 611, 611, 611, 612, 611, 612, 612, 611,
2910 612, 611, 612, 612, 611, 612, 612, 611,
2911 722, 723, 724, 725, 611, 612, 611, 612,
2912 611, 612, 611, 612, 611, 726, 611, 612,
2913 611, 727, 728, 729, 730, 731, 732, 611,
2914 612, 612, 612, 611, 611, 611, 611, 612,
2915 612, 611, 612, 612, 611, 611, 611, 612,
2916 612, 612, 612, 611, 733, 734, 735, 611,
2917 612, 612, 612, 612, 612, 611, 612, 611,
2918 612, 611, 736, 737, 738, 611, 739, 611,
2919 739, 611, 611, 739, 739, 611, 739, 739,
2920 611, 739, 739, 739, 611, 739, 611, 739,
2921 739, 611, 739, 739, 739, 739, 611, 739,
2922 739, 611, 611, 739, 739, 611, 739, 611,
2923 740, 741, 742, 743, 744, 745, 746, 748,
2924 749, 750, 751, 752, 753, 754, 755, 756,
2925 757, 758, 759, 631, 760, 761, 762, 763,
2926 764, 765, 766, 767, 768, 747, 611, 739,
2927 739, 739, 739, 611, 739, 611, 739, 739,
2928 611, 612, 612, 611, 611, 612, 739, 739,
2929 611, 739, 739, 611, 739, 611, 612, 739,
2930 739, 739, 612, 612, 611, 739, 739, 739,
2931 611, 611, 611, 739, 611, 612, 612, 739,
2932 739, 612, 611, 739, 739, 739, 611, 739,
2933 611, 739, 611, 739, 611, 612, 611, 611,
2934 739, 739, 611, 739, 611, 612, 739, 739,
2935 612, 739, 611, 612, 739, 739, 612, 612,
2936 739, 739, 611, 739, 739, 612, 611, 739,
2937 739, 739, 612, 612, 612, 611, 739, 612,
2938 739, 611, 611, 611, 612, 611, 611, 611,
2939 739, 739, 739, 612, 739, 612, 611, 739,
2940 739, 612, 612, 612, 739, 739, 739, 611,
2941 739, 739, 612, 612, 611, 611, 611, 739,
2942 739, 739, 611, 739, 611, 612, 739, 739,
2943 739, 739, 612, 739, 612, 612, 611, 739,
2944 612, 739, 611, 739, 611, 739, 612, 739,
2945 739, 611, 739, 611, 739, 739, 739, 739,
2946 612, 611, 612, 739, 611, 739, 739, 739,
2947 739, 611, 739, 611, 769, 770, 771, 772,
2948 773, 774, 775, 776, 777, 778, 779, 780,
2949 781, 782, 783, 784, 785, 786, 787, 788,
2950 789, 611, 612, 739, 739, 612, 739, 611,
2951 612, 739, 739, 739, 611, 739, 612, 739,
2952 739, 739, 611, 739, 611, 739, 739, 611,
2953 739, 739, 611, 612, 739, 612, 611, 739,
2954 739, 739, 611, 612, 739, 611, 739, 739,
2955 611, 739, 739, 612, 739, 612, 612, 739,
2956 611, 739, 739, 612, 611, 739, 739, 739,
2957 739, 612, 739, 739, 612, 739, 611, 739,
2958 611, 612, 612, 612, 739, 739, 612, 611,
2959 739, 611, 739, 611, 612, 612, 612, 612,
2960 739, 739, 612, 739, 611, 612, 739, 739,
2961 612, 739, 612, 611, 612, 739, 612, 739,
2962 611, 612, 739, 739, 739, 739, 612, 739,
2963 611, 739, 739, 611, 790, 791, 792, 793,
2964 794, 611, 739, 667, 611, 739, 611, 739,
2965 611, 739, 611, 739, 611, 795, 796, 611,
2966 739, 611, 739, 611, 797, 798, 799, 800,
2967 675, 801, 802, 803, 804, 805, 806, 807,
2968 808, 809, 810, 611, 739, 739, 611, 739,
2969 611, 739, 611, 739, 739, 739, 612, 612,
2970 739, 611, 739, 611, 739, 611, 612, 739,
2971 611, 739, 612, 611, 612, 739, 739, 739,
2972 612, 739, 612, 611, 739, 611, 612, 739,
2973 612, 739, 612, 739, 611, 739, 739, 612,
2974 739, 611, 739, 739, 739, 739, 611, 739,
2975 612, 612, 739, 739, 612, 611, 739, 739,
2976 612, 739, 612, 611, 811, 812, 798, 611,
2977 739, 611, 739, 739, 611, 813, 814, 815,
2978 816, 817, 818, 819, 611, 820, 821, 822,
2979 823, 824, 611, 739, 611, 739, 611, 739,
2980 611, 739, 739, 739, 739, 739, 611, 739,
2981 611, 825, 826, 827, 828, 829, 830, 831,
2982 832, 833, 834, 835, 836, 837, 838, 839,
2983 840, 841, 842, 843, 844, 845, 846, 847,
2984 611, 739, 612, 739, 611, 611, 739, 612,
2985 611, 612, 612, 611, 739, 612, 739, 739,
2986 611, 739, 611, 612, 739, 612, 739, 612,
2987 611, 611, 739, 611, 612, 739, 739, 612,
2988 739, 612, 739, 611, 739, 612, 739, 611,
2989 739, 739, 612, 739, 612, 611, 739, 739,
2990 612, 612, 612, 612, 739, 739, 611, 612,
2991 739, 611, 612, 612, 739, 611, 739, 612,
2992 739, 612, 739, 612, 739, 611, 612, 611,
2993 739, 739, 612, 612, 739, 612, 739, 611,
2994 611, 611, 739, 739, 612, 739, 612, 739,
2995 611, 611, 739, 612, 612, 739, 612, 739,
2996 611, 612, 739, 612, 739, 611, 612, 612,
2997 739, 739, 611, 612, 612, 612, 739, 739,
2998 611, 848, 849, 724, 850, 611, 739, 611,
2999 739, 611, 739, 611, 851, 611, 739, 611,
3000 852, 853, 854, 855, 856, 857, 611, 612,
3001 612, 739, 739, 739, 611, 611, 611, 611,
3002 739, 739, 611, 739, 739, 611, 611, 611,
3003 739, 739, 739, 739, 611, 858, 859, 860,
3004 611, 739, 739, 739, 739, 739, 611, 739,
3005 611, 739, 611, 861, 611, 612, 611, 862,
3006 611, 863, 864, 865, 867, 866, 611, 739,
3007 611, 611, 739, 739, 612, 611, 612, 611,
3008 868, 611, 869, 870, 871, 873, 872, 611,
3009 612, 611, 611, 612, 612, 688, 689, 690,
3010 691, 692, 693, 611, 641, 642, 643, 604,
3011 605, 874, 644, 645, 646, 647, 648, 649,
3012 650, 651, 652, 653, 654, 655, 656, 657,
3013 658, 659, 660, 661, 611, 875, 610, 641,
3014 642, 643, 876, 606, 607, 644, 645, 646,
3015 647, 648, 649, 650, 651, 652, 653, 654,
3016 655, 656, 657, 658, 659, 660, 661, 611,
3017 875, 611, 877, 875, 641, 642, 643, 878,
3018 607, 644, 645, 646, 647, 648, 649, 650,
3019 651, 652, 653, 654, 655, 656, 657, 658,
3020 659, 660, 661, 611, 877, 611, 609, 877,
3021 879, 611, 877, 611, 880, 881, 611, 875,
3022 611, 611, 877, 611, 875, 611, 875, 671,
3023 672, 673, 674, 675, 676, 677, 882, 679,
3024 680, 681, 682, 683, 684, 685, 884, 885,
3025 886, 887, 888, 889, 884, 885, 886, 887,
3026 888, 889, 884, 883, 890, 611, 612, 610,
3027 611, 891, 891, 891, 877, 611, 641, 642,
3028 643, 876, 874, 644, 645, 646, 647, 648,
3029 649, 650, 651, 652, 653, 654, 655, 656,
3030 657, 658, 659, 660, 661, 611, 880, 892,
3031 611, 611, 875, 891, 891, 877, 891, 891,
3032 877, 891, 891, 891, 877, 891, 891, 877,
3033 891, 891, 877, 891, 891, 611, 877, 877,
3034 886, 887, 888, 889, 883, 884, 886, 887,
3035 888, 889, 883, 884, 886, 887, 888, 889,
3036 883, 884, 886, 887, 888, 889, 883, 884,
3037 886, 887, 888, 889, 883, 884, 886, 887,
3038 888, 889, 883, 884, 886, 887, 888, 889,
3039 883, 884, 886, 887, 888, 889, 883, 884,
3040 886, 887, 888, 889, 883, 884, 885, 890,
3041 887, 888, 889, 883, 884, 885, 887, 888,
3042 889, 883, 884, 885, 887, 888, 889, 883,
3043 884, 885, 887, 888, 889, 883, 884, 885,
3044 887, 888, 889, 883, 884, 885, 887, 888,
3045 889, 883, 884, 885, 887, 888, 889, 883,
3046 884, 885, 887, 888, 889, 883, 884, 885,
3047 887, 888, 889, 883, 884, 885, 886, 890,
3048 888, 889, 883, 884, 885, 886, 888, 889,
3049 883, 884, 885, 886, 888, 889, 883, 884,
3050 885, 886, 888, 889, 883, 884, 885, 886,
3051 888, 893, 892, 887, 611, 890, 891, 611,
3052 875, 877, 265, 3, 1, 894, 895, 896,
3053 897, 898, 601, 1, 265, 899, 3, 265,
3054 3, 265, 3, 1, 901, 900, 900, 901,
3055 901, 900, 901, 901, 900, 901, 901, 901,
3056 900, 901, 900, 901, 901, 900, 901, 901,
3057 901, 901, 900, 901, 901, 900, 900, 901,
3058 901, 900, 901, 900, 902, 903, 904, 905,
3059 906, 908, 909, 910, 912, 913, 914, 915,
3060 916, 917, 918, 919, 920, 921, 922, 923,
3061 924, 925, 926, 927, 928, 929, 907, 911,
3062 900, 901, 901, 901, 901, 900, 901, 900,
3063 901, 901, 900, 900, 900, 901, 900, 900,
3064 900, 901, 901, 901, 901, 900, 900, 900,
3065 900, 900, 900, 901, 900, 900, 900, 900,
3066 900, 900, 901, 900, 900, 900, 900, 901,
3067 901, 901, 901, 900, 901, 901, 901, 901,
3068 901, 900, 901, 901, 900, 901, 901, 901,
3069 901, 900, 901, 901, 900, 900, 900, 900,
3070 900, 900, 901, 901, 901, 901, 901, 901,
3071 900, 901, 901, 900, 900, 900, 900, 900,
3072 900, 901, 901, 900, 901, 901, 901, 901,
3073 901, 900, 901, 901, 900, 901, 900, 901,
3074 901, 901, 900, 901, 900, 901, 901, 901,
3075 901, 901, 900, 901, 900, 901, 901, 901,
3076 901, 900, 901, 900, 930, 931, 932, 933,
3077 934, 935, 936, 937, 938, 939, 940, 941,
3078 942, 943, 944, 945, 946, 947, 948, 949,
3079 950, 900, 901, 901, 900, 901, 901, 901,
3080 900, 901, 901, 901, 901, 900, 901, 900,
3081 901, 901, 900, 901, 901, 900, 901, 900,
3082 900, 900, 901, 901, 900, 901, 901, 900,
3083 901, 901, 900, 901, 900, 901, 901, 901,
3084 901, 901, 900, 901, 900, 901, 901, 900,
3085 900, 900, 901, 901, 901, 900, 901, 900,
3086 901, 900, 901, 901, 901, 901, 901, 900,
3087 901, 901, 900, 951, 952, 953, 954, 955,
3088 900, 901, 899, 900, 901, 900, 901, 900,
3089 901, 900, 901, 900, 956, 957, 900, 901,
3090 900, 901, 900, 958, 959, 960, 961, 962,
3091 963, 964, 965, 966, 967, 968, 969, 970,
3092 971, 972, 900, 901, 901, 900, 901, 900,
3093 901, 900, 901, 901, 901, 901, 900, 901,
3094 901, 900, 900, 900, 901, 901, 900, 901,
3095 900, 901, 901, 900, 900, 900, 901, 901,
3096 900, 901, 901, 901, 900, 901, 901, 901,
3097 901, 900, 901, 901, 901, 900, 901, 901,
3098 900, 973, 974, 959, 900, 901, 900, 901,
3099 901, 900, 975, 976, 977, 978, 979, 980,
3100 900, 981, 982, 983, 984, 985, 900, 901,
3101 900, 901, 900, 901, 900, 901, 901, 901,
3102 901, 901, 900, 901, 900, 986, 987, 988,
3103 989, 990, 991, 992, 993, 994, 995, 996,
3104 997, 998, 999, 1000, 1001, 1002, 999, 1003,
3105 1004, 1005, 1006, 1007, 900, 901, 901, 900,
3106 900, 901, 900, 900, 901, 901, 901, 900,
3107 901, 900, 901, 901, 900, 900, 900, 901,
3108 901, 901, 900, 901, 900, 901, 901, 901,
3109 900, 901, 901, 901, 901, 901, 901, 901,
3110 900, 901, 900, 901, 900, 901, 900, 900,
3111 901, 901, 901, 900, 900, 900, 901, 900,
3112 901, 901, 900, 901, 900, 901, 901, 900,
3113 901, 901, 900, 1008, 1009, 1010, 1011, 900,
3114 901, 900, 901, 900, 901, 900, 901, 900,
3115 1012, 900, 901, 900, 1013, 1014, 1015, 1016,
3116 1017, 1018, 900, 901, 901, 901, 900, 900,
3117 900, 900, 901, 901, 900, 901, 901, 900,
3118 900, 900, 901, 901, 901, 901, 900, 1019,
3119 1020, 1021, 900, 901, 901, 901, 901, 901,
3120 900, 901, 900, 901, 900, 1022, 900, 1023,
3121 1024, 1025, 1027, 1026, 900, 901, 900, 900,
3122 901, 901, 951, 952, 1028, 953, 954, 955,
3123 900, 901, 900, 975, 976, 977, 978, 979,
3124 980, 1029, 900, 1030, 1031, 1032, 900, 1033,
3125 900, 1033, 900, 900, 1033, 1033, 900, 1033,
3126 1033, 900, 1033, 1033, 1033, 900, 1033, 900,
3127 1033, 1033, 900, 1033, 1033, 1033, 1033, 900,
3128 1033, 1033, 900, 900, 1033, 1033, 900, 1033,
3129 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040,
3130 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049,
3131 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056,
3132 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900,
3133 1033, 1033, 1033, 1033, 900, 1033, 900, 1033,
3134 1033, 900, 901, 901, 900, 900, 901, 1033,
3135 1033, 900, 1033, 1033, 900, 1033, 900, 901,
3136 1033, 1033, 1033, 901, 901, 900, 1033, 1033,
3137 1033, 900, 900, 900, 1033, 900, 901, 901,
3138 1033, 1033, 901, 900, 1033, 1033, 1033, 900,
3139 1033, 900, 1033, 900, 1033, 900, 901, 900,
3140 900, 1033, 1033, 900, 1033, 900, 901, 1033,
3141 1033, 901, 1033, 900, 901, 1033, 1033, 901,
3142 901, 1033, 1033, 900, 1033, 1033, 901, 900,
3143 1033, 1033, 1033, 901, 901, 901, 900, 1033,
3144 901, 1033, 900, 900, 900, 901, 900, 900,
3145 900, 1033, 1033, 1033, 901, 1033, 901, 900,
3146 1033, 1033, 901, 901, 901, 1033, 1033, 1033,
3147 900, 1033, 1033, 901, 901, 900, 900, 900,
3148 1033, 1033, 1033, 900, 1033, 900, 901, 1033,
3149 1033, 1033, 1033, 901, 1033, 901, 901, 900,
3150 1033, 901, 1033, 900, 1033, 900, 1033, 901,
3151 1033, 1033, 900, 1033, 900, 1033, 1033, 1033,
3152 1033, 901, 900, 901, 1033, 900, 1033, 1033,
3153 1033, 1033, 900, 1033, 900, 1063, 1064, 1065,
3154 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073,
3155 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081,
3156 1082, 1083, 900, 901, 1033, 1033, 901, 1033,
3157 900, 901, 1033, 1033, 1033, 900, 1033, 901,
3158 1033, 1033, 1033, 900, 1033, 900, 1033, 1033,
3159 900, 1033, 1033, 900, 901, 1033, 901, 900,
3160 1033, 1033, 1033, 900, 901, 1033, 900, 1033,
3161 1033, 900, 1033, 1033, 901, 1033, 901, 901,
3162 1033, 900, 1033, 1033, 901, 900, 1033, 1033,
3163 1033, 1033, 901, 1033, 1033, 901, 1033, 900,
3164 1033, 900, 901, 901, 901, 1033, 1033, 901,
3165 900, 1033, 900, 1033, 900, 901, 901, 901,
3166 901, 1033, 1033, 901, 1033, 900, 901, 1033,
3167 1033, 901, 1033, 901, 900, 901, 1033, 901,
3168 1033, 900, 901, 1033, 1033, 1033, 1033, 901,
3169 1033, 900, 1033, 1033, 900, 1084, 1085, 1086,
3170 1087, 1088, 900, 1033, 899, 900, 1033, 900,
3171 1033, 900, 1033, 900, 1033, 900, 1089, 1090,
3172 900, 1033, 900, 1033, 900, 1091, 1092, 1093,
3173 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100,
3174 1101, 1102, 1103, 1104, 900, 1033, 1033, 900,
3175 1033, 900, 1033, 900, 1033, 1033, 1033, 901,
3176 901, 1033, 900, 1033, 900, 1033, 900, 901,
3177 1033, 900, 1033, 901, 900, 901, 1033, 1033,
3178 1033, 901, 1033, 901, 900, 1033, 900, 901,
3179 1033, 901, 1033, 901, 1033, 900, 1033, 1033,
3180 901, 1033, 900, 1033, 1033, 1033, 1033, 900,
3181 1033, 901, 901, 1033, 1033, 901, 900, 1033,
3182 1033, 901, 1033, 901, 900, 1105, 1106, 1092,
3183 900, 1033, 900, 1033, 1033, 900, 1107, 1108,
3184 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115,
3185 1116, 1117, 1118, 900, 1033, 900, 1033, 900,
3186 1033, 900, 1033, 1033, 1033, 1033, 1033, 900,
3187 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124,
3188 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132,
3189 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140,
3190 1141, 900, 1033, 901, 1033, 900, 900, 1033,
3191 901, 900, 901, 901, 900, 1033, 901, 1033,
3192 1033, 900, 1033, 900, 901, 1033, 901, 1033,
3193 901, 900, 900, 1033, 900, 901, 1033, 1033,
3194 901, 1033, 901, 1033, 900, 1033, 901, 1033,
3195 900, 1033, 1033, 901, 1033, 901, 900, 1033,
3196 1033, 901, 901, 901, 901, 1033, 1033, 900,
3197 901, 1033, 900, 901, 901, 1033, 900, 1033,
3198 901, 1033, 901, 1033, 901, 1033, 900, 901,
3199 900, 1033, 1033, 901, 901, 1033, 901, 1033,
3200 900, 900, 900, 1033, 1033, 901, 1033, 901,
3201 1033, 900, 900, 1033, 901, 901, 1033, 901,
3202 1033, 900, 901, 1033, 901, 1033, 900, 901,
3203 901, 1033, 1033, 900, 901, 901, 901, 1033,
3204 1033, 900, 1142, 1143, 1010, 1144, 900, 1033,
3205 900, 1033, 900, 1033, 900, 1145, 900, 1033,
3206 900, 1146, 1147, 1148, 1149, 1150, 1151, 900,
3207 901, 901, 1033, 1033, 1033, 900, 900, 900,
3208 900, 1033, 1033, 900, 1033, 1033, 900, 900,
3209 900, 1033, 1033, 1033, 1033, 900, 1152, 1153,
3210 1154, 900, 1033, 1033, 1033, 1033, 1033, 900,
3211 1033, 900, 1033, 900, 1155, 900, 901, 900,
3212 1156, 900, 1157, 1158, 1159, 1161, 1160, 900,
3213 1033, 900, 900, 1033, 1033, 901, 900, 901,
3214 900, 3, 265, 3, 1, 1162, 3, 1,
3215 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162,
3216 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162,
3217 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162,
3218 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163,
3219 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172,
3220 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180,
3221 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188,
3222 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162,
3223 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162,
3224 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194,
3225 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163,
3226 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194,
3227 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194,
3228 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195,
3229 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205,
3230 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184,
3231 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220,
3232 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194,
3233 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163,
3234 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194,
3235 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163,
3236 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163,
3237 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194,
3238 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163,
3239 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163,
3240 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194,
3241 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163,
3242 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194,
3243 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163,
3244 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163,
3245 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163,
3246 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222,
3247 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230,
3248 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238,
3249 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163,
3250 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194,
3251 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194,
3252 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163,
3253 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163,
3254 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163,
3255 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194,
3256 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194,
3257 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244,
3258 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243,
3259 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163,
3260 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163,
3261 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252,
3262 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
3263 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194,
3264 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194,
3265 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163,
3266 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163,
3267 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194,
3268 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194,
3269 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253,
3270 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270,
3271 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277,
3272 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163,
3273 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163,
3274 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286,
3275 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294,
3276 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301,
3277 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163,
3278 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194,
3279 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163,
3280 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194,
3281 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163,
3282 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194,
3283 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163,
3284 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163,
3285 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194,
3286 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194,
3287 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163,
3288 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194,
3289 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194,
3290 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163,
3291 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163,
3292 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163,
3293 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322,
3294 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194,
3295 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163,
3296 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194,
3297 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162,
3298 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194,
3299 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163,
3300 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163,
3301 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162,
3302 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194,
3303 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163,
3304 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162,
3305 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163,
3306 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163,
3307 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162,
3308 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163,
3309 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162,
3310 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163,
3311 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194,
3312 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162,
3313 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162,
3314 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328,
3315 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336,
3316 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344,
3317 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162,
3318 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194,
3319 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162,
3320 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163,
3321 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162,
3322 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194,
3323 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162,
3324 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163,
3325 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194,
3326 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194,
3327 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162,
3328 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194,
3329 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194,
3330 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349,
3331 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163,
3332 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353,
3333 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356,
3334 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363,
3335 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163,
3336 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194,
3337 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194,
3338 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162,
3339 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194,
3340 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162,
3341 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163,
3342 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162,
3343 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355,
3344 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371,
3345 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378,
3346 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163,
3347 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163,
3348 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387,
3349 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395,
3350 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403,
3351 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162,
3352 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162,
3353 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162,
3354 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162,
3355 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162,
3356 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162,
3357 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163,
3358 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162,
3359 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194,
3360 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162,
3361 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194,
3362 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194,
3363 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194,
3364 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162,
3365 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162,
3366 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162,
3367 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163,
3368 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163,
3369 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163,
3370 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416,
3371 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163,
3372 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163,
3373 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163,
3374 1162, 1163, 1163, 1162, 1162, 1162, 3, 1,
3375 3, 1162, 3, 1, 601, 1, 1425, 1427,
3376 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429,
3377 1430, 1431, 1432, 1427, 601, 1426, 890, 1,
3378 3, 610, 3, 1, 875, 875, 875, 877,
3379 1, 875, 875, 877, 875, 875, 877, 875,
3380 875, 875, 877, 875, 875, 877, 875, 875,
3381 877, 875, 875, 1, 877, 1429, 1430, 1431,
3382 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426,
3383 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429,
3384 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431,
3385 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426,
3386 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429,
3387 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431,
3388 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432,
3389 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427,
3390 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430,
3391 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432,
3392 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427,
3393 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430,
3394 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432,
3395 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426,
3396 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428,
3397 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431,
3398 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434,
3399 1435, 1437, 1430, 1436, 1, 890, 875, 3,
3400 875, 877, 3, 877, 3, 1, 875, 1,
3401 265, 265, 1, 265, 1438, 1439, 601, 1,
3402 265, 3, 1, 3, 3, 265, 3, 1,
3403 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446,
3404 601, 1, 266, 3, 1, 3, 266, 3,
3405 1, 1447, 601, 1, 3, 265, 3, 1,
3406 1448, 601, 1, 3, 265, 3, 1, 1449,
3407 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457,
3408 1458, 1459, 601, 1, 3, 1460, 1, 1462,
3409 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461,
3410 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462,
3411 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462,
3412 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463,
3413 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473,
3414 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481,
3415 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489,
3416 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462,
3417 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461,
3418 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462,
3419 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461,
3420 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461,
3421 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462,
3422 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461,
3423 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461,
3424 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462,
3425 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461,
3426 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462,
3427 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461,
3428 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461,
3429 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461,
3430 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491,
3431 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499,
3432 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507,
3433 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461,
3434 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462,
3435 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462,
3436 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461,
3437 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461,
3438 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461,
3439 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462,
3440 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462,
3441 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513,
3442 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512,
3443 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461,
3444 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461,
3445 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521,
3446 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529,
3447 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462,
3448 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462,
3449 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461,
3450 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461,
3451 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462,
3452 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462,
3453 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522,
3454 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539,
3455 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546,
3456 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461,
3457 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461,
3458 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555,
3459 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563,
3460 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570,
3461 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461,
3462 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462,
3463 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461,
3464 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462,
3465 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461,
3466 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462,
3467 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461,
3468 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461,
3469 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462,
3470 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462,
3471 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461,
3472 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462,
3473 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462,
3474 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461,
3475 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461,
3476 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461,
3477 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589,
3478 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589,
3479 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589,
3480 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461,
3481 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598,
3482 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606,
3483 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613,
3484 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589,
3485 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589,
3486 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589,
3487 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589,
3488 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589,
3489 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589,
3490 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589,
3491 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461,
3492 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589,
3493 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462,
3494 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589,
3495 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462,
3496 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461,
3497 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589,
3498 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461,
3499 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589,
3500 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589,
3501 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589,
3502 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589,
3503 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589,
3504 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589,
3505 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622,
3506 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630,
3507 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638,
3508 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461,
3509 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589,
3510 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461,
3511 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589,
3512 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589,
3513 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589,
3514 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589,
3515 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589,
3516 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461,
3517 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462,
3518 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589,
3519 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589,
3520 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589,
3521 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643,
3522 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589,
3523 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461,
3524 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650,
3525 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657,
3526 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589,
3527 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462,
3528 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589,
3529 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589,
3530 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589,
3531 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462,
3532 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589,
3533 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589,
3534 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461,
3535 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665,
3536 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672,
3537 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589,
3538 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589,
3539 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681,
3540 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689,
3541 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697,
3542 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462,
3543 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589,
3544 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462,
3545 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462,
3546 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461,
3547 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589,
3548 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462,
3549 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462,
3550 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461,
3551 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461,
3552 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589,
3553 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589,
3554 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462,
3555 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589,
3556 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461,
3557 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461,
3558 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462,
3559 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461,
3560 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461,
3561 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710,
3562 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589,
3563 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712,
3564 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589,
3565 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461,
3566 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461,
3567 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540,
3568 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541,
3569 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461,
3570 1162, 3, 1, 3, 1162, 3, 1162, 3,
3571 1, 1162, 1162, 3, 1162, 3, 1162, 3,
3572 1162, 3, 1162, 3, 1, 3, 3, 1162,
3573 1162, 3, 1, 1162, 1162, 3, 1, 1162,
3574 3, 1162, 3, 1, 3, 1162, 3, 1162,
3575 3, 1, 1162, 3, 1162, 3, 1, 1162,
3576 3, 1, 1162, 1162, 3, 3, 1162, 3,
3577 1162, 3, 1162, 1, 1440, 1, 1726, 1440,
3578 1, 1727, 1435, 1437, 1728, 1437, 601, 1436,
3579 1, 265, 3, 1, 3, 265, 1, 1,
3580 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738,
3581 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748,
3582 1729, 1, 1732, 1740, 1747, 1, 1731, 262,
3583 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756,
3584 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764,
3585 1765, 1766, 1767, 1749, 262, 264, 1750, 1751,
3586 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759,
3587 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767,
3588 1749, 256, 258, 1770, 1771, 1772, 1773, 1774,
3589 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782,
3590 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790,
3591 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798,
3592 1799, 1800, 1801, 1803, 268, 530, 576, 1802,
3593 1788, 527, 529, 1804, 1805, 1806, 1807, 1808,
3594 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816,
3595 1817, 1818, 1819, 1820, 1821, 1788, 527, 529,
3596 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811,
3597 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819,
3598 1823, 1821, 1788, 521, 523, 1824, 1825, 1826,
3599 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834,
3600 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788,
3601 527, 529, 1804, 1805, 1806, 1807, 1808, 1809,
3602 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817,
3603 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788,
3604 527, 529, 1804, 1805, 1806, 1807, 1808, 1809,
3605 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817,
3606 1818, 1848, 1819, 1820, 1821, 1788, 527, 529,
3607 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811,
3608 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850,
3609 1819, 1820, 1821, 1788, 527, 529, 1804, 1805,
3610 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813,
3611 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820,
3612 1821, 1788, 527, 529, 1804, 1805, 1806, 1807,
3613 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815,
3614 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871,
3615 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861,
3616 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869,
3617 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871,
3618 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861,
3619 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877,
3620 1873, 1874, 1875, 1854, 871, 873, 1855, 1856,
3621 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864,
3622 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875,
3623 1854, 865, 867, 1880, 1881, 1882, 1883, 1884,
3624 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892,
3625 1893, 1894, 1895, 1896, 1897, 1854, 871, 873,
3626 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862,
3627 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899,
3628 1873, 1874, 1875, 1854, 871, 873, 1855, 1856,
3629 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864,
3630 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874,
3631 1875, 1854, 871, 873, 1855, 1856, 1857, 1858,
3632 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902,
3633 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854,
3634 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910,
3635 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918,
3636 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905,
3637 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913,
3638 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924,
3639 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928,
3640 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936,
3641 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422,
3642 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950,
3643 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958,
3644 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963,
3645 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971,
3646 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979,
3647 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966,
3648 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980,
3649 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723,
3650 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990,
3651 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3652 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985,
3653 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993,
3654 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982,
3655 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988,
3656 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996,
3657 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004,
3658 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012,
3659 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020,
3660 2021, 1982,
3661}
3662
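// Transition-target table for the Ragel-generated grapheme cluster scanner:
// for each transition index, the state the machine moves to. (Comment added
// for orientation; the data itself is machine-generated.)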
3663var _graphclust_trans_targs []int16 = []int16{
3664 1974, 0, 1974, 1975, 15, 16, 17, 18,
3665 19, 20, 21, 22, 23, 24, 25, 26,
3666 27, 28, 29, 30, 31, 32, 33, 34,
3667 35, 36, 37, 38, 39, 40, 41, 42,
3668 44, 45, 46, 47, 48, 49, 50, 51,
3669 52, 53, 54, 55, 56, 57, 58, 59,
3670 60, 61, 62, 63, 64, 66, 68, 70,
3671 71, 72, 1976, 69, 74, 75, 77, 78,
3672 79, 80, 81, 82, 83, 84, 85, 86,
3673 87, 88, 89, 90, 91, 93, 94, 96,
3674 102, 125, 130, 132, 139, 143, 97, 98,
3675 99, 100, 101, 103, 104, 105, 106, 107,
3676 108, 109, 110, 111, 112, 113, 114, 115,
3677 116, 117, 118, 119, 120, 121, 122, 123,
3678 124, 126, 127, 128, 129, 131, 133, 134,
3679 135, 136, 137, 138, 140, 141, 142, 144,
3680 291, 292, 1977, 158, 159, 160, 161, 162,
3681 163, 164, 165, 166, 167, 168, 169, 170,
3682 171, 172, 173, 174, 175, 176, 177, 178,
3683 179, 180, 181, 182, 183, 184, 185, 186,
3684 188, 189, 190, 191, 192, 193, 194, 195,
3685 196, 197, 198, 199, 200, 201, 202, 203,
3686 204, 205, 206, 207, 208, 210, 211, 212,
3687 213, 214, 216, 217, 219, 220, 221, 222,
3688 223, 224, 225, 226, 227, 228, 229, 230,
3689 231, 232, 234, 235, 237, 243, 267, 271,
3690 273, 280, 284, 238, 239, 240, 241, 242,
3691 244, 245, 246, 247, 248, 249, 250, 251,
3692 252, 253, 254, 255, 256, 257, 258, 259,
3693 260, 261, 262, 263, 264, 265, 266, 268,
3694 269, 270, 272, 274, 275, 276, 277, 278,
3695 279, 281, 282, 283, 285, 287, 288, 289,
3696 145, 290, 146, 294, 295, 296, 2, 297,
3697 3, 1974, 1978, 1974, 1979, 315, 316, 317,
3698 318, 319, 320, 321, 322, 323, 324, 325,
3699 326, 327, 328, 329, 330, 331, 332, 333,
3700 334, 335, 336, 337, 338, 339, 340, 341,
3701 342, 344, 345, 346, 347, 348, 349, 350,
3702 351, 352, 353, 354, 355, 356, 357, 358,
3703 359, 360, 361, 362, 363, 364, 366, 368,
3704 370, 371, 372, 1980, 369, 374, 375, 377,
3705 378, 379, 380, 381, 382, 383, 384, 385,
3706 386, 387, 388, 389, 390, 391, 393, 394,
3707 396, 402, 425, 430, 432, 439, 443, 397,
3708 398, 399, 400, 401, 403, 404, 405, 406,
3709 407, 408, 409, 410, 411, 412, 413, 414,
3710 415, 416, 417, 418, 419, 420, 421, 422,
3711 423, 424, 426, 427, 428, 429, 431, 433,
3712 434, 435, 436, 437, 438, 440, 441, 442,
3713 444, 591, 592, 1981, 458, 459, 460, 461,
3714 462, 463, 464, 465, 466, 467, 468, 469,
3715 470, 471, 472, 473, 474, 475, 476, 477,
3716 478, 479, 480, 481, 482, 483, 484, 485,
3717 486, 488, 489, 490, 491, 492, 493, 494,
3718 495, 496, 497, 498, 499, 500, 501, 502,
3719 503, 504, 505, 506, 507, 508, 510, 511,
3720 512, 513, 514, 516, 517, 519, 520, 521,
3721 522, 523, 524, 525, 526, 527, 528, 529,
3722 530, 531, 532, 534, 535, 537, 543, 567,
3723 571, 573, 580, 584, 538, 539, 540, 541,
3724 542, 544, 545, 546, 547, 548, 549, 550,
3725 551, 552, 553, 554, 555, 556, 557, 558,
3726 559, 560, 561, 562, 563, 564, 565, 566,
3727 568, 569, 570, 572, 574, 575, 576, 577,
3728 578, 579, 581, 582, 583, 585, 587, 588,
3729 589, 445, 590, 446, 594, 595, 596, 302,
3730 597, 303, 599, 605, 606, 608, 610, 613,
3731 616, 640, 1982, 622, 1983, 612, 1984, 615,
3732 618, 620, 621, 624, 625, 629, 630, 631,
3733 632, 633, 634, 635, 1985, 628, 639, 642,
3734 643, 644, 645, 646, 649, 650, 651, 652,
3735 653, 654, 655, 656, 660, 661, 663, 664,
3736 647, 666, 669, 671, 673, 667, 668, 670,
3737 672, 674, 678, 679, 680, 681, 682, 683,
3738 684, 685, 686, 687, 1986, 676, 677, 690,
3739 691, 299, 695, 696, 698, 997, 1000, 1003,
3740 1027, 1974, 1987, 1974, 1988, 712, 713, 714,
3741 715, 716, 717, 718, 719, 720, 721, 722,
3742 723, 724, 725, 726, 727, 728, 729, 730,
3743 731, 732, 733, 734, 735, 736, 737, 738,
3744 739, 741, 742, 743, 744, 745, 746, 747,
3745 748, 749, 750, 751, 752, 753, 754, 755,
3746 756, 757, 758, 759, 760, 761, 763, 765,
3747 767, 768, 769, 1989, 766, 771, 772, 774,
3748 775, 776, 777, 778, 779, 780, 781, 782,
3749 783, 784, 785, 786, 787, 788, 790, 791,
3750 793, 799, 822, 827, 829, 836, 840, 794,
3751 795, 796, 797, 798, 800, 801, 802, 803,
3752 804, 805, 806, 807, 808, 809, 810, 811,
3753 812, 813, 814, 815, 816, 817, 818, 819,
3754 820, 821, 823, 824, 825, 826, 828, 830,
3755 831, 832, 833, 834, 835, 837, 838, 839,
3756 841, 988, 989, 1990, 855, 856, 857, 858,
3757 859, 860, 861, 862, 863, 864, 865, 866,
3758 867, 868, 869, 870, 871, 872, 873, 874,
3759 875, 876, 877, 878, 879, 880, 881, 882,
3760 883, 885, 886, 887, 888, 889, 890, 891,
3761 892, 893, 894, 895, 896, 897, 898, 899,
3762 900, 901, 902, 903, 904, 905, 907, 908,
3763 909, 910, 911, 913, 914, 916, 917, 918,
3764 919, 920, 921, 922, 923, 924, 925, 926,
3765 927, 928, 929, 931, 932, 934, 940, 964,
3766 968, 970, 977, 981, 935, 936, 937, 938,
3767 939, 941, 942, 943, 944, 945, 946, 947,
3768 948, 949, 950, 951, 952, 953, 954, 955,
3769 956, 957, 958, 959, 960, 961, 962, 963,
3770 965, 966, 967, 969, 971, 972, 973, 974,
3771 975, 976, 978, 979, 980, 982, 984, 985,
3772 986, 842, 987, 843, 991, 992, 993, 699,
3773 994, 700, 1009, 1991, 999, 1992, 1002, 1005,
3774 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019,
3775 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327,
3776 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044,
3777 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052,
3778 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060,
3779 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068,
3780 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077,
3781 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085,
3782 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094,
3783 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104,
3784 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112,
3785 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122,
3786 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125,
3787 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134,
3788 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142,
3789 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150,
3790 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161,
3791 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171,
3792 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325,
3793 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197,
3794 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205,
3795 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213,
3796 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222,
3797 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230,
3798 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238,
3799 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247,
3800 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257,
3801 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265,
3802 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307,
3803 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278,
3804 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286,
3805 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294,
3806 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303,
3807 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313,
3808 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179,
3809 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345,
3810 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504,
3811 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512,
3812 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519,
3813 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363,
3814 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371,
3815 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380,
3816 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389,
3817 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397,
3818 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405,
3819 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416,
3820 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424,
3821 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432,
3822 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469,
3823 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444,
3824 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453,
3825 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461,
3826 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470,
3827 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480,
3828 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490,
3829 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524,
3830 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532,
3831 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540,
3832 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549,
3833 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559,
3834 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567,
3835 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615,
3836 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580,
3837 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588,
3838 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596,
3839 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605,
3840 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616,
3841 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625,
3842 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636,
3843 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648,
3844 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654,
3845 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964,
3846 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672,
3847 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680,
3848 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688,
3849 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696,
3850 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705,
3851 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713,
3852 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721,
3853 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731,
3854 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740,
3855 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748,
3856 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796,
3857 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761,
3858 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769,
3859 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777,
3860 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786,
3861 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797,
3862 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816,
3863 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824,
3864 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832,
3865 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840,
3866 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849,
3867 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857,
3868 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865,
3869 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876,
3870 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884,
3871 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894,
3872 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896,
3873 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905,
3874 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913,
3875 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921,
3876 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932,
3877 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942,
3878 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952,
3879 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972,
3880 1973, 1974, 1, 1975, 299, 300, 301, 692,
3881 693, 694, 697, 1028, 1628, 1629, 1638, 1639,
3882 1640, 1646, 1649, 1969, 1970, 1974, 4, 5,
3883 6, 7, 8, 9, 10, 11, 12, 13,
3884 14, 43, 65, 73, 76, 92, 298, 293,
3885 67, 95, 147, 148, 149, 150, 151, 152,
3886 153, 154, 155, 156, 157, 187, 209, 215,
3887 218, 233, 236, 286, 1974, 600, 601, 602,
3888 603, 604, 607, 641, 648, 657, 658, 659,
3889 662, 665, 688, 689, 304, 305, 306, 307,
3890 308, 309, 310, 311, 312, 313, 314, 343,
3891 365, 373, 376, 392, 598, 593, 367, 395,
3892 447, 448, 449, 450, 451, 452, 453, 454,
3893 455, 456, 457, 487, 509, 515, 518, 533,
3894 536, 586, 609, 623, 636, 637, 638, 611,
3895 619, 614, 617, 626, 627, 675, 1974, 701,
3896 702, 703, 704, 705, 706, 707, 708, 709,
3897 710, 711, 996, 762, 770, 1010, 1023, 1024,
3898 1025, 789, 995, 990, 740, 773, 764, 792,
3899 844, 845, 846, 847, 848, 849, 850, 851,
3900 852, 853, 854, 884, 906, 912, 915, 930,
3901 933, 983, 998, 1006, 1001, 1004, 1013, 1014,
3902 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038,
3903 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102,
3904 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183,
3905 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191,
3906 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974,
3907 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338,
3908 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568,
3909 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353,
3910 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417,
3911 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661,
3912 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669,
3913 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956,
3914 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807,
3915 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844,
3916 1866, 1872, 1875, 1890, 1893, 1943,
3917}
3918
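// Per-transition action indices for the generated scanner; 0 means no action
// is run on that transition.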
3919var _graphclust_trans_actions []byte = []byte{
3920 31, 0, 27, 40, 0, 0, 0, 0,
3921 0, 0, 0, 0, 0, 0, 0, 0,
3922 0, 0, 0, 0, 0, 0, 0, 0,
3923 0, 0, 0, 0, 0, 0, 0, 0,
3924 0, 0, 0, 0, 0, 0, 0, 0,
3925 0, 0, 0, 0, 0, 0, 0, 0,
3926 0, 0, 0, 0, 0, 0, 0, 0,
3927 0, 0, 40, 0, 0, 0, 0, 0,
3928 0, 0, 0, 0, 0, 0, 0, 0,
3929 0, 0, 0, 0, 0, 0, 0, 0,
3930 0, 0, 0, 0, 0, 0, 0, 0,
3931 0, 0, 0, 0, 0, 0, 0, 0,
3932 0, 0, 0, 0, 0, 0, 0, 0,
3933 0, 0, 0, 0, 0, 0, 0, 0,
3934 0, 0, 0, 0, 0, 0, 0, 0,
3935 0, 0, 0, 0, 0, 0, 0, 0,
3936 0, 0, 40, 0, 0, 0, 0, 0,
3937 0, 0, 0, 0, 0, 0, 0, 0,
3938 0, 0, 0, 0, 0, 0, 0, 0,
3939 0, 0, 0, 0, 0, 0, 0, 0,
3940 0, 0, 0, 0, 0, 0, 0, 0,
3941 0, 0, 0, 0, 0, 0, 0, 0,
3942 0, 0, 0, 0, 0, 0, 0, 0,
3943 0, 0, 0, 0, 0, 0, 0, 0,
3944 0, 0, 0, 0, 0, 0, 0, 0,
3945 0, 0, 0, 0, 0, 0, 0, 0,
3946 0, 0, 0, 0, 0, 0, 0, 0,
3947 0, 0, 0, 0, 0, 0, 0, 0,
3948 0, 0, 0, 0, 0, 0, 0, 0,
3949 0, 0, 0, 0, 0, 0, 0, 0,
3950 0, 0, 0, 0, 0, 0, 0, 0,
3951 0, 0, 0, 0, 0, 0, 0, 0,
3952 0, 0, 0, 0, 0, 0, 0, 0,
3953 0, 34, 40, 25, 40, 0, 0, 0,
3954 0, 0, 0, 0, 0, 0, 0, 0,
3955 0, 0, 0, 0, 0, 0, 0, 0,
3956 0, 0, 0, 0, 0, 0, 0, 0,
3957 0, 0, 0, 0, 0, 0, 0, 0,
3958 0, 0, 0, 0, 0, 0, 0, 0,
3959 0, 0, 0, 0, 0, 0, 0, 0,
3960 0, 0, 0, 40, 0, 0, 0, 0,
3961 0, 0, 0, 0, 0, 0, 0, 0,
3962 0, 0, 0, 0, 0, 0, 0, 0,
3963 0, 0, 0, 0, 0, 0, 0, 0,
3964 0, 0, 0, 0, 0, 0, 0, 0,
3965 0, 0, 0, 0, 0, 0, 0, 0,
3966 0, 0, 0, 0, 0, 0, 0, 0,
3967 0, 0, 0, 0, 0, 0, 0, 0,
3968 0, 0, 0, 0, 0, 0, 0, 0,
3969 0, 0, 0, 40, 0, 0, 0, 0,
3970 0, 0, 0, 0, 0, 0, 0, 0,
3971 0, 0, 0, 0, 0, 0, 0, 0,
3972 0, 0, 0, 0, 0, 0, 0, 0,
3973 0, 0, 0, 0, 0, 0, 0, 0,
3974 0, 0, 0, 0, 0, 0, 0, 0,
3975 0, 0, 0, 0, 0, 0, 0, 0,
3976 0, 0, 0, 0, 0, 0, 0, 0,
3977 0, 0, 0, 0, 0, 0, 0, 0,
3978 0, 0, 0, 0, 0, 0, 0, 0,
3979 0, 0, 0, 0, 0, 0, 0, 0,
3980 0, 0, 0, 0, 0, 0, 0, 0,
3981 0, 0, 0, 0, 0, 0, 0, 0,
3982 0, 0, 0, 0, 0, 0, 0, 0,
3983 0, 0, 0, 0, 0, 0, 0, 0,
3984 0, 0, 0, 0, 0, 0, 0, 0,
3985 0, 0, 0, 0, 0, 0, 0, 0,
3986 0, 0, 0, 0, 0, 0, 0, 0,
3987 0, 0, 40, 0, 40, 0, 40, 0,
3988 0, 0, 0, 0, 0, 0, 0, 0,
3989 0, 0, 0, 0, 40, 0, 0, 0,
3990 0, 0, 0, 0, 0, 0, 0, 0,
3991 0, 0, 0, 0, 0, 0, 0, 0,
3992 0, 0, 0, 0, 0, 0, 0, 0,
3993 0, 0, 0, 0, 0, 0, 0, 0,
3994 0, 0, 0, 0, 40, 0, 0, 0,
3995 0, 0, 0, 0, 0, 0, 0, 0,
3996 0, 29, 51, 17, 40, 0, 0, 0,
3997 0, 0, 0, 0, 0, 0, 0, 0,
3998 0, 0, 0, 0, 0, 0, 0, 0,
3999 0, 0, 0, 0, 0, 0, 0, 0,
4000 0, 0, 0, 0, 0, 0, 0, 0,
4001 0, 0, 0, 0, 0, 0, 0, 0,
4002 0, 0, 0, 0, 0, 0, 0, 0,
4003 0, 0, 0, 40, 0, 0, 0, 0,
4004 0, 0, 0, 0, 0, 0, 0, 0,
4005 0, 0, 0, 0, 0, 0, 0, 0,
4006 0, 0, 0, 0, 0, 0, 0, 0,
4007 0, 0, 0, 0, 0, 0, 0, 0,
4008 0, 0, 0, 0, 0, 0, 0, 0,
4009 0, 0, 0, 0, 0, 0, 0, 0,
4010 0, 0, 0, 0, 0, 0, 0, 0,
4011 0, 0, 0, 0, 0, 0, 0, 0,
4012 0, 0, 0, 40, 0, 0, 0, 0,
4013 0, 0, 0, 0, 0, 0, 0, 0,
4014 0, 0, 0, 0, 0, 0, 0, 0,
4015 0, 0, 0, 0, 0, 0, 0, 0,
4016 0, 0, 0, 0, 0, 0, 0, 0,
4017 0, 0, 0, 0, 0, 0, 0, 0,
4018 0, 0, 0, 0, 0, 0, 0, 0,
4019 0, 0, 0, 0, 0, 0, 0, 0,
4020 0, 0, 0, 0, 0, 0, 0, 0,
4021 0, 0, 0, 0, 0, 0, 0, 0,
4022 0, 0, 0, 0, 0, 0, 0, 0,
4023 0, 0, 0, 0, 0, 0, 0, 0,
4024 0, 0, 0, 0, 0, 0, 0, 0,
4025 0, 0, 0, 0, 0, 0, 0, 0,
4026 0, 0, 0, 0, 0, 0, 0, 0,
4027 0, 0, 0, 0, 0, 0, 0, 0,
4028 0, 0, 0, 0, 0, 0, 0, 0,
4029 0, 0, 0, 51, 0, 51, 0, 0,
4030 0, 0, 0, 0, 0, 0, 0, 0,
4031 0, 0, 0, 40, 0, 0, 0, 0,
4032 0, 0, 0, 40, 21, 40, 0, 0,
4033 0, 0, 0, 0, 0, 0, 0, 0,
4034 0, 0, 0, 0, 0, 0, 0, 0,
4035 0, 0, 0, 0, 0, 0, 0, 0,
4036 0, 0, 0, 0, 0, 0, 0, 0,
4037 0, 0, 0, 0, 0, 0, 0, 0,
4038 0, 0, 0, 0, 0, 0, 0, 0,
4039 0, 0, 0, 0, 0, 0, 0, 0,
4040 0, 0, 0, 0, 0, 0, 0, 0,
4041 0, 0, 0, 0, 0, 0, 0, 0,
4042 0, 0, 0, 0, 0, 0, 0, 0,
4043 0, 0, 0, 0, 0, 0, 0, 0,
4044 0, 0, 0, 0, 0, 0, 0, 0,
4045 0, 0, 0, 0, 0, 0, 0, 0,
4046 0, 0, 0, 0, 0, 0, 0, 0,
4047 0, 0, 0, 0, 0, 0, 0, 0,
4048 0, 0, 0, 0, 0, 0, 0, 0,
4049 0, 40, 0, 0, 0, 0, 0, 0,
4050 0, 0, 0, 0, 0, 0, 0, 0,
4051 0, 0, 0, 0, 0, 0, 0, 0,
4052 0, 0, 0, 0, 0, 0, 0, 0,
4053 0, 0, 0, 0, 0, 0, 0, 0,
4054 0, 0, 0, 0, 0, 0, 0, 0,
4055 0, 0, 0, 0, 0, 0, 0, 0,
4056 0, 0, 0, 0, 0, 0, 0, 0,
4057 0, 0, 0, 0, 0, 0, 0, 0,
4058 0, 0, 0, 0, 0, 0, 0, 0,
4059 0, 0, 0, 0, 0, 0, 0, 0,
4060 0, 0, 0, 0, 0, 0, 0, 0,
4061 0, 0, 0, 0, 0, 0, 0, 0,
4062 0, 0, 0, 0, 0, 0, 0, 0,
4063 0, 0, 0, 0, 0, 0, 0, 0,
4064 0, 0, 0, 0, 0, 0, 0, 0,
4065 0, 0, 40, 19, 0, 0, 0, 0,
4066 0, 0, 0, 0, 0, 0, 0, 0,
4067 0, 0, 0, 0, 0, 0, 0, 0,
4068 0, 0, 0, 0, 0, 0, 0, 0,
4069 0, 0, 40, 0, 0, 0, 0, 0,
4070 0, 0, 0, 0, 0, 0, 0, 0,
4071 0, 0, 0, 0, 0, 0, 0, 0,
4072 0, 0, 0, 0, 0, 0, 0, 0,
4073 0, 0, 0, 0, 0, 0, 0, 0,
4074 0, 0, 0, 0, 0, 0, 0, 0,
4075 0, 0, 0, 0, 0, 0, 0, 0,
4076 40, 0, 0, 0, 0, 0, 0, 0,
4077 0, 0, 0, 0, 0, 0, 0, 0,
4078 0, 0, 0, 0, 0, 0, 0, 0,
4079 0, 0, 0, 0, 0, 0, 0, 0,
4080 0, 0, 0, 0, 0, 0, 0, 0,
4081 0, 0, 0, 0, 0, 0, 0, 0,
4082 0, 0, 0, 0, 0, 0, 0, 0,
4083 0, 0, 0, 0, 0, 0, 0, 0,
4084 0, 0, 0, 0, 0, 0, 0, 0,
4085 0, 0, 0, 0, 0, 0, 0, 0,
4086 0, 0, 0, 0, 0, 0, 0, 0,
4087 0, 0, 0, 0, 0, 0, 0, 0,
4088 0, 0, 0, 0, 0, 0, 0, 0,
4089 0, 0, 0, 0, 0, 0, 0, 0,
4090 0, 0, 0, 0, 0, 0, 0, 0,
4091 0, 0, 0, 0, 0, 0, 0, 0,
4092 0, 0, 0, 0, 0, 0, 0, 0,
4093 0, 0, 0, 0, 0, 0, 0, 0,
4094 0, 0, 0, 0, 0, 0, 0, 0,
4095 0, 0, 0, 0, 0, 0, 0, 0,
4096 0, 0, 0, 0, 0, 0, 0, 0,
4097 0, 0, 0, 0, 0, 0, 0, 0,
4098 0, 0, 0, 0, 0, 0, 0, 0,
4099 0, 0, 0, 0, 0, 0, 0, 0,
4100 0, 0, 0, 0, 0, 0, 0, 0,
4101 0, 0, 0, 0, 0, 0, 0, 0,
4102 0, 0, 0, 0, 40, 23, 40, 0,
4103 0, 0, 0, 0, 0, 0, 0, 0,
4104 0, 0, 0, 0, 0, 0, 0, 0,
4105 0, 0, 0, 0, 0, 0, 0, 0,
4106 0, 0, 0, 0, 0, 0, 0, 0,
4107 0, 0, 0, 0, 0, 0, 0, 0,
4108 0, 0, 0, 0, 0, 0, 0, 0,
4109 0, 0, 0, 0, 0, 40, 0, 0,
4110 0, 0, 0, 0, 0, 0, 0, 0,
4111 0, 0, 0, 0, 0, 0, 0, 0,
4112 0, 0, 0, 0, 0, 0, 0, 0,
4113 0, 0, 0, 0, 0, 0, 0, 0,
4114 0, 0, 0, 0, 0, 0, 0, 0,
4115 0, 0, 0, 0, 0, 0, 0, 0,
4116 0, 0, 0, 0, 0, 0, 0, 0,
4117 0, 0, 0, 0, 0, 0, 0, 0,
4118 0, 0, 0, 0, 0, 40, 0, 0,
4119 0, 0, 0, 0, 0, 0, 0, 0,
4120 0, 0, 0, 0, 0, 0, 0, 0,
4121 0, 0, 0, 0, 0, 0, 0, 0,
4122 0, 0, 0, 0, 0, 0, 0, 0,
4123 0, 0, 0, 0, 0, 0, 0, 0,
4124 0, 0, 0, 0, 0, 0, 0, 0,
4125 0, 0, 0, 0, 0, 0, 0, 0,
4126 0, 0, 0, 0, 0, 0, 0, 0,
4127 0, 0, 0, 0, 0, 0, 0, 0,
4128 0, 0, 0, 0, 0, 0, 0, 0,
4129 0, 0, 0, 0, 0, 0, 0, 0,
4130 0, 0, 0, 0, 0, 0, 0, 0,
4131 0, 0, 0, 0, 0, 0, 0, 0,
4132 0, 0, 0, 0, 0, 0, 0, 0,
4133 0, 0, 0, 0, 0, 0, 0, 0,
4134 0, 0, 0, 0, 0, 0, 0, 0,
4135 0, 0, 0, 0, 0, 0, 0, 0,
4136 0, 43, 1, 47, 1, 1, 1, 1,
4137 1, 1, 1, 1, 1, 1, 1, 1,
4138 1, 1, 1, 1, 1, 15, 0, 0,
4139 0, 0, 0, 0, 0, 0, 0, 0,
4140 0, 0, 0, 0, 0, 0, 0, 0,
4141 0, 0, 0, 0, 0, 0, 0, 0,
4142 0, 0, 0, 0, 0, 0, 0, 0,
4143 0, 0, 0, 0, 13, 0, 0, 0,
4144 0, 0, 0, 0, 0, 0, 0, 0,
4145 0, 0, 0, 0, 0, 0, 0, 0,
4146 0, 0, 0, 0, 0, 0, 0, 0,
4147 0, 0, 0, 0, 0, 0, 0, 0,
4148 0, 0, 0, 0, 0, 0, 0, 0,
4149 0, 0, 0, 0, 0, 0, 0, 0,
4150 0, 0, 0, 0, 0, 0, 0, 0,
4151 0, 0, 0, 0, 0, 0, 5, 0,
4152 0, 0, 0, 0, 0, 0, 0, 0,
4153 0, 0, 0, 0, 0, 0, 0, 0,
4154 0, 0, 0, 0, 0, 0, 0, 0,
4155 0, 0, 0, 0, 0, 0, 0, 0,
4156 0, 0, 0, 0, 0, 0, 0, 0,
4157 0, 0, 0, 0, 0, 0, 0, 0,
4158 9, 0, 0, 0, 0, 0, 0, 0,
4159 0, 0, 0, 0, 0, 0, 0, 0,
4160 0, 0, 0, 0, 0, 0, 0, 0,
4161 0, 0, 0, 0, 0, 0, 0, 0,
4162 0, 0, 0, 0, 0, 0, 0, 7,
4163 0, 0, 0, 0, 0, 0, 0, 0,
4164 0, 0, 0, 0, 0, 0, 0, 0,
4165 0, 0, 0, 0, 0, 0, 0, 0,
4166 0, 0, 0, 0, 0, 0, 0, 0,
4167 0, 0, 0, 0, 0, 0, 11, 0,
4168 0, 0, 0, 0, 0, 0, 0, 0,
4169 0, 0, 0, 0, 0, 0, 0, 0,
4170 0, 0, 0, 0, 0, 0, 0, 0,
4171 0, 0, 0, 0, 0, 0, 0, 0,
4172 0, 0, 0, 0, 0, 0,
4173}
4174
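// To-state actions: action index run whenever the machine enters a given
// state (0 = none), following the usual Ragel table layout.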
4175var _graphclust_to_state_actions []byte = []byte{
4176 0, 0, 0, 0, 0, 0, 0, 0,
4177 0, 0, 0, 0, 0, 0, 0, 0,
4178 0, 0, 0, 0, 0, 0, 0, 0,
4179 0, 0, 0, 0, 0, 0, 0, 0,
4180 0, 0, 0, 0, 0, 0, 0, 0,
4181 0, 0, 0, 0, 0, 0, 0, 0,
4182 0, 0, 0, 0, 0, 0, 0, 0,
4183 0, 0, 0, 0, 0, 0, 0, 0,
4184 0, 0, 0, 0, 0, 0, 0, 0,
4185 0, 0, 0, 0, 0, 0, 0, 0,
4186 0, 0, 0, 0, 0, 0, 0, 0,
4187 0, 0, 0, 0, 0, 0, 0, 0,
4188 0, 0, 0, 0, 0, 0, 0, 0,
4189 0, 0, 0, 0, 0, 0, 0, 0,
4190 0, 0, 0, 0, 0, 0, 0, 0,
4191 0, 0, 0, 0, 0, 0, 0, 0,
4192 0, 0, 0, 0, 0, 0, 0, 0,
4193 0, 0, 0, 0, 0, 0, 0, 0,
4194 0, 0, 0, 0, 0, 0, 0, 0,
4195 0, 0, 0, 0, 0, 0, 0, 0,
4196 0, 0, 0, 0, 0, 0, 0, 0,
4197 0, 0, 0, 0, 0, 0, 0, 0,
4198 0, 0, 0, 0, 0, 0, 0, 0,
4199 0, 0, 0, 0, 0, 0, 0, 0,
4200 0, 0, 0, 0, 0, 0, 0, 0,
4201 0, 0, 0, 0, 0, 0, 0, 0,
4202 0, 0, 0, 0, 0, 0, 0, 0,
4203 0, 0, 0, 0, 0, 0, 0, 0,
4204 0, 0, 0, 0, 0, 0, 0, 0,
4205 0, 0, 0, 0, 0, 0, 0, 0,
4206 0, 0, 0, 0, 0, 0, 0, 0,
4207 0, 0, 0, 0, 0, 0, 0, 0,
4208 0, 0, 0, 0, 0, 0, 0, 0,
4209 0, 0, 0, 0, 0, 0, 0, 0,
4210 0, 0, 0, 0, 0, 0, 0, 0,
4211 0, 0, 0, 0, 0, 0, 0, 0,
4212 0, 0, 0, 0, 0, 0, 0, 0,
4213 0, 0, 0, 0, 0, 0, 0, 0,
4214 0, 0, 0, 0, 0, 0, 0, 0,
4215 0, 0, 0, 0, 0, 0, 0, 0,
4216 0, 0, 0, 0, 0, 0, 0, 0,
4217 0, 0, 0, 0, 0, 0, 0, 0,
4218 0, 0, 0, 0, 0, 0, 0, 0,
4219 0, 0, 0, 0, 0, 0, 0, 0,
4220 0, 0, 0, 0, 0, 0, 0, 0,
4221 0, 0, 0, 0, 0, 0, 0, 0,
4222 0, 0, 0, 0, 0, 0, 0, 0,
4223 0, 0, 0, 0, 0, 0, 0, 0,
4224 0, 0, 0, 0, 0, 0, 0, 0,
4225 0, 0, 0, 0, 0, 0, 0, 0,
4226 0, 0, 0, 0, 0, 0, 0, 0,
4227 0, 0, 0, 0, 0, 0, 0, 0,
4228 0, 0, 0, 0, 0, 0, 0, 0,
4229 0, 0, 0, 0, 0, 0, 0, 0,
4230 0, 0, 0, 0, 0, 0, 0, 0,
4231 0, 0, 0, 0, 0, 0, 0, 0,
4232 0, 0, 0, 0, 0, 0, 0, 0,
4233 0, 0, 0, 0, 0, 0, 0, 0,
4234 0, 0, 0, 0, 0, 0, 0, 0,
4235 0, 0, 0, 0, 0, 0, 0, 0,
4236 0, 0, 0, 0, 0, 0, 0, 0,
4237 0, 0, 0, 0, 0, 0, 0, 0,
4238 0, 0, 0, 0, 0, 0, 0, 0,
4239 0, 0, 0, 0, 0, 0, 0, 0,
4240 0, 0, 0, 0, 0, 0, 0, 0,
4241 0, 0, 0, 0, 0, 0, 0, 0,
4242 0, 0, 0, 0, 0, 0, 0, 0,
4243 0, 0, 0, 0, 0, 0, 0, 0,
4244 0, 0, 0, 0, 0, 0, 0, 0,
4245 0, 0, 0, 0, 0, 0, 0, 0,
4246 0, 0, 0, 0, 0, 0, 0, 0,
4247 0, 0, 0, 0, 0, 0, 0, 0,
4248 0, 0, 0, 0, 0, 0, 0, 0,
4249 0, 0, 0, 0, 0, 0, 0, 0,
4250 0, 0, 0, 0, 0, 0, 0, 0,
4251 0, 0, 0, 0, 0, 0, 0, 0,
4252 0, 0, 0, 0, 0, 0, 0, 0,
4253 0, 0, 0, 0, 0, 0, 0, 0,
4254 0, 0, 0, 0, 0, 0, 0, 0,
4255 0, 0, 0, 0, 0, 0, 0, 0,
4256 0, 0, 0, 0, 0, 0, 0, 0,
4257 0, 0, 0, 0, 0, 0, 0, 0,
4258 0, 0, 0, 0, 0, 0, 0, 0,
4259 0, 0, 0, 0, 0, 0, 0, 0,
4260 0, 0, 0, 0, 0, 0, 0, 0,
4261 0, 0, 0, 0, 0, 0, 0, 0,
4262 0, 0, 0, 0, 0, 0, 0, 0,
4263 0, 0, 0, 0, 0, 0, 0, 0,
4264 0, 0, 0, 0, 0, 0, 0, 0,
4265 0, 0, 0, 0, 0, 0, 0, 0,
4266 0, 0, 0, 0, 0, 0, 0, 0,
4267 0, 0, 0, 0, 0, 0, 0, 0,
4268 0, 0, 0, 0, 0, 0, 0, 0,
4269 0, 0, 0, 0, 0, 0, 0, 0,
4270 0, 0, 0, 0, 0, 0, 0, 0,
4271 0, 0, 0, 0, 0, 0, 0, 0,
4272 0, 0, 0, 0, 0, 0, 0, 0,
4273 0, 0, 0, 0, 0, 0, 0, 0,
4274 0, 0, 0, 0, 0, 0, 0, 0,
4275 0, 0, 0, 0, 0, 0, 0, 0,
4276 0, 0, 0, 0, 0, 0, 0, 0,
4277 0, 0, 0, 0, 0, 0, 0, 0,
4278 0, 0, 0, 0, 0, 0, 0, 0,
4279 0, 0, 0, 0, 0, 0, 0, 0,
4280 0, 0, 0, 0, 0, 0, 0, 0,
4281 0, 0, 0, 0, 0, 0, 0, 0,
4282 0, 0, 0, 0, 0, 0, 0, 0,
4283 0, 0, 0, 0, 0, 0, 0, 0,
4284 0, 0, 0, 0, 0, 0, 0, 0,
4285 0, 0, 0, 0, 0, 0, 0, 0,
4286 0, 0, 0, 0, 0, 0, 0, 0,
4287 0, 0, 0, 0, 0, 0, 0, 0,
4288 0, 0, 0, 0, 0, 0, 0, 0,
4289 0, 0, 0, 0, 0, 0, 0, 0,
4290 0, 0, 0, 0, 0, 0, 0, 0,
4291 0, 0, 0, 0, 0, 0, 0, 0,
4292 0, 0, 0, 0, 0, 0, 0, 0,
4293 0, 0, 0, 0, 0, 0, 0, 0,
4294 0, 0, 0, 0, 0, 0, 0, 0,
4295 0, 0, 0, 0, 0, 0, 0, 0,
4296 0, 0, 0, 0, 0, 0, 0, 0,
4297 0, 0, 0, 0, 0, 0, 0, 0,
4298 0, 0, 0, 0, 0, 0, 0, 0,
4299 0, 0, 0, 0, 0, 0, 0, 0,
4300 0, 0, 0, 0, 0, 0, 0, 0,
4301 0, 0, 0, 0, 0, 0, 0, 0,
4302 0, 0, 0, 0, 0, 0, 0, 0,
4303 0, 0, 0, 0, 0, 0, 0, 0,
4304 0, 0, 0, 0, 0, 0, 0, 0,
4305 0, 0, 0, 0, 0, 0, 0, 0,
4306 0, 0, 0, 0, 0, 0, 0, 0,
4307 0, 0, 0, 0, 0, 0, 0, 0,
4308 0, 0, 0, 0, 0, 0, 0, 0,
4309 0, 0, 0, 0, 0, 0, 0, 0,
4310 0, 0, 0, 0, 0, 0, 0, 0,
4311 0, 0, 0, 0, 0, 0, 0, 0,
4312 0, 0, 0, 0, 0, 0, 0, 0,
4313 0, 0, 0, 0, 0, 0, 0, 0,
4314 0, 0, 0, 0, 0, 0, 0, 0,
4315 0, 0, 0, 0, 0, 0, 0, 0,
4316 0, 0, 0, 0, 0, 0, 0, 0,
4317 0, 0, 0, 0, 0, 0, 0, 0,
4318 0, 0, 0, 0, 0, 0, 0, 0,
4319 0, 0, 0, 0, 0, 0, 0, 0,
4320 0, 0, 0, 0, 0, 0, 0, 0,
4321 0, 0, 0, 0, 0, 0, 0, 0,
4322 0, 0, 0, 0, 0, 0, 0, 0,
4323 0, 0, 0, 0, 0, 0, 0, 0,
4324 0, 0, 0, 0, 0, 0, 0, 0,
4325 0, 0, 0, 0, 0, 0, 0, 0,
4326 0, 0, 0, 0, 0, 0, 0, 0,
4327 0, 0, 0, 0, 0, 0, 0, 0,
4328 0, 0, 0, 0, 0, 0, 0, 0,
4329 0, 0, 0, 0, 0, 0, 0, 0,
4330 0, 0, 0, 0, 0, 0, 0, 0,
4331 0, 0, 0, 0, 0, 0, 0, 0,
4332 0, 0, 0, 0, 0, 0, 0, 0,
4333 0, 0, 0, 0, 0, 0, 0, 0,
4334 0, 0, 0, 0, 0, 0, 0, 0,
4335 0, 0, 0, 0, 0, 0, 0, 0,
4336 0, 0, 0, 0, 0, 0, 0, 0,
4337 0, 0, 0, 0, 0, 0, 0, 0,
4338 0, 0, 0, 0, 0, 0, 0, 0,
4339 0, 0, 0, 0, 0, 0, 0, 0,
4340 0, 0, 0, 0, 0, 0, 0, 0,
4341 0, 0, 0, 0, 0, 0, 0, 0,
4342 0, 0, 0, 0, 0, 0, 0, 0,
4343 0, 0, 0, 0, 0, 0, 0, 0,
4344 0, 0, 0, 0, 0, 0, 0, 0,
4345 0, 0, 0, 0, 0, 0, 0, 0,
4346 0, 0, 0, 0, 0, 0, 0, 0,
4347 0, 0, 0, 0, 0, 0, 0, 0,
4348 0, 0, 0, 0, 0, 0, 0, 0,
4349 0, 0, 0, 0, 0, 0, 0, 0,
4350 0, 0, 0, 0, 0, 0, 0, 0,
4351 0, 0, 0, 0, 0, 0, 0, 0,
4352 0, 0, 0, 0, 0, 0, 0, 0,
4353 0, 0, 0, 0, 0, 0, 0, 0,
4354 0, 0, 0, 0, 0, 0, 0, 0,
4355 0, 0, 0, 0, 0, 0, 0, 0,
4356 0, 0, 0, 0, 0, 0, 0, 0,
4357 0, 0, 0, 0, 0, 0, 0, 0,
4358 0, 0, 0, 0, 0, 0, 0, 0,
4359 0, 0, 0, 0, 0, 0, 0, 0,
4360 0, 0, 0, 0, 0, 0, 0, 0,
4361 0, 0, 0, 0, 0, 0, 0, 0,
4362 0, 0, 0, 0, 0, 0, 0, 0,
4363 0, 0, 0, 0, 0, 0, 0, 0,
4364 0, 0, 0, 0, 0, 0, 0, 0,
4365 0, 0, 0, 0, 0, 0, 0, 0,
4366 0, 0, 0, 0, 0, 0, 0, 0,
4367 0, 0, 0, 0, 0, 0, 0, 0,
4368 0, 0, 0, 0, 0, 0, 0, 0,
4369 0, 0, 0, 0, 0, 0, 0, 0,
4370 0, 0, 0, 0, 0, 0, 0, 0,
4371 0, 0, 0, 0, 0, 0, 0, 0,
4372 0, 0, 0, 0, 0, 0, 0, 0,
4373 0, 0, 0, 0, 0, 0, 0, 0,
4374 0, 0, 0, 0, 0, 0, 0, 0,
4375 0, 0, 0, 0, 0, 0, 0, 0,
4376 0, 0, 0, 0, 0, 0, 0, 0,
4377 0, 0, 0, 0, 0, 0, 0, 0,
4378 0, 0, 0, 0, 0, 0, 0, 0,
4379 0, 0, 0, 0, 0, 0, 0, 0,
4380 0, 0, 0, 0, 0, 0, 0, 0,
4381 0, 0, 0, 0, 0, 0, 0, 0,
4382 0, 0, 0, 0, 0, 0, 0, 0,
4383 0, 0, 0, 0, 0, 0, 0, 0,
4384 0, 0, 0, 0, 0, 0, 0, 0,
4385 0, 0, 0, 0, 0, 0, 0, 0,
4386 0, 0, 0, 0, 0, 0, 0, 0,
4387 0, 0, 0, 0, 0, 0, 0, 0,
4388 0, 0, 0, 0, 0, 0, 0, 0,
4389 0, 0, 0, 0, 0, 0, 0, 0,
4390 0, 0, 0, 0, 0, 0, 0, 0,
4391 0, 0, 0, 0, 0, 0, 0, 0,
4392 0, 0, 0, 0, 0, 0, 0, 0,
4393 0, 0, 0, 0, 0, 0, 0, 0,
4394 0, 0, 0, 0, 0, 0, 0, 0,
4395 0, 0, 0, 0, 0, 0, 0, 0,
4396 0, 0, 0, 0, 0, 0, 0, 0,
4397 0, 0, 0, 0, 0, 0, 0, 0,
4398 0, 0, 0, 0, 0, 0, 0, 0,
4399 0, 0, 0, 0, 0, 0, 0, 0,
4400 0, 0, 0, 0, 0, 0, 0, 0,
4401 0, 0, 0, 0, 0, 0, 0, 0,
4402 0, 0, 0, 0, 0, 0, 0, 0,
4403 0, 0, 0, 0, 0, 0, 0, 0,
4404 0, 0, 0, 0, 0, 0, 0, 0,
4405 0, 0, 0, 0, 0, 0, 0, 0,
4406 0, 0, 0, 0, 0, 0, 0, 0,
4407 0, 0, 0, 0, 0, 0, 0, 0,
4408 0, 0, 0, 0, 0, 0, 0, 0,
4409 0, 0, 0, 0, 0, 0, 0, 0,
4410 0, 0, 0, 0, 0, 0, 0, 0,
4411 0, 0, 0, 0, 0, 0, 0, 0,
4412 0, 0, 0, 0, 0, 0, 0, 0,
4413 0, 0, 0, 0, 0, 0, 0, 0,
4414 0, 0, 0, 0, 0, 0, 0, 0,
4415 0, 0, 0, 0, 0, 0, 0, 0,
4416 0, 0, 0, 0, 0, 0, 0, 0,
4417 0, 0, 0, 0, 0, 0, 0, 0,
4418 0, 0, 0, 0, 0, 0, 0, 0,
4419 0, 0, 0, 0, 0, 0, 0, 0,
4420 0, 0, 0, 0, 0, 0, 0, 0,
4421 0, 0, 0, 0, 0, 0, 0, 0,
4422 0, 0, 0, 0, 0, 0, 37, 0,
4423 0, 0, 0, 0, 0, 0, 0, 0,
4424 0, 0, 0, 0, 0, 0, 0, 0,
4425 0, 0, 0, 0, 0, 0, 0, 0,
4426 0, 0, 0, 0,
4427}
4428
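// From-state actions: action index run whenever the machine takes a
// transition out of a given state (0 = none).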
4429var _graphclust_from_state_actions []byte = []byte{
4430 0, 0, 0, 0, 0, 0, 0, 0,
4431 0, 0, 0, 0, 0, 0, 0, 0,
4432 0, 0, 0, 0, 0, 0, 0, 0,
4433 0, 0, 0, 0, 0, 0, 0, 0,
4434 0, 0, 0, 0, 0, 0, 0, 0,
4435 0, 0, 0, 0, 0, 0, 0, 0,
4436 0, 0, 0, 0, 0, 0, 0, 0,
4437 0, 0, 0, 0, 0, 0, 0, 0,
4438 0, 0, 0, 0, 0, 0, 0, 0,
4439 0, 0, 0, 0, 0, 0, 0, 0,
4440 0, 0, 0, 0, 0, 0, 0, 0,
4441 0, 0, 0, 0, 0, 0, 0, 0,
4442 0, 0, 0, 0, 0, 0, 0, 0,
4443 0, 0, 0, 0, 0, 0, 0, 0,
4444 0, 0, 0, 0, 0, 0, 0, 0,
4445 0, 0, 0, 0, 0, 0, 0, 0,
4446 0, 0, 0, 0, 0, 0, 0, 0,
4447 0, 0, 0, 0, 0, 0, 0, 0,
4448 0, 0, 0, 0, 0, 0, 0, 0,
4449 0, 0, 0, 0, 0, 0, 0, 0,
4450 0, 0, 0, 0, 0, 0, 0, 0,
4451 0, 0, 0, 0, 0, 0, 0, 0,
4452 0, 0, 0, 0, 0, 0, 0, 0,
4453 0, 0, 0, 0, 0, 0, 0, 0,
4454 0, 0, 0, 0, 0, 0, 0, 0,
4455 0, 0, 0, 0, 0, 0, 0, 0,
4456 0, 0, 0, 0, 0, 0, 0, 0,
4457 0, 0, 0, 0, 0, 0, 0, 0,
4458 0, 0, 0, 0, 0, 0, 0, 0,
4459 0, 0, 0, 0, 0, 0, 0, 0,
4460 0, 0, 0, 0, 0, 0, 0, 0,
4461 0, 0, 0, 0, 0, 0, 0, 0,
4462 0, 0, 0, 0, 0, 0, 0, 0,
4463 0, 0, 0, 0, 0, 0, 0, 0,
4464 0, 0, 0, 0, 0, 0, 0, 0,
4465 0, 0, 0, 0, 0, 0, 0, 0,
4466 0, 0, 0, 0, 0, 0, 0, 0,
4467 0, 0, 0, 0, 0, 0, 0, 0,
4468 0, 0, 0, 0, 0, 0, 0, 0,
4469 0, 0, 0, 0, 0, 0, 0, 0,
4470 0, 0, 0, 0, 0, 0, 0, 0,
4471 0, 0, 0, 0, 0, 0, 0, 0,
4472 0, 0, 0, 0, 0, 0, 0, 0,
4473 0, 0, 0, 0, 0, 0, 0, 0,
4474 0, 0, 0, 0, 0, 0, 0, 0,
4475 0, 0, 0, 0, 0, 0, 0, 0,
4476 0, 0, 0, 0, 0, 0, 0, 0,
4477 0, 0, 0, 0, 0, 0, 0, 0,
4478 0, 0, 0, 0, 0, 0, 0, 0,
4479 0, 0, 0, 0, 0, 0, 0, 0,
4480 0, 0, 0, 0, 0, 0, 0, 0,
4481 0, 0, 0, 0, 0, 0, 0, 0,
4482 0, 0, 0, 0, 0, 0, 0, 0,
4483 0, 0, 0, 0, 0, 0, 0, 0,
4484 0, 0, 0, 0, 0, 0, 0, 0,
4485 0, 0, 0, 0, 0, 0, 0, 0,
4486 0, 0, 0, 0, 0, 0, 0, 0,
4487 0, 0, 0, 0, 0, 0, 0, 0,
4488 0, 0, 0, 0, 0, 0, 0, 0,
4489 0, 0, 0, 0, 0, 0, 0, 0,
4490 0, 0, 0, 0, 0, 0, 0, 0,
4491 0, 0, 0, 0, 0, 0, 0, 0,
4492 0, 0, 0, 0, 0, 0, 0, 0,
4493 0, 0, 0, 0, 0, 0, 0, 0,
4494 0, 0, 0, 0, 0, 0, 0, 0,
4495 0, 0, 0, 0, 0, 0, 0, 0,
4496 0, 0, 0, 0, 0, 0, 0, 0,
4497 0, 0, 0, 0, 0, 0, 0, 0,
4498 0, 0, 0, 0, 0, 0, 0, 0,
4499 0, 0, 0, 0, 0, 0, 0, 0,
4500 0, 0, 0, 0, 0, 0, 0, 0,
4501 0, 0, 0, 0, 0, 0, 0, 0,
4502 0, 0, 0, 0, 0, 0, 0, 0,
4503 0, 0, 0, 0, 0, 0, 0, 0,
4504 0, 0, 0, 0, 0, 0, 0, 0,
4505 0, 0, 0, 0, 0, 0, 0, 0,
4506 0, 0, 0, 0, 0, 0, 0, 0,
4507 0, 0, 0, 0, 0, 0, 0, 0,
4508 0, 0, 0, 0, 0, 0, 0, 0,
4509 0, 0, 0, 0, 0, 0, 0, 0,
4510 0, 0, 0, 0, 0, 0, 0, 0,
4511 0, 0, 0, 0, 0, 0, 0, 0,
4512 0, 0, 0, 0, 0, 0, 0, 0,
4513 0, 0, 0, 0, 0, 0, 0, 0,
4514 0, 0, 0, 0, 0, 0, 0, 0,
4515 0, 0, 0, 0, 0, 0, 0, 0,
4516 0, 0, 0, 0, 0, 0, 0, 0,
4517 0, 0, 0, 0, 0, 0, 0, 0,
4518 0, 0, 0, 0, 0, 0, 0, 0,
4519 0, 0, 0, 0, 0, 0, 0, 0,
4520 0, 0, 0, 0, 0, 0, 0, 0,
4521 0, 0, 0, 0, 0, 0, 0, 0,
4522 0, 0, 0, 0, 0, 0, 0, 0,
4523 0, 0, 0, 0, 0, 0, 0, 0,
4524 0, 0, 0, 0, 0, 0, 0, 0,
4525 0, 0, 0, 0, 0, 0, 0, 0,
4526 0, 0, 0, 0, 0, 0, 0, 0,
4527 0, 0, 0, 0, 0, 0, 0, 0,
4528 0, 0, 0, 0, 0, 0, 0, 0,
4529 0, 0, 0, 0, 0, 0, 0, 0,
4530 0, 0, 0, 0, 0, 0, 0, 0,
4531 0, 0, 0, 0, 0, 0, 0, 0,
4532 0, 0, 0, 0, 0, 0, 0, 0,
4533 0, 0, 0, 0, 0, 0, 0, 0,
4534 0, 0, 0, 0, 0, 0, 0, 0,
4535 0, 0, 0, 0, 0, 0, 0, 0,
4536 0, 0, 0, 0, 0, 0, 0, 0,
4537 0, 0, 0, 0, 0, 0, 0, 0,
4538 0, 0, 0, 0, 0, 0, 0, 0,
4539 0, 0, 0, 0, 0, 0, 0, 0,
4540 0, 0, 0, 0, 0, 0, 0, 0,
4541 0, 0, 0, 0, 0, 0, 0, 0,
4542 0, 0, 0, 0, 0, 0, 0, 0,
4543 0, 0, 0, 0, 0, 0, 0, 0,
4544 0, 0, 0, 0, 0, 0, 0, 0,
4545 0, 0, 0, 0, 0, 0, 0, 0,
4546 0, 0, 0, 0, 0, 0, 0, 0,
4547 0, 0, 0, 0, 0, 0, 0, 0,
4548 0, 0, 0, 0, 0, 0, 0, 0,
4549 0, 0, 0, 0, 0, 0, 0, 0,
4550 0, 0, 0, 0, 0, 0, 0, 0,
4551 0, 0, 0, 0, 0, 0, 0, 0,
4552 0, 0, 0, 0, 0, 0, 0, 0,
4553 0, 0, 0, 0, 0, 0, 0, 0,
4554 0, 0, 0, 0, 0, 0, 0, 0,
4555 0, 0, 0, 0, 0, 0, 0, 0,
4556 0, 0, 0, 0, 0, 0, 0, 0,
4557 0, 0, 0, 0, 0, 0, 0, 0,
4558 0, 0, 0, 0, 0, 0, 0, 0,
4559 0, 0, 0, 0, 0, 0, 0, 0,
4560 0, 0, 0, 0, 0, 0, 0, 0,
4561 0, 0, 0, 0, 0, 0, 0, 0,
4562 0, 0, 0, 0, 0, 0, 0, 0,
4563 0, 0, 0, 0, 0, 0, 0, 0,
4564 0, 0, 0, 0, 0, 0, 0, 0,
4565 0, 0, 0, 0, 0, 0, 0, 0,
4566 0, 0, 0, 0, 0, 0, 0, 0,
4567 0, 0, 0, 0, 0, 0, 0, 0,
4568 0, 0, 0, 0, 0, 0, 0, 0,
4569 0, 0, 0, 0, 0, 0, 0, 0,
4570 0, 0, 0, 0, 0, 0, 0, 0,
4571 0, 0, 0, 0, 0, 0, 0, 0,
4572 0, 0, 0, 0, 0, 0, 0, 0,
4573 0, 0, 0, 0, 0, 0, 0, 0,
4574 0, 0, 0, 0, 0, 0, 0, 0,
4575 0, 0, 0, 0, 0, 0, 0, 0,
4576 0, 0, 0, 0, 0, 0, 0, 0,
4577 0, 0, 0, 0, 0, 0, 0, 0,
4578 0, 0, 0, 0, 0, 0, 0, 0,
4579 0, 0, 0, 0, 0, 0, 0, 0,
4580 0, 0, 0, 0, 0, 0, 0, 0,
4581 0, 0, 0, 0, 0, 0, 0, 0,
4582 0, 0, 0, 0, 0, 0, 0, 0,
4583 0, 0, 0, 0, 0, 0, 0, 0,
4584 0, 0, 0, 0, 0, 0, 0, 0,
4585 0, 0, 0, 0, 0, 0, 0, 0,
4586 0, 0, 0, 0, 0, 0, 0, 0,
4587 0, 0, 0, 0, 0, 0, 0, 0,
4588 0, 0, 0, 0, 0, 0, 0, 0,
4589 0, 0, 0, 0, 0, 0, 0, 0,
4590 0, 0, 0, 0, 0, 0, 0, 0,
4591 0, 0, 0, 0, 0, 0, 0, 0,
4592 0, 0, 0, 0, 0, 0, 0, 0,
4593 0, 0, 0, 0, 0, 0, 0, 0,
4594 0, 0, 0, 0, 0, 0, 0, 0,
4595 0, 0, 0, 0, 0, 0, 0, 0,
4596 0, 0, 0, 0, 0, 0, 0, 0,
4597 0, 0, 0, 0, 0, 0, 0, 0,
4598 0, 0, 0, 0, 0, 0, 0, 0,
4599 0, 0, 0, 0, 0, 0, 0, 0,
4600 0, 0, 0, 0, 0, 0, 0, 0,
4601 0, 0, 0, 0, 0, 0, 0, 0,
4602 0, 0, 0, 0, 0, 0, 0, 0,
4603 0, 0, 0, 0, 0, 0, 0, 0,
4604 0, 0, 0, 0, 0, 0, 0, 0,
4605 0, 0, 0, 0, 0, 0, 0, 0,
4606 0, 0, 0, 0, 0, 0, 0, 0,
4607 0, 0, 0, 0, 0, 0, 0, 0,
4608 0, 0, 0, 0, 0, 0, 0, 0,
4609 0, 0, 0, 0, 0, 0, 0, 0,
4610 0, 0, 0, 0, 0, 0, 0, 0,
4611 0, 0, 0, 0, 0, 0, 0, 0,
4612 0, 0, 0, 0, 0, 0, 0, 0,
4613 0, 0, 0, 0, 0, 0, 0, 0,
4614 0, 0, 0, 0, 0, 0, 0, 0,
4615 0, 0, 0, 0, 0, 0, 0, 0,
4616 0, 0, 0, 0, 0, 0, 0, 0,
4617 0, 0, 0, 0, 0, 0, 0, 0,
4618 0, 0, 0, 0, 0, 0, 0, 0,
4619 0, 0, 0, 0, 0, 0, 0, 0,
4620 0, 0, 0, 0, 0, 0, 0, 0,
4621 0, 0, 0, 0, 0, 0, 0, 0,
4622 0, 0, 0, 0, 0, 0, 0, 0,
4623 0, 0, 0, 0, 0, 0, 0, 0,
4624 0, 0, 0, 0, 0, 0, 0, 0,
4625 0, 0, 0, 0, 0, 0, 0, 0,
4626 0, 0, 0, 0, 0, 0, 0, 0,
4627 0, 0, 0, 0, 0, 0, 0, 0,
4628 0, 0, 0, 0, 0, 0, 0, 0,
4629 0, 0, 0, 0, 0, 0, 0, 0,
4630 0, 0, 0, 0, 0, 0, 0, 0,
4631 0, 0, 0, 0, 0, 0, 0, 0,
4632 0, 0, 0, 0, 0, 0, 0, 0,
4633 0, 0, 0, 0, 0, 0, 0, 0,
4634 0, 0, 0, 0, 0, 0, 0, 0,
4635 0, 0, 0, 0, 0, 0, 0, 0,
4636 0, 0, 0, 0, 0, 0, 0, 0,
4637 0, 0, 0, 0, 0, 0, 0, 0,
4638 0, 0, 0, 0, 0, 0, 0, 0,
4639 0, 0, 0, 0, 0, 0, 0, 0,
4640 0, 0, 0, 0, 0, 0, 0, 0,
4641 0, 0, 0, 0, 0, 0, 0, 0,
4642 0, 0, 0, 0, 0, 0, 0, 0,
4643 0, 0, 0, 0, 0, 0, 0, 0,
4644 0, 0, 0, 0, 0, 0, 0, 0,
4645 0, 0, 0, 0, 0, 0, 0, 0,
4646 0, 0, 0, 0, 0, 0, 0, 0,
4647 0, 0, 0, 0, 0, 0, 0, 0,
4648 0, 0, 0, 0, 0, 0, 0, 0,
4649 0, 0, 0, 0, 0, 0, 0, 0,
4650 0, 0, 0, 0, 0, 0, 0, 0,
4651 0, 0, 0, 0, 0, 0, 0, 0,
4652 0, 0, 0, 0, 0, 0, 0, 0,
4653 0, 0, 0, 0, 0, 0, 0, 0,
4654 0, 0, 0, 0, 0, 0, 0, 0,
4655 0, 0, 0, 0, 0, 0, 0, 0,
4656 0, 0, 0, 0, 0, 0, 0, 0,
4657 0, 0, 0, 0, 0, 0, 0, 0,
4658 0, 0, 0, 0, 0, 0, 0, 0,
4659 0, 0, 0, 0, 0, 0, 0, 0,
4660 0, 0, 0, 0, 0, 0, 0, 0,
4661 0, 0, 0, 0, 0, 0, 0, 0,
4662 0, 0, 0, 0, 0, 0, 0, 0,
4663 0, 0, 0, 0, 0, 0, 0, 0,
4664 0, 0, 0, 0, 0, 0, 0, 0,
4665 0, 0, 0, 0, 0, 0, 0, 0,
4666 0, 0, 0, 0, 0, 0, 0, 0,
4667 0, 0, 0, 0, 0, 0, 0, 0,
4668 0, 0, 0, 0, 0, 0, 0, 0,
4669 0, 0, 0, 0, 0, 0, 0, 0,
4670 0, 0, 0, 0, 0, 0, 0, 0,
4671 0, 0, 0, 0, 0, 0, 0, 0,
4672 0, 0, 0, 0, 0, 0, 0, 0,
4673 0, 0, 0, 0, 0, 0, 0, 0,
4674 0, 0, 0, 0, 0, 0, 0, 0,
4675 0, 0, 0, 0, 0, 0, 0, 0,
4676 0, 0, 0, 0, 0, 0, 3, 0,
4677 0, 0, 0, 0, 0, 0, 0, 0,
4678 0, 0, 0, 0, 0, 0, 0, 0,
4679 0, 0, 0, 0, 0, 0, 0, 0,
4680 0, 0, 0, 0,
4681}
4682
4683var _graphclust_eof_trans []int16 = []int16{
4684 0, 0, 3, 3, 3, 3, 3, 3,
4685 3, 3, 3, 3, 3, 3, 3, 3,
4686 3, 3, 3, 3, 3, 3, 3, 3,
4687 3, 3, 3, 3, 3, 3, 3, 3,
4688 3, 3, 3, 3, 3, 3, 3, 3,
4689 3, 3, 3, 3, 3, 3, 3, 3,
4690 3, 3, 3, 3, 3, 3, 3, 3,
4691 3, 3, 3, 3, 3, 3, 3, 3,
4692 3, 3, 3, 3, 3, 3, 3, 3,
4693 3, 3, 3, 3, 3, 3, 3, 3,
4694 3, 3, 3, 3, 3, 3, 3, 3,
4695 3, 3, 3, 3, 3, 3, 3, 3,
4696 3, 3, 3, 3, 3, 3, 3, 3,
4697 3, 3, 3, 3, 3, 3, 3, 3,
4698 3, 3, 3, 3, 3, 3, 3, 3,
4699 3, 3, 3, 3, 3, 3, 3, 3,
4700 3, 3, 3, 3, 3, 3, 3, 3,
4701 3, 3, 3, 3, 3, 3, 3, 3,
4702 3, 3, 3, 3, 3, 3, 3, 3,
4703 3, 3, 3, 3, 3, 3, 3, 3,
4704 3, 3, 3, 3, 3, 3, 3, 3,
4705 3, 3, 3, 3, 3, 3, 3, 3,
4706 3, 3, 3, 3, 3, 3, 3, 3,
4707 3, 3, 3, 3, 3, 3, 3, 3,
4708 3, 3, 3, 3, 3, 3, 3, 3,
4709 3, 3, 3, 3, 3, 3, 3, 3,
4710 3, 3, 3, 3, 3, 3, 3, 3,
4711 3, 3, 3, 3, 3, 3, 3, 3,
4712 3, 3, 3, 3, 3, 3, 3, 3,
4713 3, 3, 3, 3, 3, 3, 3, 3,
4714 3, 3, 3, 3, 3, 3, 3, 3,
4715 3, 3, 3, 3, 3, 3, 3, 3,
4716 3, 3, 3, 3, 3, 3, 3, 3,
4717 3, 3, 3, 3, 3, 3, 3, 3,
4718 3, 3, 3, 3, 3, 3, 3, 3,
4719 3, 3, 3, 3, 3, 3, 3, 3,
4720 3, 3, 3, 3, 3, 3, 3, 3,
4721 3, 3, 3, 0, 0, 0, 268, 268,
4722 268, 268, 268, 268, 268, 268, 268, 268,
4723 268, 268, 268, 268, 268, 268, 268, 268,
4724 268, 268, 268, 268, 268, 268, 268, 268,
4725 268, 268, 268, 268, 268, 268, 268, 268,
4726 268, 268, 268, 268, 268, 268, 268, 268,
4727 268, 268, 268, 268, 268, 268, 268, 268,
4728 268, 268, 268, 268, 268, 268, 268, 268,
4729 268, 268, 268, 268, 268, 268, 268, 268,
4730 268, 268, 268, 268, 268, 268, 268, 268,
4731 268, 268, 268, 268, 268, 268, 268, 268,
4732 268, 268, 268, 268, 268, 268, 268, 268,
4733 268, 268, 268, 268, 268, 268, 268, 268,
4734 268, 268, 268, 268, 268, 268, 268, 268,
4735 268, 268, 268, 268, 268, 268, 268, 268,
4736 268, 268, 268, 268, 268, 268, 268, 268,
4737 268, 268, 268, 268, 268, 268, 268, 268,
4738 268, 268, 268, 268, 268, 268, 268, 268,
4739 268, 268, 268, 268, 268, 268, 268, 268,
4740 268, 268, 268, 268, 268, 268, 268, 268,
4741 268, 268, 268, 268, 268, 268, 268, 268,
4742 268, 268, 268, 268, 268, 268, 268, 268,
4743 268, 268, 268, 268, 268, 268, 268, 268,
4744 268, 268, 268, 268, 268, 268, 268, 268,
4745 268, 268, 268, 268, 268, 268, 268, 268,
4746 268, 268, 268, 268, 268, 268, 268, 268,
4747 268, 268, 268, 268, 268, 268, 268, 268,
4748 268, 268, 268, 268, 268, 268, 268, 268,
4749 268, 268, 268, 268, 268, 268, 268, 268,
4750 268, 268, 268, 268, 268, 268, 268, 268,
4751 268, 268, 268, 268, 268, 268, 268, 268,
4752 268, 268, 268, 268, 268, 268, 268, 268,
4753 268, 268, 268, 268, 268, 268, 268, 268,
4754 268, 268, 268, 268, 268, 268, 268, 268,
4755 268, 268, 268, 268, 268, 268, 268, 268,
4756 268, 268, 268, 268, 268, 268, 268, 268,
4757 268, 268, 268, 268, 268, 268, 268, 268,
4758 268, 268, 268, 268, 268, 268, 268, 268,
4759 268, 268, 268, 268, 268, 268, 268, 268,
4760 268, 268, 268, 268, 268, 268, 268, 268,
4761 268, 268, 268, 268, 268, 268, 268, 268,
4762 268, 268, 268, 268, 268, 268, 268, 268,
4763 268, 268, 268, 268, 268, 268, 268, 268,
4764 268, 268, 268, 268, 268, 268, 268, 268,
4765 268, 268, 268, 268, 268, 268, 268, 268,
4766 268, 268, 268, 268, 268, 268, 268, 268,
4767 268, 268, 268, 268, 268, 268, 268, 268,
4768 268, 268, 268, 268, 268, 268, 268, 268,
4769 268, 268, 268, 268, 268, 268, 268, 268,
4770 268, 268, 268, 268, 0, 0, 0, 0,
4771 0, 0, 610, 612, 612, 612, 612, 612,
4772 612, 612, 612, 612, 612, 612, 612, 612,
4773 612, 612, 612, 612, 612, 612, 612, 612,
4774 612, 612, 612, 612, 612, 612, 612, 612,
4775 612, 612, 612, 612, 612, 612, 612, 612,
4776 612, 612, 612, 612, 612, 612, 612, 612,
4777 612, 612, 612, 612, 612, 612, 612, 612,
4778 612, 612, 612, 612, 612, 612, 612, 612,
4779 612, 612, 612, 612, 612, 612, 612, 612,
4780 612, 612, 612, 612, 612, 612, 612, 612,
4781 612, 612, 612, 612, 612, 612, 612, 612,
4782 612, 612, 612, 612, 612, 612, 612, 612,
4783 612, 612, 612, 612, 612, 612, 612, 612,
4784 612, 612, 612, 612, 612, 612, 612, 612,
4785 612, 612, 612, 612, 612, 612, 612, 612,
4786 612, 612, 612, 612, 612, 612, 612, 612,
4787 612, 612, 612, 612, 612, 612, 612, 612,
4788 612, 612, 612, 612, 612, 612, 612, 612,
4789 612, 612, 612, 612, 612, 612, 612, 612,
4790 612, 612, 612, 612, 612, 612, 612, 612,
4791 612, 612, 612, 612, 612, 612, 612, 612,
4792 612, 612, 612, 612, 612, 612, 612, 612,
4793 612, 612, 612, 612, 612, 612, 612, 612,
4794 612, 612, 612, 612, 612, 612, 612, 612,
4795 612, 612, 612, 612, 612, 612, 612, 612,
4796 612, 612, 612, 612, 612, 612, 612, 612,
4797 612, 612, 612, 612, 612, 612, 612, 612,
4798 612, 612, 612, 612, 612, 612, 612, 612,
4799 612, 612, 612, 612, 612, 612, 612, 612,
4800 612, 612, 612, 612, 612, 612, 612, 612,
4801 612, 612, 612, 612, 612, 612, 612, 612,
4802 612, 612, 612, 612, 612, 612, 612, 612,
4803 612, 612, 612, 612, 612, 612, 612, 612,
4804 612, 612, 612, 612, 612, 612, 612, 612,
4805 612, 612, 612, 612, 612, 612, 612, 612,
4806 612, 612, 612, 612, 612, 612, 612, 612,
4807 612, 612, 612, 612, 612, 612, 612, 612,
4808 612, 612, 612, 612, 612, 610, 612, 612,
4809 610, 612, 612, 610, 612, 612, 612, 612,
4810 612, 612, 612, 612, 612, 612, 612, 612,
4811 612, 612, 612, 612, 612, 612, 610, 612,
4812 612, 612, 612, 0, 0, 0, 901, 901,
4813 901, 901, 901, 901, 901, 901, 901, 901,
4814 901, 901, 901, 901, 901, 901, 901, 901,
4815 901, 901, 901, 901, 901, 901, 901, 901,
4816 901, 901, 901, 901, 901, 901, 901, 901,
4817 901, 901, 901, 901, 901, 901, 901, 901,
4818 901, 901, 901, 901, 901, 901, 901, 901,
4819 901, 901, 901, 901, 901, 901, 901, 901,
4820 901, 901, 901, 901, 901, 901, 901, 901,
4821 901, 901, 901, 901, 901, 901, 901, 901,
4822 901, 901, 901, 901, 901, 901, 901, 901,
4823 901, 901, 901, 901, 901, 901, 901, 901,
4824 901, 901, 901, 901, 901, 901, 901, 901,
4825 901, 901, 901, 901, 901, 901, 901, 901,
4826 901, 901, 901, 901, 901, 901, 901, 901,
4827 901, 901, 901, 901, 901, 901, 901, 901,
4828 901, 901, 901, 901, 901, 901, 901, 901,
4829 901, 901, 901, 901, 901, 901, 901, 901,
4830 901, 901, 901, 901, 901, 901, 901, 901,
4831 901, 901, 901, 901, 901, 901, 901, 901,
4832 901, 901, 901, 901, 901, 901, 901, 901,
4833 901, 901, 901, 901, 901, 901, 901, 901,
4834 901, 901, 901, 901, 901, 901, 901, 901,
4835 901, 901, 901, 901, 901, 901, 901, 901,
4836 901, 901, 901, 901, 901, 901, 901, 901,
4837 901, 901, 901, 901, 901, 901, 901, 901,
4838 901, 901, 901, 901, 901, 901, 901, 901,
4839 901, 901, 901, 901, 901, 901, 901, 901,
4840 901, 901, 901, 901, 901, 901, 901, 901,
4841 901, 901, 901, 901, 901, 901, 901, 901,
4842 901, 901, 901, 901, 901, 901, 901, 901,
4843 901, 901, 901, 901, 901, 901, 901, 901,
4844 901, 901, 901, 901, 901, 901, 901, 901,
4845 901, 901, 901, 901, 901, 901, 901, 901,
4846 901, 901, 901, 901, 901, 901, 901, 901,
4847 901, 901, 901, 901, 901, 901, 901, 901,
4848 901, 901, 901, 901, 901, 901, 901, 901,
4849 901, 901, 901, 901, 901, 901, 901, 0,
4850 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4851 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4852 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4853 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4854 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4855 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4856 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4857 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4858 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4859 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4860 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4861 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4862 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4863 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4864 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4865 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4866 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4867 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4868 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4869 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4870 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4871 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4872 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4873 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4874 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4875 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4876 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4877 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4878 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4879 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4880 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4881 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4882 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4883 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4884 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4885 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4886 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164,
4887 1164, 1164, 0, 0, 0, 0, 0, 0,
4888 0, 0, 0, 0, 0, 0, 0, 0,
4889 0, 0, 0, 0, 0, 0, 0, 0,
4890 0, 0, 0, 0, 0, 0, 0, 0,
4891 0, 0, 0, 1462, 1462, 1462, 1462, 1462,
4892 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4893 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4894 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4895 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4896 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4897 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4898 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4899 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4900 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4901 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4902 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4903 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4904 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4905 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4906 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4907 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4908 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4909 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4910 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4911 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4912 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4913 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4914 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4915 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4916 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4917 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4918 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4919 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4920 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4921 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4922 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4923 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4924 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4925 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4926 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4927 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462,
4928 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0,
4929 0, 0, 0, 0, 0, 0, 0, 0,
4930 0, 0, 0, 0, 0, 0, 0, 1750,
4931 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789,
4932 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855,
4933 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944,
4934 1983, 1983, 1983, 1983,
4935}
4936
4937const graphclust_start int = 1974
4938const graphclust_first_final int = 1974
4939const graphclust_error int = 0
4940
4941const graphclust_en_main int = 1974
4942
4943
4944// line 14 "grapheme_clusters.rl"
4945
4946
4947var Error = errors.New("invalid UTF8 text")
4948
4949// ScanGraphemeClusters is a split function for bufio.Scanner that splits
4950// on grapheme cluster boundaries.
4951func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
4952 if len(data) == 0 {
4953 return 0, nil, nil
4954 }
4955
4956 // Ragel state
4957 cs := 0 // Current State
4958 p := 0 // "Pointer" into data
4959 pe := len(data) // End-of-data "pointer"
4960 ts := 0
4961 te := 0
4962 act := 0
4963 eof := pe
4964
4965 // Make Go compiler happy
4966 _ = ts
4967 _ = te
4968 _ = act
4969 _ = eof
4970
4971 startPos := 0
4972 endPos := 0
4973
4974
4975// line 4976 "grapheme_clusters.go"
4976 {
4977 cs = graphclust_start
4978 ts = 0
4979 te = 0
4980 act = 0
4981 }
4982
4983// line 4984 "grapheme_clusters.go"
4984 {
4985 var _klen int
4986 var _trans int
4987 var _acts int
4988 var _nacts uint
4989 var _keys int
4990 if p == pe {
4991 goto _test_eof
4992 }
4993 if cs == 0 {
4994 goto _out
4995 }
4996_resume:
4997 _acts = int(_graphclust_from_state_actions[cs])
4998 _nacts = uint(_graphclust_actions[_acts]); _acts++
4999 for ; _nacts > 0; _nacts-- {
5000 _acts++
5001 switch _graphclust_actions[_acts - 1] {
5002 case 4:
5003// line 1 "NONE"
5004
5005ts = p
5006
5007// line 5008 "grapheme_clusters.go"
5008 }
5009 }
5010
5011 _keys = int(_graphclust_key_offsets[cs])
5012 _trans = int(_graphclust_index_offsets[cs])
5013
5014 _klen = int(_graphclust_single_lengths[cs])
5015 if _klen > 0 {
5016 _lower := int(_keys)
5017 var _mid int
5018 _upper := int(_keys + _klen - 1)
5019 for {
5020 if _upper < _lower {
5021 break
5022 }
5023
5024 _mid = _lower + ((_upper - _lower) >> 1)
5025 switch {
5026 case data[p] < _graphclust_trans_keys[_mid]:
5027 _upper = _mid - 1
5028 case data[p] > _graphclust_trans_keys[_mid]:
5029 _lower = _mid + 1
5030 default:
5031 _trans += int(_mid - int(_keys))
5032 goto _match
5033 }
5034 }
5035 _keys += _klen
5036 _trans += _klen
5037 }
5038
5039 _klen = int(_graphclust_range_lengths[cs])
5040 if _klen > 0 {
5041 _lower := int(_keys)
5042 var _mid int
5043 _upper := int(_keys + (_klen << 1) - 2)
5044 for {
5045 if _upper < _lower {
5046 break
5047 }
5048
5049 _mid = _lower + (((_upper - _lower) >> 1) & ^1)
5050 switch {
5051 case data[p] < _graphclust_trans_keys[_mid]:
5052 _upper = _mid - 2
5053 case data[p] > _graphclust_trans_keys[_mid + 1]:
5054 _lower = _mid + 2
5055 default:
5056 _trans += int((_mid - int(_keys)) >> 1)
5057 goto _match
5058 }
5059 }
5060 _trans += _klen
5061 }
5062
5063_match:
5064 _trans = int(_graphclust_indicies[_trans])
5065_eof_trans:
5066 cs = int(_graphclust_trans_targs[_trans])
5067
5068 if _graphclust_trans_actions[_trans] == 0 {
5069 goto _again
5070 }
5071
5072 _acts = int(_graphclust_trans_actions[_trans])
5073 _nacts = uint(_graphclust_actions[_acts]); _acts++
5074 for ; _nacts > 0; _nacts-- {
5075 _acts++
5076 switch _graphclust_actions[_acts-1] {
5077 case 0:
5078// line 46 "grapheme_clusters.rl"
5079
5080
5081 startPos = p
5082
5083 case 1:
5084// line 50 "grapheme_clusters.rl"
5085
5086
5087 endPos = p
5088
5089 case 5:
5090// line 1 "NONE"
5091
5092te = p+1
5093
5094 case 6:
5095// line 54 "grapheme_clusters.rl"
5096
5097act = 3;
5098 case 7:
5099// line 54 "grapheme_clusters.rl"
5100
5101te = p+1
5102{
5103 return endPos+1, data[startPos:endPos+1], nil
5104 }
5105 case 8:
5106// line 54 "grapheme_clusters.rl"
5107
5108te = p+1
5109{
5110 return endPos+1, data[startPos:endPos+1], nil
5111 }
5112 case 9:
5113// line 54 "grapheme_clusters.rl"
5114
5115te = p
5116p--
5117{
5118 return endPos+1, data[startPos:endPos+1], nil
5119 }
5120 case 10:
5121// line 54 "grapheme_clusters.rl"
5122
5123te = p
5124p--
5125{
5126 return endPos+1, data[startPos:endPos+1], nil
5127 }
5128 case 11:
5129// line 54 "grapheme_clusters.rl"
5130
5131te = p
5132p--
5133{
5134 return endPos+1, data[startPos:endPos+1], nil
5135 }
5136 case 12:
5137// line 54 "grapheme_clusters.rl"
5138
5139te = p
5140p--
5141{
5142 return endPos+1, data[startPos:endPos+1], nil
5143 }
5144 case 13:
5145// line 54 "grapheme_clusters.rl"
5146
5147te = p
5148p--
5149{
5150 return endPos+1, data[startPos:endPos+1], nil
5151 }
5152 case 14:
5153// line 54 "grapheme_clusters.rl"
5154
5155te = p
5156p--
5157{
5158 return endPos+1, data[startPos:endPos+1], nil
5159 }
5160 case 15:
5161// line 54 "grapheme_clusters.rl"
5162
5163p = (te) - 1
5164{
5165 return endPos+1, data[startPos:endPos+1], nil
5166 }
5167 case 16:
5168// line 54 "grapheme_clusters.rl"
5169
5170p = (te) - 1
5171{
5172 return endPos+1, data[startPos:endPos+1], nil
5173 }
5174 case 17:
5175// line 54 "grapheme_clusters.rl"
5176
5177p = (te) - 1
5178{
5179 return endPos+1, data[startPos:endPos+1], nil
5180 }
5181 case 18:
5182// line 54 "grapheme_clusters.rl"
5183
5184p = (te) - 1
5185{
5186 return endPos+1, data[startPos:endPos+1], nil
5187 }
5188 case 19:
5189// line 54 "grapheme_clusters.rl"
5190
5191p = (te) - 1
5192{
5193 return endPos+1, data[startPos:endPos+1], nil
5194 }
5195 case 20:
5196// line 54 "grapheme_clusters.rl"
5197
5198p = (te) - 1
5199{
5200 return endPos+1, data[startPos:endPos+1], nil
5201 }
5202 case 21:
5203// line 1 "NONE"
5204
5205 switch act {
5206 case 0:
5207 {cs = 0
5208goto _again
5209}
5210 case 3:
5211 {p = (te) - 1
5212
5213 return endPos+1, data[startPos:endPos+1], nil
5214 }
5215 }
5216
5217// line 5218 "grapheme_clusters.go"
5218 }
5219 }
5220
5221_again:
5222 _acts = int(_graphclust_to_state_actions[cs])
5223 _nacts = uint(_graphclust_actions[_acts]); _acts++
5224 for ; _nacts > 0; _nacts-- {
5225 _acts++
5226 switch _graphclust_actions[_acts-1] {
5227 case 2:
5228// line 1 "NONE"
5229
5230ts = 0
5231
5232 case 3:
5233// line 1 "NONE"
5234
5235act = 0
5236
5237// line 5238 "grapheme_clusters.go"
5238 }
5239 }
5240
5241 if cs == 0 {
5242 goto _out
5243 }
5244 p++
5245 if p != pe {
5246 goto _resume
5247 }
5248 _test_eof: {}
5249 if p == eof {
5250 if _graphclust_eof_trans[cs] > 0 {
5251 _trans = int(_graphclust_eof_trans[cs] - 1)
5252 goto _eof_trans
5253 }
5254 }
5255
5256 _out: {}
5257 }
5258
5259// line 116 "grapheme_clusters.rl"
5260
5261
5262 // If we fall out here then we were unable to complete a sequence.
5263 // If we weren't able to complete a sequence then either we've
5264 // reached the end of a partial buffer (so there's more data to come)
5265 // or we have an isolated symbol that would normally be part of a
5266 // grapheme cluster but has appeared in isolation here.
5267
5268 if !atEOF {
5269 // Request more
5270 return 0, nil, nil
5271 }
5272
5273 // Just take the first UTF-8 sequence and return that.
5274 _, seqLen := utf8.DecodeRune(data)
5275 return seqLen, data[:seqLen], nil
5276}
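
The generated ScanGraphemeClusters above follows the bufio.SplitFunc contract, so it can be handed directly to bufio.Scanner.Split. A minimal usage sketch (the main wrapper and the sample input string are illustrative, not part of the vendored code):

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// "he\u0301llo": the 'e' plus U+0301 combining acute is two code
	// points but a single grapheme cluster.
	scanner := bufio.NewScanner(strings.NewReader("he\u0301llo"))
	scanner.Split(textseg.ScanGraphemeClusters)
	for scanner.Scan() {
		fmt.Printf("%q\n", scanner.Text()) // "h", then "é", then "l", "l", "o"
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}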
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl
new file mode 100644
index 0000000..003ffbf
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl
@@ -0,0 +1,132 @@
1package textseg
2
3import (
4 "errors"
5 "unicode/utf8"
6)
7
8// Generated from grapheme_clusters.rl. DO NOT EDIT
9%%{
10 # (except you are actually in grapheme_clusters.rl here, so edit away!)
11
12 machine graphclust;
13 write data;
14}%%
15
16var Error = errors.New("invalid UTF8 text")
17
18// ScanGraphemeClusters is a split function for bufio.Scanner that splits
19// on grapheme cluster boundaries.
20func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
21 if len(data) == 0 {
22 return 0, nil, nil
23 }
24
25 // Ragel state
26 cs := 0 // Current State
27 p := 0 // "Pointer" into data
28 pe := len(data) // End-of-data "pointer"
29 ts := 0
30 te := 0
31 act := 0
32 eof := pe
33
34 // Make Go compiler happy
35 _ = ts
36 _ = te
37 _ = act
38 _ = eof
39
40 startPos := 0
41 endPos := 0
42
43 %%{
44 include GraphemeCluster "grapheme_clusters_table.rl";
45
46 action start {
47 startPos = p
48 }
49
50 action end {
51 endPos = p
52 }
53
54 action emit {
55 return endPos+1, data[startPos:endPos+1], nil
56 }
57
58 ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?;
59 AnyExtender = Extend | ZWJGlue | SpacingMark;
60 Extension = AnyExtender*;
61 ReplacementChar = (0xEF 0xBF 0xBD);
62
63 CRLFSeq = CR LF;
64 ControlSeq = Control | ReplacementChar;
65 HangulSeq = (
66 L+ (((LV? V+ | LVT) T*)?|LV?) |
67 LV V* T* |
68 V+ T* |
69 LVT T* |
70 T+
71 ) Extension;
72 EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension;
73 ZWJSeq = ZWJGlue Extension;
74 EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
75
76 UTF8Cont = 0x80 .. 0xBF;
77 AnyUTF8 = (
78 0x00..0x7F |
79 0xC0..0xDF . UTF8Cont |
80 0xE0..0xEF . UTF8Cont . UTF8Cont |
81 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
82 );
83
84 # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
85 OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension;
86
87 # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
88 PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
89
90 CRLFTok = CRLFSeq >start @end;
91 ControlTok = ControlSeq >start @end;
92 HangulTok = HangulSeq >start @end;
93 EmojiTok = EmojiSeq >start @end;
94 ZWJTok = ZWJSeq >start @end;
95 EmojiFlagTok = EmojiFlagSeq >start @end;
96 OtherTok = OtherSeq >start @end;
97 PrependTok = PrependSeq >start @end;
98
99 main := |*
100 CRLFTok => emit;
101 ControlTok => emit;
102 HangulTok => emit;
103 EmojiTok => emit;
104 ZWJTok => emit;
105 EmojiFlagTok => emit;
106 PrependTok => emit;
107 OtherTok => emit;
108
109 # any single valid UTF-8 character would also be valid per spec,
110 # but we'll handle that separately after the loop so we can deal
111 # with requesting more bytes if we're not at EOF.
112 *|;
113
114 write init;
115 write exec;
116 }%%
117
118 // If we fall out here then we were unable to complete a sequence.
119 // If we weren't able to complete a sequence then either we've
120 // reached the end of a partial buffer (so there's more data to come)
121 // or we have an isolated symbol that would normally be part of a
122 // grapheme cluster but has appeared in isolation here.
123
124 if !atEOF {
125 // Request more
126 return 0, nil, nil
127 }
128
129 // Just take the first UTF-8 sequence and return that.
130 _, seqLen := utf8.DecodeRune(data)
131 return seqLen, data[:seqLen], nil
132}
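
The emit action above advances by endPos+1 bytes and returns data[startPos:endPos+1] as the token, while the post-loop fallback either requests more input (returning 0, nil, nil when not at EOF) or yields a single UTF-8 sequence. Since the function is an ordinary bufio.SplitFunc, it can also be driven directly over a fully buffered input; a minimal sketch, with the driver loop and sample bytes as illustrative assumptions rather than vendored code:

package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// 'e' followed by U+0301 (combining acute), then 'x'.
	buf := []byte("e\u0301x")
	for len(buf) > 0 {
		// atEOF=true because the whole input is already in buf, so the
		// scanner never needs to ask for more data (advance 0, nil token).
		advance, token, err := textseg.ScanGraphemeClusters(buf, true)
		if err != nil || advance == 0 {
			break
		}
		fmt.Printf("%q (%d bytes)\n", token, advance) // "é" (3 bytes), then "x" (1 byte)
		buf = buf[advance:]
	}
}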
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl
new file mode 100644
index 0000000..fb45118
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl
@@ -0,0 +1,1583 @@
1# The following Ragel file was autogenerated with unicode2ragel.rb
2# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt
3#
4# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"].
5#
6# To use this, make sure that your alphtype is set to byte,
7# and that your input is in utf8.
8
9%%{
10 machine GraphemeCluster;
11
12 Prepend =
13 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ...
14 | 0xDB 0x9D #Cf ARABIC END OF AYAH
15 | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK
16 | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH
17 | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH
18 | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN
19 | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA...
20 ;
21
22 CR =
23 0x0D #Cc <control-000D>
24 ;
25
26 LF =
27 0x0A #Cc <control-000A>
28 ;
29
30 Control =
31 0x00..0x09 #Cc [10] <control-0000>..<control-0009>
32 | 0x0B..0x0C #Cc [2] <control-000B>..<control-000C>
33 | 0x0E..0x1F #Cc [18] <control-000E>..<control-001F>
34 | 0x7F #Cc [33] <control-007F>..<control-009F>
35 | 0xC2 0x80..0x9F #
36 | 0xC2 0xAD #Cf SOFT HYPHEN
37 | 0xD8 0x9C #Cf ARABIC LETTER MARK
38 | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR
39 | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE
40 | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ...
41 | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR
42 | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR
43 | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-...
44 | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS
45 | 0xE2 0x81 0xA5 #Cn <reserved-2065>
46 | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG...
47 | 0xED 0xA0 0x80..0xFF #Cs [2048] <surrogate-D800>..<surrogate-...
48 | 0xED 0xA1..0xBE 0x00..0xFF #
49 | 0xED 0xBF 0x00..0xBF #
50 | 0xEF 0xBB 0xBF #Cf ZERO WIDTH NO-BREAK SPACE
51 | 0xEF 0xBF 0xB0..0xB8 #Cn [9] <reserved-FFF0>..<reserved-FFF8>
52 | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT...
53 | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP...
54 | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI...
55 | 0xF3 0xA0 0x80 0x80 #Cn <reserved-E0000>
56 | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG
57 | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] <reserved-E0002>..<reserved-E001F>
58 | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] <reserved-E0080>..<reserved-E00FF>
59 | 0xF3 0xA0 0x83 0x00..0xBF #
60 | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] <reserved-E01F0>..<reser...
61 | 0xF3 0xA0 0x88..0xBE 0x00..0xFF #
62 | 0xF3 0xA0 0xBF 0x00..0xBF #
63 ;
64
65 Extend =
66 0xCC 0x80..0xFF #Mn [112] COMBINING GRAVE ACCENT..COMBINING ...
67 | 0xCD 0x00..0xAF #
68 | 0xD2 0x83..0x87 #Mn [5] COMBINING CYRILLIC TITLO..COMBININ...
69 | 0xD2 0x88..0x89 #Me [2] COMBINING CYRILLIC HUNDRED THOUSAN...
70 | 0xD6 0x91..0xBD #Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POIN...
71 | 0xD6 0xBF #Mn HEBREW POINT RAFE
72 | 0xD7 0x81..0x82 #Mn [2] HEBREW POINT SHIN DOT..HEBREW POIN...
73 | 0xD7 0x84..0x85 #Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK...
74 | 0xD7 0x87 #Mn HEBREW POINT QAMATS QATAN
75 | 0xD8 0x90..0x9A #Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WAS...
76 | 0xD9 0x8B..0x9F #Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA...
77 | 0xD9 0xB0 #Mn ARABIC LETTER SUPERSCRIPT ALEF
78 | 0xDB 0x96..0x9C #Mn [7] ARABIC SMALL HIGH LIGATURE SAD WIT...
79 | 0xDB 0x9F..0xA4 #Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..AR...
80 | 0xDB 0xA7..0xA8 #Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMAL...
81 | 0xDB 0xAA..0xAD #Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARAB...
82 | 0xDC 0x91 #Mn SYRIAC LETTER SUPERSCRIPT ALAPH
83 | 0xDC 0xB0..0xFF #Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
84 | 0xDD 0x00..0x8A #
85 | 0xDE 0xA6..0xB0 #Mn [11] THAANA ABAFILI..THAANA SUKUN
86 | 0xDF 0xAB..0xB3 #Mn [9] NKO COMBINING SHORT HIGH TONE..NKO...
87 | 0xE0 0xA0 0x96..0x99 #Mn [4] SAMARITAN MARK IN..SAMARITAN MARK ...
88 | 0xE0 0xA0 0x9B..0xA3 #Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAM...
89 | 0xE0 0xA0 0xA5..0xA7 #Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMA...
90 | 0xE0 0xA0 0xA9..0xAD #Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMAR...
91 | 0xE0 0xA1 0x99..0x9B #Mn [3] MANDAIC AFFRICATION MARK..MANDAIC ...
92 | 0xE0 0xA3 0x94..0xA1 #Mn [14] ARABIC SMALL HIGH WORD AR-RUB..ARA...
93 | 0xE0 0xA3 0xA3..0xFF #Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAG...
94 | 0xE0 0xA4 0x00..0x82 #
95 | 0xE0 0xA4 0xBA #Mn DEVANAGARI VOWEL SIGN OE
96 | 0xE0 0xA4 0xBC #Mn DEVANAGARI SIGN NUKTA
97 | 0xE0 0xA5 0x81..0x88 #Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGAR...
98 | 0xE0 0xA5 0x8D #Mn DEVANAGARI SIGN VIRAMA
99 | 0xE0 0xA5 0x91..0x97 #Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEV...
100 | 0xE0 0xA5 0xA2..0xA3 #Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..D...
101 | 0xE0 0xA6 0x81 #Mn BENGALI SIGN CANDRABINDU
102 | 0xE0 0xA6 0xBC #Mn BENGALI SIGN NUKTA
103 | 0xE0 0xA6 0xBE #Mc BENGALI VOWEL SIGN AA
104 | 0xE0 0xA7 0x81..0x84 #Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWE...
105 | 0xE0 0xA7 0x8D #Mn BENGALI SIGN VIRAMA
106 | 0xE0 0xA7 0x97 #Mc BENGALI AU LENGTH MARK
107 | 0xE0 0xA7 0xA2..0xA3 #Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENG...
108 | 0xE0 0xA8 0x81..0x82 #Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI...
109 | 0xE0 0xA8 0xBC #Mn GURMUKHI SIGN NUKTA
110 | 0xE0 0xA9 0x81..0x82 #Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VO...
111 | 0xE0 0xA9 0x87..0x88 #Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI V...
112 | 0xE0 0xA9 0x8B..0x8D #Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI S...
113 | 0xE0 0xA9 0x91 #Mn GURMUKHI SIGN UDAAT
114 | 0xE0 0xA9 0xB0..0xB1 #Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK
115 | 0xE0 0xA9 0xB5 #Mn GURMUKHI SIGN YAKASH
116 | 0xE0 0xAA 0x81..0x82 #Mn [2] GUJARATI SIGN CANDRABINDU..GUJARAT...
117 | 0xE0 0xAA 0xBC #Mn GUJARATI SIGN NUKTA
118 | 0xE0 0xAB 0x81..0x85 #Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VO...
119 | 0xE0 0xAB 0x87..0x88 #Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VO...
120 | 0xE0 0xAB 0x8D #Mn GUJARATI SIGN VIRAMA
121 | 0xE0 0xAB 0xA2..0xA3 #Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJ...
122 | 0xE0 0xAC 0x81 #Mn ORIYA SIGN CANDRABINDU
123 | 0xE0 0xAC 0xBC #Mn ORIYA SIGN NUKTA
124 | 0xE0 0xAC 0xBE #Mc ORIYA VOWEL SIGN AA
125 | 0xE0 0xAC 0xBF #Mn ORIYA VOWEL SIGN I
126 | 0xE0 0xAD 0x81..0x84 #Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SI...
127 | 0xE0 0xAD 0x8D #Mn ORIYA SIGN VIRAMA
128 | 0xE0 0xAD 0x96 #Mn ORIYA AI LENGTH MARK
129 | 0xE0 0xAD 0x97 #Mc ORIYA AU LENGTH MARK
130 | 0xE0 0xAD 0xA2..0xA3 #Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA ...
131 | 0xE0 0xAE 0x82 #Mn TAMIL SIGN ANUSVARA
132 | 0xE0 0xAE 0xBE #Mc TAMIL VOWEL SIGN AA
133 | 0xE0 0xAF 0x80 #Mn TAMIL VOWEL SIGN II
134 | 0xE0 0xAF 0x8D #Mn TAMIL SIGN VIRAMA
135 | 0xE0 0xAF 0x97 #Mc TAMIL AU LENGTH MARK
136 | 0xE0 0xB0 0x80 #Mn TELUGU SIGN COMBINING CANDRABINDU ...
137 | 0xE0 0xB0 0xBE..0xFF #Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL...
138 | 0xE0 0xB1 0x00..0x80 #
139 | 0xE0 0xB1 0x86..0x88 #Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL ...
140 | 0xE0 0xB1 0x8A..0x8D #Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN V...
141 | 0xE0 0xB1 0x95..0x96 #Mn [2] TELUGU LENGTH MARK..TELUGU AI LENG...
142 | 0xE0 0xB1 0xA2..0xA3 #Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUG...
143 | 0xE0 0xB2 0x81 #Mn KANNADA SIGN CANDRABINDU
144 | 0xE0 0xB2 0xBC #Mn KANNADA SIGN NUKTA
145 | 0xE0 0xB2 0xBF #Mn KANNADA VOWEL SIGN I
146 | 0xE0 0xB3 0x82 #Mc KANNADA VOWEL SIGN UU
147 | 0xE0 0xB3 0x86 #Mn KANNADA VOWEL SIGN E
148 | 0xE0 0xB3 0x8C..0x8D #Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIG...
149 | 0xE0 0xB3 0x95..0x96 #Mc [2] KANNADA LENGTH MARK..KANNADA AI LE...
150 | 0xE0 0xB3 0xA2..0xA3 #Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANN...
151 | 0xE0 0xB4 0x81 #Mn MALAYALAM SIGN CANDRABINDU
152 | 0xE0 0xB4 0xBE #Mc MALAYALAM VOWEL SIGN AA
153 | 0xE0 0xB5 0x81..0x84 #Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM ...
154 | 0xE0 0xB5 0x8D #Mn MALAYALAM SIGN VIRAMA
155 | 0xE0 0xB5 0x97 #Mc MALAYALAM AU LENGTH MARK
156 | 0xE0 0xB5 0xA2..0xA3 #Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MA...
157 | 0xE0 0xB7 0x8A #Mn SINHALA SIGN AL-LAKUNA
158 | 0xE0 0xB7 0x8F #Mc SINHALA VOWEL SIGN AELA-PILLA
159 | 0xE0 0xB7 0x92..0x94 #Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA....
160 | 0xE0 0xB7 0x96 #Mn SINHALA VOWEL SIGN DIGA PAA-PILLA
161 | 0xE0 0xB7 0x9F #Mc SINHALA VOWEL SIGN GAYANUKITTA
162 | 0xE0 0xB8 0xB1 #Mn THAI CHARACTER MAI HAN-AKAT
163 | 0xE0 0xB8 0xB4..0xBA #Mn [7] THAI CHARACTER SARA I..THAI CHARAC...
164 | 0xE0 0xB9 0x87..0x8E #Mn [8] THAI CHARACTER MAITAIKHU..THAI CHA...
165 | 0xE0 0xBA 0xB1 #Mn LAO VOWEL SIGN MAI KAN
166 | 0xE0 0xBA 0xB4..0xB9 #Mn [6] LAO VOWEL SIGN I..LAO VOWEL SIGN UU
167 | 0xE0 0xBA 0xBB..0xBC #Mn [2] LAO VOWEL SIGN MAI KON..LAO SEMIVO...
168 | 0xE0 0xBB 0x88..0x8D #Mn [6] LAO TONE MAI EK..LAO NIGGAHITA
169 | 0xE0 0xBC 0x98..0x99 #Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD P...
170 | 0xE0 0xBC 0xB5 #Mn TIBETAN MARK NGAS BZUNG NYI ZLA
171 | 0xE0 0xBC 0xB7 #Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS
172 | 0xE0 0xBC 0xB9 #Mn TIBETAN MARK TSA -PHRU
173 | 0xE0 0xBD 0xB1..0xBE #Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIG...
174 | 0xE0 0xBE 0x80..0x84 #Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIB...
175 | 0xE0 0xBE 0x86..0x87 #Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SI...
176 | 0xE0 0xBE 0x8D..0x97 #Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN...
177 | 0xE0 0xBE 0x99..0xBC #Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBE...
178 | 0xE0 0xBF 0x86 #Mn TIBETAN SYMBOL PADMA GDAN
179 | 0xE1 0x80 0xAD..0xB0 #Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWE...
180 | 0xE1 0x80 0xB2..0xB7 #Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIG...
181 | 0xE1 0x80 0xB9..0xBA #Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ...
182 | 0xE1 0x80 0xBD..0xBE #Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA.....
183 | 0xE1 0x81 0x98..0x99 #Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYAN...
184 | 0xE1 0x81 0x9E..0xA0 #Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL ...
185 | 0xE1 0x81 0xB1..0xB4 #Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..M...
186 | 0xE1 0x82 0x82 #Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA
187 | 0xE1 0x82 0x85..0x86 #Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..M...
188 | 0xE1 0x82 0x8D #Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC...
189 | 0xE1 0x82 0x9D #Mn MYANMAR VOWEL SIGN AITON AI
190 | 0xE1 0x8D 0x9D..0x9F #Mn [3] ETHIOPIC COMBINING GEMINATION AND ...
191 | 0xE1 0x9C 0x92..0x94 #Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN...
192 | 0xE1 0x9C 0xB2..0xB4 #Mn [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN...
193 | 0xE1 0x9D 0x92..0x93 #Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SI...
194 | 0xE1 0x9D 0xB2..0xB3 #Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VO...
195 | 0xE1 0x9E 0xB4..0xB5 #Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOW...
196 | 0xE1 0x9E 0xB7..0xBD #Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SI...
197 | 0xE1 0x9F 0x86 #Mn KHMER SIGN NIKAHIT
198 | 0xE1 0x9F 0x89..0x93 #Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN...
199 | 0xE1 0x9F 0x9D #Mn KHMER SIGN ATTHACAN
200 | 0xE1 0xA0 0x8B..0x8D #Mn [3] MONGOLIAN FREE VARIATION SELECTOR ...
201 | 0xE1 0xA2 0x85..0x86 #Mn [2] MONGOLIAN LETTER ALI GALI BALUDA.....
202 | 0xE1 0xA2 0xA9 #Mn MONGOLIAN LETTER ALI GALI DAGALGA
203 | 0xE1 0xA4 0xA0..0xA2 #Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SI...
204 | 0xE1 0xA4 0xA7..0xA8 #Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SI...
205 | 0xE1 0xA4 0xB2 #Mn LIMBU SMALL LETTER ANUSVARA
206 | 0xE1 0xA4 0xB9..0xBB #Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I
207 | 0xE1 0xA8 0x97..0x98 #Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VO...
208 | 0xE1 0xA8 0x9B #Mn BUGINESE VOWEL SIGN AE
209 | 0xE1 0xA9 0x96 #Mn TAI THAM CONSONANT SIGN MEDIAL LA
210 | 0xE1 0xA9 0x98..0x9E #Mn [7] TAI THAM SIGN MAI KANG LAI..TAI TH...
211 | 0xE1 0xA9 0xA0 #Mn TAI THAM SIGN SAKOT
212 | 0xE1 0xA9 0xA2 #Mn TAI THAM VOWEL SIGN MAI SAT
213 | 0xE1 0xA9 0xA5..0xAC #Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VO...
214 | 0xE1 0xA9 0xB3..0xBC #Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI ...
215 | 0xE1 0xA9 0xBF #Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT
216 | 0xE1 0xAA 0xB0..0xBD #Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCEN...
217 | 0xE1 0xAA 0xBE #Me COMBINING PARENTHESES OVERLAY
218 | 0xE1 0xAC 0x80..0x83 #Mn [4] BALINESE SIGN ULU RICEM..BALINESE ...
219 | 0xE1 0xAC 0xB4 #Mn BALINESE SIGN REREKAN
220 | 0xE1 0xAC 0xB6..0xBA #Mn [5] BALINESE VOWEL SIGN ULU..BALINESE ...
221 | 0xE1 0xAC 0xBC #Mn BALINESE VOWEL SIGN LA LENGA
222 | 0xE1 0xAD 0x82 #Mn BALINESE VOWEL SIGN PEPET
223 | 0xE1 0xAD 0xAB..0xB3 #Mn [9] BALINESE MUSICAL SYMBOL COMBINING ...
224 | 0xE1 0xAE 0x80..0x81 #Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE...
225 | 0xE1 0xAE 0xA2..0xA5 #Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA....
226 | 0xE1 0xAE 0xA8..0xA9 #Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUND...
227 | 0xE1 0xAE 0xAB..0xAD #Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE C...
228 | 0xE1 0xAF 0xA6 #Mn BATAK SIGN TOMPI
229 | 0xE1 0xAF 0xA8..0xA9 #Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK V...
230 | 0xE1 0xAF 0xAD #Mn BATAK VOWEL SIGN KARO O
231 | 0xE1 0xAF 0xAF..0xB1 #Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN ...
232 | 0xE1 0xB0 0xAC..0xB3 #Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSON...
233 | 0xE1 0xB0 0xB6..0xB7 #Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA
234 | 0xE1 0xB3 0x90..0x92 #Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PR...
235 | 0xE1 0xB3 0x94..0xA0 #Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVAR...
236 | 0xE1 0xB3 0xA2..0xA8 #Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC ...
237 | 0xE1 0xB3 0xAD #Mn VEDIC SIGN TIRYAK
238 | 0xE1 0xB3 0xB4 #Mn VEDIC TONE CANDRA ABOVE
239 | 0xE1 0xB3 0xB8..0xB9 #Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE ...
240 | 0xE1 0xB7 0x80..0xB5 #Mn [54] COMBINING DOTTED GRAVE ACCENT..COM...
241 | 0xE1 0xB7 0xBB..0xBF #Mn [5] COMBINING DELETION MARK..COMBINING...
242 | 0xE2 0x80 0x8C #Cf ZERO WIDTH NON-JOINER
243 | 0xE2 0x83 0x90..0x9C #Mn [13] COMBINING LEFT HARPOON ABOVE..COMB...
244 | 0xE2 0x83 0x9D..0xA0 #Me [4] COMBINING ENCLOSING CIRCLE..COMBIN...
245 | 0xE2 0x83 0xA1 #Mn COMBINING LEFT RIGHT ARROW ABOVE
246 | 0xE2 0x83 0xA2..0xA4 #Me [3] COMBINING ENCLOSING SCREEN..COMBIN...
247 | 0xE2 0x83 0xA5..0xB0 #Mn [12] COMBINING REVERSE SOLIDUS OVERLAY....
248 | 0xE2 0xB3 0xAF..0xB1 #Mn [3] COPTIC COMBINING NI ABOVE..COPTIC ...
249 | 0xE2 0xB5 0xBF #Mn TIFINAGH CONSONANT JOINER
250 | 0xE2 0xB7 0xA0..0xBF #Mn [32] COMBINING CYRILLIC LETTER BE..COMB...
251 | 0xE3 0x80 0xAA..0xAD #Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOG...
252 | 0xE3 0x80 0xAE..0xAF #Mc [2] HANGUL SINGLE DOT TONE MARK..HANGU...
253 | 0xE3 0x82 0x99..0x9A #Mn [2] COMBINING KATAKANA-HIRAGANA VOICED...
254 | 0xEA 0x99 0xAF #Mn COMBINING CYRILLIC VZMET
255 | 0xEA 0x99 0xB0..0xB2 #Me [3] COMBINING CYRILLIC TEN MILLIONS SI...
256 | 0xEA 0x99 0xB4..0xBD #Mn [10] COMBINING CYRILLIC LETTER UKRAINIA...
257 | 0xEA 0x9A 0x9E..0x9F #Mn [2] COMBINING CYRILLIC LETTER EF..COMB...
258 | 0xEA 0x9B 0xB0..0xB1 #Mn [2] BAMUM COMBINING MARK KOQNDON..BAMU...
259 | 0xEA 0xA0 0x82 #Mn SYLOTI NAGRI SIGN DVISVARA
260 | 0xEA 0xA0 0x86 #Mn SYLOTI NAGRI SIGN HASANTA
261 | 0xEA 0xA0 0x8B #Mn SYLOTI NAGRI SIGN ANUSVARA
262 | 0xEA 0xA0 0xA5..0xA6 #Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI ...
263 | 0xEA 0xA3 0x84..0x85 #Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA...
264 | 0xEA 0xA3 0xA0..0xB1 #Mn [18] COMBINING DEVANAGARI DIGIT ZERO..C...
265 | 0xEA 0xA4 0xA6..0xAD #Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE C...
266 | 0xEA 0xA5 0x87..0x91 #Mn [11] REJANG VOWEL SIGN I..REJANG CONSON...
267 | 0xEA 0xA6 0x80..0x82 #Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE ...
268 | 0xEA 0xA6 0xB3 #Mn JAVANESE SIGN CECAK TELU
269 | 0xEA 0xA6 0xB6..0xB9 #Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE...
270 | 0xEA 0xA6 0xBC #Mn JAVANESE VOWEL SIGN PEPET
271 | 0xEA 0xA7 0xA5 #Mn MYANMAR SIGN SHAN SAW
272 | 0xEA 0xA8 0xA9..0xAE #Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIG...
273 | 0xEA 0xA8 0xB1..0xB2 #Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIG...
274 | 0xEA 0xA8 0xB5..0xB6 #Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSO...
275 | 0xEA 0xA9 0x83 #Mn CHAM CONSONANT SIGN FINAL NG
276 | 0xEA 0xA9 0x8C #Mn CHAM CONSONANT SIGN FINAL M
277 | 0xEA 0xA9 0xBC #Mn MYANMAR SIGN TAI LAING TONE-2
278 | 0xEA 0xAA 0xB0 #Mn TAI VIET MAI KANG
279 | 0xEA 0xAA 0xB2..0xB4 #Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U
280 | 0xEA 0xAA 0xB7..0xB8 #Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA
281 | 0xEA 0xAA 0xBE..0xBF #Mn [2] TAI VIET VOWEL AM..TAI VIET TONE M...
282 | 0xEA 0xAB 0x81 #Mn TAI VIET TONE MAI THO
283 | 0xEA 0xAB 0xAC..0xAD #Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI...
284 | 0xEA 0xAB 0xB6 #Mn MEETEI MAYEK VIRAMA
285 | 0xEA 0xAF 0xA5 #Mn MEETEI MAYEK VOWEL SIGN ANAP
286 | 0xEA 0xAF 0xA8 #Mn MEETEI MAYEK VOWEL SIGN UNAP
287 | 0xEA 0xAF 0xAD #Mn MEETEI MAYEK APUN IYEK
288 | 0xEF 0xAC 0x9E #Mn HEBREW POINT JUDEO-SPANISH VARIKA
289 | 0xEF 0xB8 0x80..0x8F #Mn [16] VARIATION SELECTOR-1..VARIATION SE...
290 | 0xEF 0xB8 0xA0..0xAF #Mn [16] COMBINING LIGATURE LEFT HALF..COMB...
291 | 0xEF 0xBE 0x9E..0x9F #Lm [2] HALFWIDTH KATAKANA VOICED SOUND MA...
292 | 0xF0 0x90 0x87 0xBD #Mn PHAISTOS DISC SIGN COMBINING OBLIQ...
293 | 0xF0 0x90 0x8B 0xA0 #Mn COPTIC EPACT THOUSANDS MARK
294 | 0xF0 0x90 0x8D 0xB6..0xBA #Mn [5] COMBINING OLD PERMIC LETTER AN....
295 | 0xF0 0x90 0xA8 0x81..0x83 #Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROS...
296 | 0xF0 0x90 0xA8 0x85..0x86 #Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROS...
297 | 0xF0 0x90 0xA8 0x8C..0x8F #Mn [4] KHAROSHTHI VOWEL LENGTH MARK..K...
298 | 0xF0 0x90 0xA8 0xB8..0xBA #Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAR...
299 | 0xF0 0x90 0xA8 0xBF #Mn KHAROSHTHI VIRAMA
300 | 0xF0 0x90 0xAB 0xA5..0xA6 #Mn [2] MANICHAEAN ABBREVIATION MARK AB...
301 | 0xF0 0x91 0x80 0x81 #Mn BRAHMI SIGN ANUSVARA
302 | 0xF0 0x91 0x80 0xB8..0xFF #Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VI...
303 | 0xF0 0x91 0x81 0x00..0x86 #
304 | 0xF0 0x91 0x81 0xBF..0xFF #Mn [3] BRAHMI NUMBER JOINER..KAITHI SI...
305 | 0xF0 0x91 0x82 0x00..0x81 #
306 | 0xF0 0x91 0x82 0xB3..0xB6 #Mn [4] KAITHI VOWEL SIGN U..KAITHI VOW...
307 | 0xF0 0x91 0x82 0xB9..0xBA #Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN...
308 | 0xF0 0x91 0x84 0x80..0x82 #Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA...
309 | 0xF0 0x91 0x84 0xA7..0xAB #Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOW...
310 | 0xF0 0x91 0x84 0xAD..0xB4 #Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MA...
311 | 0xF0 0x91 0x85 0xB3 #Mn MAHAJANI SIGN NUKTA
312 | 0xF0 0x91 0x86 0x80..0x81 #Mn [2] SHARADA SIGN CANDRABINDU..SHARA...
313 | 0xF0 0x91 0x86 0xB6..0xBE #Mn [9] SHARADA VOWEL SIGN U..SHARADA V...
314 | 0xF0 0x91 0x87 0x8A..0x8C #Mn [3] SHARADA SIGN NUKTA..SHARADA EXT...
315 | 0xF0 0x91 0x88 0xAF..0xB1 #Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOW...
316 | 0xF0 0x91 0x88 0xB4 #Mn KHOJKI SIGN ANUSVARA
317 | 0xF0 0x91 0x88 0xB6..0xB7 #Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN ...
318 | 0xF0 0x91 0x88 0xBE #Mn KHOJKI SIGN SUKUN
319 | 0xF0 0x91 0x8B 0x9F #Mn KHUDAWADI SIGN ANUSVARA
320 | 0xF0 0x91 0x8B 0xA3..0xAA #Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWA...
321 | 0xF0 0x91 0x8C 0x80..0x81 #Mn [2] GRANTHA SIGN COMBINING ANUSVARA...
322 | 0xF0 0x91 0x8C 0xBC #Mn GRANTHA SIGN NUKTA
323 | 0xF0 0x91 0x8C 0xBE #Mc GRANTHA VOWEL SIGN AA
324 | 0xF0 0x91 0x8D 0x80 #Mn GRANTHA VOWEL SIGN II
325 | 0xF0 0x91 0x8D 0x97 #Mc GRANTHA AU LENGTH MARK
326 | 0xF0 0x91 0x8D 0xA6..0xAC #Mn [7] COMBINING GRANTHA DIGIT ZERO..C...
327 | 0xF0 0x91 0x8D 0xB0..0xB4 #Mn [5] COMBINING GRANTHA LETTER A..COM...
328 | 0xF0 0x91 0x90 0xB8..0xBF #Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL S...
329 | 0xF0 0x91 0x91 0x82..0x84 #Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANU...
330 | 0xF0 0x91 0x91 0x86 #Mn NEWA SIGN NUKTA
331 | 0xF0 0x91 0x92 0xB0 #Mc TIRHUTA VOWEL SIGN AA
332 | 0xF0 0x91 0x92 0xB3..0xB8 #Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA V...
333 | 0xF0 0x91 0x92 0xBA #Mn TIRHUTA VOWEL SIGN SHORT E
334 | 0xF0 0x91 0x92 0xBD #Mc TIRHUTA VOWEL SIGN SHORT O
335 | 0xF0 0x91 0x92 0xBF..0xFF #Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHU...
336 | 0xF0 0x91 0x93 0x00..0x80 #
337 | 0xF0 0x91 0x93 0x82..0x83 #Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SI...
338 | 0xF0 0x91 0x96 0xAF #Mc SIDDHAM VOWEL SIGN AA
339 | 0xF0 0x91 0x96 0xB2..0xB5 #Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM V...
340 | 0xF0 0x91 0x96 0xBC..0xBD #Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDH...
341 | 0xF0 0x91 0x96 0xBF..0xFF #Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SI...
342 | 0xF0 0x91 0x97 0x00..0x80 #
343 | 0xF0 0x91 0x97 0x9C..0x9D #Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U....
344 | 0xF0 0x91 0x98 0xB3..0xBA #Mn [8] MODI VOWEL SIGN U..MODI VOWEL S...
345 | 0xF0 0x91 0x98 0xBD #Mn MODI SIGN ANUSVARA
346 | 0xF0 0x91 0x98 0xBF..0xFF #Mn [2] MODI SIGN VIRAMA..MODI SIGN ARD...
347 | 0xF0 0x91 0x99 0x00..0x80 #
348 | 0xF0 0x91 0x9A 0xAB #Mn TAKRI SIGN ANUSVARA
349 | 0xF0 0x91 0x9A 0xAD #Mn TAKRI VOWEL SIGN AA
350 | 0xF0 0x91 0x9A 0xB0..0xB5 #Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL...
351 | 0xF0 0x91 0x9A 0xB7 #Mn TAKRI SIGN NUKTA
352 | 0xF0 0x91 0x9C 0x9D..0x9F #Mn [3] AHOM CONSONANT SIGN MEDIAL LA.....
353 | 0xF0 0x91 0x9C 0xA2..0xA5 #Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL S...
354 | 0xF0 0x91 0x9C 0xA7..0xAB #Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN K...
355 | 0xF0 0x91 0xB0 0xB0..0xB6 #Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSU...
356 | 0xF0 0x91 0xB0 0xB8..0xBD #Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSU...
357 | 0xF0 0x91 0xB0 0xBF #Mn BHAIKSUKI SIGN VIRAMA
358 | 0xF0 0x91 0xB2 0x92..0xA7 #Mn [22] MARCHEN SUBJOINED LETTER KA..MA...
359 | 0xF0 0x91 0xB2 0xAA..0xB0 #Mn [7] MARCHEN SUBJOINED LETTER RA..MA...
360 | 0xF0 0x91 0xB2 0xB2..0xB3 #Mn [2] MARCHEN VOWEL SIGN U..MARCHEN V...
361 | 0xF0 0x91 0xB2 0xB5..0xB6 #Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN ...
362 | 0xF0 0x96 0xAB 0xB0..0xB4 #Mn [5] BASSA VAH COMBINING HIGH TONE.....
363 | 0xF0 0x96 0xAC 0xB0..0xB6 #Mn [7] PAHAWH HMONG MARK CIM TUB..PAHA...
364 | 0xF0 0x96 0xBE 0x8F..0x92 #Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW
365 | 0xF0 0x9B 0xB2 0x9D..0x9E #Mn [2] DUPLOYAN THICK LETTER SELECTOR....
366 | 0xF0 0x9D 0x85 0xA5 #Mc MUSICAL SYMBOL COMBINING STEM
367 | 0xF0 0x9D 0x85 0xA7..0xA9 #Mn [3] MUSICAL SYMBOL COMBINING TREMOL...
368 | 0xF0 0x9D 0x85 0xAE..0xB2 #Mc [5] MUSICAL SYMBOL COMBINING FLAG-1...
369 | 0xF0 0x9D 0x85 0xBB..0xFF #Mn [8] MUSICAL SYMBOL COMBINING ACCENT...
370 | 0xF0 0x9D 0x86 0x00..0x82 #
371 | 0xF0 0x9D 0x86 0x85..0x8B #Mn [7] MUSICAL SYMBOL COMBINING DOIT.....
372 | 0xF0 0x9D 0x86 0xAA..0xAD #Mn [4] MUSICAL SYMBOL COMBINING DOWN B...
373 | 0xF0 0x9D 0x89 0x82..0x84 #Mn [3] COMBINING GREEK MUSICAL TRISEME...
374 | 0xF0 0x9D 0xA8 0x80..0xB6 #Mn [55] SIGNWRITING HEAD RIM..SIGNWRITI...
375 | 0xF0 0x9D 0xA8 0xBB..0xFF #Mn [50] SIGNWRITING MOUTH CLOSED NEUTRA...
376 | 0xF0 0x9D 0xA9 0x00..0xAC #
377 | 0xF0 0x9D 0xA9 0xB5 #Mn SIGNWRITING UPPER BODY TILTING FRO...
378 | 0xF0 0x9D 0xAA 0x84 #Mn SIGNWRITING LOCATION HEAD NECK
379 | 0xF0 0x9D 0xAA 0x9B..0x9F #Mn [5] SIGNWRITING FILL MODIFIER-2..SI...
380 | 0xF0 0x9D 0xAA 0xA1..0xAF #Mn [15] SIGNWRITING ROTATION MODIFIER-2...
381 | 0xF0 0x9E 0x80 0x80..0x86 #Mn [7] COMBINING GLAGOLITIC LETTER AZU...
382 | 0xF0 0x9E 0x80 0x88..0x98 #Mn [17] COMBINING GLAGOLITIC LETTER ZEM...
383 | 0xF0 0x9E 0x80 0x9B..0xA1 #Mn [7] COMBINING GLAGOLITIC LETTER SHT...
384 | 0xF0 0x9E 0x80 0xA3..0xA4 #Mn [2] COMBINING GLAGOLITIC LETTER YU....
385 | 0xF0 0x9E 0x80 0xA6..0xAA #Mn [5] COMBINING GLAGOLITIC LETTER YO....
386 | 0xF0 0x9E 0xA3 0x90..0x96 #Mn [7] MENDE KIKAKUI COMBINING NUMBER ...
387 | 0xF0 0x9E 0xA5 0x84..0x8A #Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA
388 | 0xF3 0xA0 0x80 0xA0..0xFF #Cf [96] TAG SPACE..CANCEL TAG
389 | 0xF3 0xA0 0x81 0x00..0xBF #
390 | 0xF3 0xA0 0x84 0x80..0xFF #Mn [240] VARIATION SELECTOR-17..VA...
391 | 0xF3 0xA0 0x85..0x86 0x00..0xFF #
392 | 0xF3 0xA0 0x87 0x00..0xAF #
393 ;
394
395 Regional_Indicator =
396 0xF0 0x9F 0x87 0xA6..0xBF #So [26] REGIONAL INDICATOR SYMBOL LETTE...
397 ;
398
399 SpacingMark =
400 0xE0 0xA4 0x83 #Mc DEVANAGARI SIGN VISARGA
401 | 0xE0 0xA4 0xBB #Mc DEVANAGARI VOWEL SIGN OOE
402 | 0xE0 0xA4 0xBE..0xFF #Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGA...
403 | 0xE0 0xA5 0x00..0x80 #
404 | 0xE0 0xA5 0x89..0x8C #Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DE...
405 | 0xE0 0xA5 0x8E..0x8F #Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATR...
406 | 0xE0 0xA6 0x82..0x83 #Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIG...
407 | 0xE0 0xA6 0xBF..0xFF #Mc [2] BENGALI VOWEL SIGN I..BENGALI VOWE...
408 | 0xE0 0xA7 0x00..0x80 #
409 | 0xE0 0xA7 0x87..0x88 #Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWE...
410 | 0xE0 0xA7 0x8B..0x8C #Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWE...
411 | 0xE0 0xA8 0x83 #Mc GURMUKHI SIGN VISARGA
412 | 0xE0 0xA8 0xBE..0xFF #Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI V...
413 | 0xE0 0xA9 0x00..0x80 #
414 | 0xE0 0xAA 0x83 #Mc GUJARATI SIGN VISARGA
415 | 0xE0 0xAA 0xBE..0xFF #Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI V...
416 | 0xE0 0xAB 0x00..0x80 #
417 | 0xE0 0xAB 0x89 #Mc GUJARATI VOWEL SIGN CANDRA O
418 | 0xE0 0xAB 0x8B..0x8C #Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VO...
419 | 0xE0 0xAC 0x82..0x83 #Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VI...
420 | 0xE0 0xAD 0x80 #Mc ORIYA VOWEL SIGN II
421 | 0xE0 0xAD 0x87..0x88 #Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SI...
422 | 0xE0 0xAD 0x8B..0x8C #Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SI...
423 | 0xE0 0xAE 0xBF #Mc TAMIL VOWEL SIGN I
424 | 0xE0 0xAF 0x81..0x82 #Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SI...
425 | 0xE0 0xAF 0x86..0x88 #Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SI...
426 | 0xE0 0xAF 0x8A..0x8C #Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SI...
427 | 0xE0 0xB0 0x81..0x83 #Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SI...
428 | 0xE0 0xB1 0x81..0x84 #Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL ...
429 | 0xE0 0xB2 0x82..0x83 #Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIG...
430 | 0xE0 0xB2 0xBE #Mc KANNADA VOWEL SIGN AA
431 | 0xE0 0xB3 0x80..0x81 #Mc [2] KANNADA VOWEL SIGN II..KANNADA VOW...
432 | 0xE0 0xB3 0x83..0x84 #Mc [2] KANNADA VOWEL SIGN VOCALIC R..KANN...
433 | 0xE0 0xB3 0x87..0x88 #Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOW...
434 | 0xE0 0xB3 0x8A..0x8B #Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWE...
435 | 0xE0 0xB4 0x82..0x83 #Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM...
436 | 0xE0 0xB4 0xBF..0xFF #Mc [2] MALAYALAM VOWEL SIGN I..MALAYALAM ...
437 | 0xE0 0xB5 0x00..0x80 #
438 | 0xE0 0xB5 0x86..0x88 #Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM ...
439 | 0xE0 0xB5 0x8A..0x8C #Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM ...
440 | 0xE0 0xB6 0x82..0x83 #Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA S...
441 | 0xE0 0xB7 0x90..0x91 #Mc [2] SINHALA VOWEL SIGN KETTI AEDA-PILL...
442 | 0xE0 0xB7 0x98..0x9E #Mc [7] SINHALA VOWEL SIGN GAETTA-PILLA..S...
443 | 0xE0 0xB7 0xB2..0xB3 #Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PIL...
444 | 0xE0 0xB8 0xB3 #Lo THAI CHARACTER SARA AM
445 | 0xE0 0xBA 0xB3 #Lo LAO VOWEL SIGN AM
446 | 0xE0 0xBC 0xBE..0xBF #Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SI...
447 | 0xE0 0xBD 0xBF #Mc TIBETAN SIGN RNAM BCAD
448 | 0xE1 0x80 0xB1 #Mc MYANMAR VOWEL SIGN E
449 | 0xE1 0x80 0xBB..0xBC #Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA.....
450 | 0xE1 0x81 0x96..0x97 #Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYAN...
451 | 0xE1 0x82 0x84 #Mc MYANMAR VOWEL SIGN SHAN E
452 | 0xE1 0x9E 0xB6 #Mc KHMER VOWEL SIGN AA
453 | 0xE1 0x9E 0xBE..0xFF #Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL S...
454 | 0xE1 0x9F 0x00..0x85 #
455 | 0xE1 0x9F 0x87..0x88 #Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUU...
456 | 0xE1 0xA4 0xA3..0xA6 #Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL S...
457 | 0xE1 0xA4 0xA9..0xAB #Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU S...
458 | 0xE1 0xA4 0xB0..0xB1 #Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL...
459 | 0xE1 0xA4 0xB3..0xB8 #Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL...
460 | 0xE1 0xA8 0x99..0x9A #Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VO...
461 | 0xE1 0xA9 0x95 #Mc TAI THAM CONSONANT SIGN MEDIAL RA
462 | 0xE1 0xA9 0x97 #Mc TAI THAM CONSONANT SIGN LA TANG LAI
463 | 0xE1 0xA9 0xAD..0xB2 #Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM V...
464 | 0xE1 0xAC 0x84 #Mc BALINESE SIGN BISAH
465 | 0xE1 0xAC 0xB5 #Mc BALINESE VOWEL SIGN TEDUNG
466 | 0xE1 0xAC 0xBB #Mc BALINESE VOWEL SIGN RA REPA TEDUNG
467 | 0xE1 0xAC 0xBD..0xFF #Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUN...
468 | 0xE1 0xAD 0x00..0x81 #
469 | 0xE1 0xAD 0x83..0x84 #Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG.....
470 | 0xE1 0xAE 0x82 #Mc SUNDANESE SIGN PANGWISAD
471 | 0xE1 0xAE 0xA1 #Mc SUNDANESE CONSONANT SIGN PAMINGKAL
472 | 0xE1 0xAE 0xA6..0xA7 #Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..S...
473 | 0xE1 0xAE 0xAA #Mc SUNDANESE SIGN PAMAAEH
474 | 0xE1 0xAF 0xA7 #Mc BATAK VOWEL SIGN E
475 | 0xE1 0xAF 0xAA..0xAC #Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SI...
476 | 0xE1 0xAF 0xAE #Mc BATAK VOWEL SIGN U
477 | 0xE1 0xAF 0xB2..0xB3 #Mc [2] BATAK PANGOLAT..BATAK PANONGONAN
478 | 0xE1 0xB0 0xA4..0xAB #Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA...
479 | 0xE1 0xB0 0xB4..0xB5 #Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEP...
480 | 0xE1 0xB3 0xA1 #Mc VEDIC TONE ATHARVAVEDIC INDEPENDEN...
481 | 0xE1 0xB3 0xB2..0xB3 #Mc [2] VEDIC SIGN ARDHAVISARGA..VEDIC SIG...
482 | 0xEA 0xA0 0xA3..0xA4 #Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI ...
483 | 0xEA 0xA0 0xA7 #Mc SYLOTI NAGRI VOWEL SIGN OO
484 | 0xEA 0xA2 0x80..0x81 #Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHT...
485 | 0xEA 0xA2 0xB4..0xFF #Mc [16] SAURASHTRA CONSONANT SIGN HAARU..S...
486 | 0xEA 0xA3 0x00..0x83 #
487 | 0xEA 0xA5 0x92..0x93 #Mc [2] REJANG CONSONANT SIGN H..REJANG VI...
488 | 0xEA 0xA6 0x83 #Mc JAVANESE SIGN WIGNYAN
489 | 0xEA 0xA6 0xB4..0xB5 #Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANE...
490 | 0xEA 0xA6 0xBA..0xBB #Mc [2] JAVANESE VOWEL SIGN TALING..JAVANE...
491 | 0xEA 0xA6 0xBD..0xFF #Mc [4] JAVANESE CONSONANT SIGN KERET..JAV...
492 | 0xEA 0xA7 0x00..0x80 #
493 | 0xEA 0xA8 0xAF..0xB0 #Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI
494 | 0xEA 0xA8 0xB3..0xB4 #Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSO...
495 | 0xEA 0xA9 0x8D #Mc CHAM CONSONANT SIGN FINAL H
496 | 0xEA 0xAB 0xAB #Mc MEETEI MAYEK VOWEL SIGN II
497 | 0xEA 0xAB 0xAE..0xAF #Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI...
498 | 0xEA 0xAB 0xB5 #Mc MEETEI MAYEK VOWEL SIGN VISARGA
499 | 0xEA 0xAF 0xA3..0xA4 #Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEET...
500 | 0xEA 0xAF 0xA6..0xA7 #Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEE...
501 | 0xEA 0xAF 0xA9..0xAA #Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..M...
502 | 0xEA 0xAF 0xAC #Mc MEETEI MAYEK LUM IYEK
503 | 0xF0 0x91 0x80 0x80 #Mc BRAHMI SIGN CANDRABINDU
504 | 0xF0 0x91 0x80 0x82 #Mc BRAHMI SIGN VISARGA
505 | 0xF0 0x91 0x82 0x82 #Mc KAITHI SIGN VISARGA
506 | 0xF0 0x91 0x82 0xB0..0xB2 #Mc [3] KAITHI VOWEL SIGN AA..KAITHI VO...
507 | 0xF0 0x91 0x82 0xB7..0xB8 #Mc [2] KAITHI VOWEL SIGN O..KAITHI VOW...
508 | 0xF0 0x91 0x84 0xAC #Mc CHAKMA VOWEL SIGN E
509 | 0xF0 0x91 0x86 0x82 #Mc SHARADA SIGN VISARGA
510 | 0xF0 0x91 0x86 0xB3..0xB5 #Mc [3] SHARADA VOWEL SIGN AA..SHARADA ...
511 | 0xF0 0x91 0x86 0xBF..0xFF #Mc [2] SHARADA VOWEL SIGN AU..SHARADA ...
512 | 0xF0 0x91 0x87 0x00..0x80 #
513 | 0xF0 0x91 0x88 0xAC..0xAE #Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VO...
514 | 0xF0 0x91 0x88 0xB2..0xB3 #Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOW...
515 | 0xF0 0x91 0x88 0xB5 #Mc KHOJKI SIGN VIRAMA
516 | 0xF0 0x91 0x8B 0xA0..0xA2 #Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAW...
517 | 0xF0 0x91 0x8C 0x82..0x83 #Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA ...
518 | 0xF0 0x91 0x8C 0xBF #Mc GRANTHA VOWEL SIGN I
519 | 0xF0 0x91 0x8D 0x81..0x84 #Mc [4] GRANTHA VOWEL SIGN U..GRANTHA V...
520 | 0xF0 0x91 0x8D 0x87..0x88 #Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA ...
521 | 0xF0 0x91 0x8D 0x8B..0x8D #Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA ...
522 | 0xF0 0x91 0x8D 0xA2..0xA3 #Mc [2] GRANTHA VOWEL SIGN VOCALIC L..G...
523 | 0xF0 0x91 0x90 0xB5..0xB7 #Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL ...
524 | 0xF0 0x91 0x91 0x80..0x81 #Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL S...
525 | 0xF0 0x91 0x91 0x85 #Mc NEWA SIGN VISARGA
526 | 0xF0 0x91 0x92 0xB1..0xB2 #Mc [2] TIRHUTA VOWEL SIGN I..TIRHUTA V...
527 | 0xF0 0x91 0x92 0xB9 #Mc TIRHUTA VOWEL SIGN E
528 | 0xF0 0x91 0x92 0xBB..0xBC #Mc [2] TIRHUTA VOWEL SIGN AI..TIRHUTA ...
529 | 0xF0 0x91 0x92 0xBE #Mc TIRHUTA VOWEL SIGN AU
530 | 0xF0 0x91 0x93 0x81 #Mc TIRHUTA SIGN VISARGA
531 | 0xF0 0x91 0x96 0xB0..0xB1 #Mc [2] SIDDHAM VOWEL SIGN I..SIDDHAM V...
532 | 0xF0 0x91 0x96 0xB8..0xBB #Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM V...
533 | 0xF0 0x91 0x96 0xBE #Mc SIDDHAM SIGN VISARGA
534 | 0xF0 0x91 0x98 0xB0..0xB2 #Mc [3] MODI VOWEL SIGN AA..MODI VOWEL ...
535 | 0xF0 0x91 0x98 0xBB..0xBC #Mc [2] MODI VOWEL SIGN O..MODI VOWEL S...
536 | 0xF0 0x91 0x98 0xBE #Mc MODI SIGN VISARGA
537 | 0xF0 0x91 0x9A 0xAC #Mc TAKRI SIGN VISARGA
538 | 0xF0 0x91 0x9A 0xAE..0xAF #Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL...
539 | 0xF0 0x91 0x9A 0xB6 #Mc TAKRI SIGN VIRAMA
540 | 0xF0 0x91 0x9C 0xA0..0xA1 #Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL S...
541 | 0xF0 0x91 0x9C 0xA6 #Mc AHOM VOWEL SIGN E
542 | 0xF0 0x91 0xB0 0xAF #Mc BHAIKSUKI VOWEL SIGN AA
543 | 0xF0 0x91 0xB0 0xBE #Mc BHAIKSUKI SIGN VISARGA
544 | 0xF0 0x91 0xB2 0xA9 #Mc MARCHEN SUBJOINED LETTER YA
545 | 0xF0 0x91 0xB2 0xB1 #Mc MARCHEN VOWEL SIGN I
546 | 0xF0 0x91 0xB2 0xB4 #Mc MARCHEN VOWEL SIGN O
547 | 0xF0 0x96 0xBD 0x91..0xBE #Mc [46] MIAO SIGN ASPIRATION..MIAO VOWE...
548 | 0xF0 0x9D 0x85 0xA6 #Mc MUSICAL SYMBOL COMBINING SPRECHGES...
549 | 0xF0 0x9D 0x85 0xAD #Mc MUSICAL SYMBOL COMBINING AUGMENTAT...
550 ;
551
552 L =
553 0xE1 0x84 0x80..0xFF #Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHO...
554 | 0xE1 0x85 0x00..0x9F #
555 | 0xEA 0xA5 0xA0..0xBC #Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANG...
556 ;
557
558 V =
559 0xE1 0x85 0xA0..0xFF #Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JU...
560 | 0xE1 0x86 0x00..0xA7 #
561 | 0xED 0x9E 0xB0..0xFF #Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUN...
562 | 0xED 0x9F 0x00..0x86 #
563 ;
564
565 T =
566 0xE1 0x86 0xA8..0xFF #Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JO...
567 | 0xE1 0x87 0x00..0xBF #
568 | 0xED 0x9F 0x8B..0xBB #Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANG...
569 ;
570
571 LV =
572 0xEA 0xB0 0x80 #Lo HANGUL SYLLABLE GA
573 | 0xEA 0xB0 0x9C #Lo HANGUL SYLLABLE GAE
574 | 0xEA 0xB0 0xB8 #Lo HANGUL SYLLABLE GYA
575 | 0xEA 0xB1 0x94 #Lo HANGUL SYLLABLE GYAE
576 | 0xEA 0xB1 0xB0 #Lo HANGUL SYLLABLE GEO
577 | 0xEA 0xB2 0x8C #Lo HANGUL SYLLABLE GE
578 | 0xEA 0xB2 0xA8 #Lo HANGUL SYLLABLE GYEO
579 | 0xEA 0xB3 0x84 #Lo HANGUL SYLLABLE GYE
580 | 0xEA 0xB3 0xA0 #Lo HANGUL SYLLABLE GO
581 | 0xEA 0xB3 0xBC #Lo HANGUL SYLLABLE GWA
582 | 0xEA 0xB4 0x98 #Lo HANGUL SYLLABLE GWAE
583 | 0xEA 0xB4 0xB4 #Lo HANGUL SYLLABLE GOE
584 | 0xEA 0xB5 0x90 #Lo HANGUL SYLLABLE GYO
585 | 0xEA 0xB5 0xAC #Lo HANGUL SYLLABLE GU
586 | 0xEA 0xB6 0x88 #Lo HANGUL SYLLABLE GWEO
587 | 0xEA 0xB6 0xA4 #Lo HANGUL SYLLABLE GWE
588 | 0xEA 0xB7 0x80 #Lo HANGUL SYLLABLE GWI
589 | 0xEA 0xB7 0x9C #Lo HANGUL SYLLABLE GYU
590 | 0xEA 0xB7 0xB8 #Lo HANGUL SYLLABLE GEU
591 | 0xEA 0xB8 0x94 #Lo HANGUL SYLLABLE GYI
592 | 0xEA 0xB8 0xB0 #Lo HANGUL SYLLABLE GI
593 | 0xEA 0xB9 0x8C #Lo HANGUL SYLLABLE GGA
594 | 0xEA 0xB9 0xA8 #Lo HANGUL SYLLABLE GGAE
595 | 0xEA 0xBA 0x84 #Lo HANGUL SYLLABLE GGYA
596 | 0xEA 0xBA 0xA0 #Lo HANGUL SYLLABLE GGYAE
597 | 0xEA 0xBA 0xBC #Lo HANGUL SYLLABLE GGEO
598 | 0xEA 0xBB 0x98 #Lo HANGUL SYLLABLE GGE
599 | 0xEA 0xBB 0xB4 #Lo HANGUL SYLLABLE GGYEO
600 | 0xEA 0xBC 0x90 #Lo HANGUL SYLLABLE GGYE
601 | 0xEA 0xBC 0xAC #Lo HANGUL SYLLABLE GGO
602 | 0xEA 0xBD 0x88 #Lo HANGUL SYLLABLE GGWA
603 | 0xEA 0xBD 0xA4 #Lo HANGUL SYLLABLE GGWAE
604 | 0xEA 0xBE 0x80 #Lo HANGUL SYLLABLE GGOE
605 | 0xEA 0xBE 0x9C #Lo HANGUL SYLLABLE GGYO
606 | 0xEA 0xBE 0xB8 #Lo HANGUL SYLLABLE GGU
607 | 0xEA 0xBF 0x94 #Lo HANGUL SYLLABLE GGWEO
608 | 0xEA 0xBF 0xB0 #Lo HANGUL SYLLABLE GGWE
609 | 0xEB 0x80 0x8C #Lo HANGUL SYLLABLE GGWI
610 | 0xEB 0x80 0xA8 #Lo HANGUL SYLLABLE GGYU
611 | 0xEB 0x81 0x84 #Lo HANGUL SYLLABLE GGEU
612 | 0xEB 0x81 0xA0 #Lo HANGUL SYLLABLE GGYI
613 | 0xEB 0x81 0xBC #Lo HANGUL SYLLABLE GGI
614 | 0xEB 0x82 0x98 #Lo HANGUL SYLLABLE NA
615 | 0xEB 0x82 0xB4 #Lo HANGUL SYLLABLE NAE
616 | 0xEB 0x83 0x90 #Lo HANGUL SYLLABLE NYA
617 | 0xEB 0x83 0xAC #Lo HANGUL SYLLABLE NYAE
618 | 0xEB 0x84 0x88 #Lo HANGUL SYLLABLE NEO
619 | 0xEB 0x84 0xA4 #Lo HANGUL SYLLABLE NE
620 | 0xEB 0x85 0x80 #Lo HANGUL SYLLABLE NYEO
621 | 0xEB 0x85 0x9C #Lo HANGUL SYLLABLE NYE
622 | 0xEB 0x85 0xB8 #Lo HANGUL SYLLABLE NO
623 | 0xEB 0x86 0x94 #Lo HANGUL SYLLABLE NWA
624 | 0xEB 0x86 0xB0 #Lo HANGUL SYLLABLE NWAE
625 | 0xEB 0x87 0x8C #Lo HANGUL SYLLABLE NOE
626 | 0xEB 0x87 0xA8 #Lo HANGUL SYLLABLE NYO
627 | 0xEB 0x88 0x84 #Lo HANGUL SYLLABLE NU
628 | 0xEB 0x88 0xA0 #Lo HANGUL SYLLABLE NWEO
629 | 0xEB 0x88 0xBC #Lo HANGUL SYLLABLE NWE
630 | 0xEB 0x89 0x98 #Lo HANGUL SYLLABLE NWI
631 | 0xEB 0x89 0xB4 #Lo HANGUL SYLLABLE NYU
632 | 0xEB 0x8A 0x90 #Lo HANGUL SYLLABLE NEU
633 | 0xEB 0x8A 0xAC #Lo HANGUL SYLLABLE NYI
634 | 0xEB 0x8B 0x88 #Lo HANGUL SYLLABLE NI
635 | 0xEB 0x8B 0xA4 #Lo HANGUL SYLLABLE DA
636 | 0xEB 0x8C 0x80 #Lo HANGUL SYLLABLE DAE
637 | 0xEB 0x8C 0x9C #Lo HANGUL SYLLABLE DYA
638 | 0xEB 0x8C 0xB8 #Lo HANGUL SYLLABLE DYAE
639 | 0xEB 0x8D 0x94 #Lo HANGUL SYLLABLE DEO
640 | 0xEB 0x8D 0xB0 #Lo HANGUL SYLLABLE DE
641 | 0xEB 0x8E 0x8C #Lo HANGUL SYLLABLE DYEO
642 | 0xEB 0x8E 0xA8 #Lo HANGUL SYLLABLE DYE
643 | 0xEB 0x8F 0x84 #Lo HANGUL SYLLABLE DO
644 | 0xEB 0x8F 0xA0 #Lo HANGUL SYLLABLE DWA
645 | 0xEB 0x8F 0xBC #Lo HANGUL SYLLABLE DWAE
646 | 0xEB 0x90 0x98 #Lo HANGUL SYLLABLE DOE
647 | 0xEB 0x90 0xB4 #Lo HANGUL SYLLABLE DYO
648 | 0xEB 0x91 0x90 #Lo HANGUL SYLLABLE DU
649 | 0xEB 0x91 0xAC #Lo HANGUL SYLLABLE DWEO
650 | 0xEB 0x92 0x88 #Lo HANGUL SYLLABLE DWE
651 | 0xEB 0x92 0xA4 #Lo HANGUL SYLLABLE DWI
652 | 0xEB 0x93 0x80 #Lo HANGUL SYLLABLE DYU
653 | 0xEB 0x93 0x9C #Lo HANGUL SYLLABLE DEU
654 | 0xEB 0x93 0xB8 #Lo HANGUL SYLLABLE DYI
655 | 0xEB 0x94 0x94 #Lo HANGUL SYLLABLE DI
656 | 0xEB 0x94 0xB0 #Lo HANGUL SYLLABLE DDA
657 | 0xEB 0x95 0x8C #Lo HANGUL SYLLABLE DDAE
658 | 0xEB 0x95 0xA8 #Lo HANGUL SYLLABLE DDYA
659 | 0xEB 0x96 0x84 #Lo HANGUL SYLLABLE DDYAE
660 | 0xEB 0x96 0xA0 #Lo HANGUL SYLLABLE DDEO
661 | 0xEB 0x96 0xBC #Lo HANGUL SYLLABLE DDE
662 | 0xEB 0x97 0x98 #Lo HANGUL SYLLABLE DDYEO
663 | 0xEB 0x97 0xB4 #Lo HANGUL SYLLABLE DDYE
664 | 0xEB 0x98 0x90 #Lo HANGUL SYLLABLE DDO
665 | 0xEB 0x98 0xAC #Lo HANGUL SYLLABLE DDWA
666 | 0xEB 0x99 0x88 #Lo HANGUL SYLLABLE DDWAE
667 | 0xEB 0x99 0xA4 #Lo HANGUL SYLLABLE DDOE
668 | 0xEB 0x9A 0x80 #Lo HANGUL SYLLABLE DDYO
669 | 0xEB 0x9A 0x9C #Lo HANGUL SYLLABLE DDU
670 | 0xEB 0x9A 0xB8 #Lo HANGUL SYLLABLE DDWEO
671 | 0xEB 0x9B 0x94 #Lo HANGUL SYLLABLE DDWE
672 | 0xEB 0x9B 0xB0 #Lo HANGUL SYLLABLE DDWI
673 | 0xEB 0x9C 0x8C #Lo HANGUL SYLLABLE DDYU
674 | 0xEB 0x9C 0xA8 #Lo HANGUL SYLLABLE DDEU
675 | 0xEB 0x9D 0x84 #Lo HANGUL SYLLABLE DDYI
676 | 0xEB 0x9D 0xA0 #Lo HANGUL SYLLABLE DDI
677 | 0xEB 0x9D 0xBC #Lo HANGUL SYLLABLE RA
678 | 0xEB 0x9E 0x98 #Lo HANGUL SYLLABLE RAE
679 | 0xEB 0x9E 0xB4 #Lo HANGUL SYLLABLE RYA
680 | 0xEB 0x9F 0x90 #Lo HANGUL SYLLABLE RYAE
681 | 0xEB 0x9F 0xAC #Lo HANGUL SYLLABLE REO
682 | 0xEB 0xA0 0x88 #Lo HANGUL SYLLABLE RE
683 | 0xEB 0xA0 0xA4 #Lo HANGUL SYLLABLE RYEO
684 | 0xEB 0xA1 0x80 #Lo HANGUL SYLLABLE RYE
685 | 0xEB 0xA1 0x9C #Lo HANGUL SYLLABLE RO
686 | 0xEB 0xA1 0xB8 #Lo HANGUL SYLLABLE RWA
687 | 0xEB 0xA2 0x94 #Lo HANGUL SYLLABLE RWAE
688 | 0xEB 0xA2 0xB0 #Lo HANGUL SYLLABLE ROE
689 | 0xEB 0xA3 0x8C #Lo HANGUL SYLLABLE RYO
690 | 0xEB 0xA3 0xA8 #Lo HANGUL SYLLABLE RU
691 | 0xEB 0xA4 0x84 #Lo HANGUL SYLLABLE RWEO
692 | 0xEB 0xA4 0xA0 #Lo HANGUL SYLLABLE RWE
693 | 0xEB 0xA4 0xBC #Lo HANGUL SYLLABLE RWI
694 | 0xEB 0xA5 0x98 #Lo HANGUL SYLLABLE RYU
695 | 0xEB 0xA5 0xB4 #Lo HANGUL SYLLABLE REU
696 | 0xEB 0xA6 0x90 #Lo HANGUL SYLLABLE RYI
697 | 0xEB 0xA6 0xAC #Lo HANGUL SYLLABLE RI
698 | 0xEB 0xA7 0x88 #Lo HANGUL SYLLABLE MA
699 | 0xEB 0xA7 0xA4 #Lo HANGUL SYLLABLE MAE
700 | 0xEB 0xA8 0x80 #Lo HANGUL SYLLABLE MYA
701 | 0xEB 0xA8 0x9C #Lo HANGUL SYLLABLE MYAE
702 | 0xEB 0xA8 0xB8 #Lo HANGUL SYLLABLE MEO
703 | 0xEB 0xA9 0x94 #Lo HANGUL SYLLABLE ME
704 | 0xEB 0xA9 0xB0 #Lo HANGUL SYLLABLE MYEO
705 | 0xEB 0xAA 0x8C #Lo HANGUL SYLLABLE MYE
706 | 0xEB 0xAA 0xA8 #Lo HANGUL SYLLABLE MO
707 | 0xEB 0xAB 0x84 #Lo HANGUL SYLLABLE MWA
708 | 0xEB 0xAB 0xA0 #Lo HANGUL SYLLABLE MWAE
709 | 0xEB 0xAB 0xBC #Lo HANGUL SYLLABLE MOE
710 | 0xEB 0xAC 0x98 #Lo HANGUL SYLLABLE MYO
711 | 0xEB 0xAC 0xB4 #Lo HANGUL SYLLABLE MU
712 | 0xEB 0xAD 0x90 #Lo HANGUL SYLLABLE MWEO
713 | 0xEB 0xAD 0xAC #Lo HANGUL SYLLABLE MWE
714 | 0xEB 0xAE 0x88 #Lo HANGUL SYLLABLE MWI
715 | 0xEB 0xAE 0xA4 #Lo HANGUL SYLLABLE MYU
716 | 0xEB 0xAF 0x80 #Lo HANGUL SYLLABLE MEU
717 | 0xEB 0xAF 0x9C #Lo HANGUL SYLLABLE MYI
718 | 0xEB 0xAF 0xB8 #Lo HANGUL SYLLABLE MI
719 | 0xEB 0xB0 0x94 #Lo HANGUL SYLLABLE BA
720 | 0xEB 0xB0 0xB0 #Lo HANGUL SYLLABLE BAE
721 | 0xEB 0xB1 0x8C #Lo HANGUL SYLLABLE BYA
722 | 0xEB 0xB1 0xA8 #Lo HANGUL SYLLABLE BYAE
723 | 0xEB 0xB2 0x84 #Lo HANGUL SYLLABLE BEO
724 | 0xEB 0xB2 0xA0 #Lo HANGUL SYLLABLE BE
725 | 0xEB 0xB2 0xBC #Lo HANGUL SYLLABLE BYEO
726 | 0xEB 0xB3 0x98 #Lo HANGUL SYLLABLE BYE
727 | 0xEB 0xB3 0xB4 #Lo HANGUL SYLLABLE BO
728 | 0xEB 0xB4 0x90 #Lo HANGUL SYLLABLE BWA
729 | 0xEB 0xB4 0xAC #Lo HANGUL SYLLABLE BWAE
730 | 0xEB 0xB5 0x88 #Lo HANGUL SYLLABLE BOE
731 | 0xEB 0xB5 0xA4 #Lo HANGUL SYLLABLE BYO
732 | 0xEB 0xB6 0x80 #Lo HANGUL SYLLABLE BU
733 | 0xEB 0xB6 0x9C #Lo HANGUL SYLLABLE BWEO
734 | 0xEB 0xB6 0xB8 #Lo HANGUL SYLLABLE BWE
735 | 0xEB 0xB7 0x94 #Lo HANGUL SYLLABLE BWI
736 | 0xEB 0xB7 0xB0 #Lo HANGUL SYLLABLE BYU
737 | 0xEB 0xB8 0x8C #Lo HANGUL SYLLABLE BEU
738 | 0xEB 0xB8 0xA8 #Lo HANGUL SYLLABLE BYI
739 | 0xEB 0xB9 0x84 #Lo HANGUL SYLLABLE BI
740 | 0xEB 0xB9 0xA0 #Lo HANGUL SYLLABLE BBA
741 | 0xEB 0xB9 0xBC #Lo HANGUL SYLLABLE BBAE
742 | 0xEB 0xBA 0x98 #Lo HANGUL SYLLABLE BBYA
743 | 0xEB 0xBA 0xB4 #Lo HANGUL SYLLABLE BBYAE
744 | 0xEB 0xBB 0x90 #Lo HANGUL SYLLABLE BBEO
745 | 0xEB 0xBB 0xAC #Lo HANGUL SYLLABLE BBE
746 | 0xEB 0xBC 0x88 #Lo HANGUL SYLLABLE BBYEO
747 | 0xEB 0xBC 0xA4 #Lo HANGUL SYLLABLE BBYE
748 | 0xEB 0xBD 0x80 #Lo HANGUL SYLLABLE BBO
749 | 0xEB 0xBD 0x9C #Lo HANGUL SYLLABLE BBWA
750 | 0xEB 0xBD 0xB8 #Lo HANGUL SYLLABLE BBWAE
751 | 0xEB 0xBE 0x94 #Lo HANGUL SYLLABLE BBOE
752 | 0xEB 0xBE 0xB0 #Lo HANGUL SYLLABLE BBYO
753 | 0xEB 0xBF 0x8C #Lo HANGUL SYLLABLE BBU
754 | 0xEB 0xBF 0xA8 #Lo HANGUL SYLLABLE BBWEO
755 | 0xEC 0x80 0x84 #Lo HANGUL SYLLABLE BBWE
756 | 0xEC 0x80 0xA0 #Lo HANGUL SYLLABLE BBWI
757 | 0xEC 0x80 0xBC #Lo HANGUL SYLLABLE BBYU
758 | 0xEC 0x81 0x98 #Lo HANGUL SYLLABLE BBEU
759 | 0xEC 0x81 0xB4 #Lo HANGUL SYLLABLE BBYI
760 | 0xEC 0x82 0x90 #Lo HANGUL SYLLABLE BBI
761 | 0xEC 0x82 0xAC #Lo HANGUL SYLLABLE SA
762 | 0xEC 0x83 0x88 #Lo HANGUL SYLLABLE SAE
763 | 0xEC 0x83 0xA4 #Lo HANGUL SYLLABLE SYA
764 | 0xEC 0x84 0x80 #Lo HANGUL SYLLABLE SYAE
765 | 0xEC 0x84 0x9C #Lo HANGUL SYLLABLE SEO
766 | 0xEC 0x84 0xB8 #Lo HANGUL SYLLABLE SE
767 | 0xEC 0x85 0x94 #Lo HANGUL SYLLABLE SYEO
768 | 0xEC 0x85 0xB0 #Lo HANGUL SYLLABLE SYE
769 | 0xEC 0x86 0x8C #Lo HANGUL SYLLABLE SO
770 | 0xEC 0x86 0xA8 #Lo HANGUL SYLLABLE SWA
771 | 0xEC 0x87 0x84 #Lo HANGUL SYLLABLE SWAE
772 | 0xEC 0x87 0xA0 #Lo HANGUL SYLLABLE SOE
773 | 0xEC 0x87 0xBC #Lo HANGUL SYLLABLE SYO
774 | 0xEC 0x88 0x98 #Lo HANGUL SYLLABLE SU
775 | 0xEC 0x88 0xB4 #Lo HANGUL SYLLABLE SWEO
776 | 0xEC 0x89 0x90 #Lo HANGUL SYLLABLE SWE
777 | 0xEC 0x89 0xAC #Lo HANGUL SYLLABLE SWI
778 | 0xEC 0x8A 0x88 #Lo HANGUL SYLLABLE SYU
779 | 0xEC 0x8A 0xA4 #Lo HANGUL SYLLABLE SEU
780 | 0xEC 0x8B 0x80 #Lo HANGUL SYLLABLE SYI
781 | 0xEC 0x8B 0x9C #Lo HANGUL SYLLABLE SI
782 | 0xEC 0x8B 0xB8 #Lo HANGUL SYLLABLE SSA
783 | 0xEC 0x8C 0x94 #Lo HANGUL SYLLABLE SSAE
784 | 0xEC 0x8C 0xB0 #Lo HANGUL SYLLABLE SSYA
785 | 0xEC 0x8D 0x8C #Lo HANGUL SYLLABLE SSYAE
786 | 0xEC 0x8D 0xA8 #Lo HANGUL SYLLABLE SSEO
787 | 0xEC 0x8E 0x84 #Lo HANGUL SYLLABLE SSE
788 | 0xEC 0x8E 0xA0 #Lo HANGUL SYLLABLE SSYEO
789 | 0xEC 0x8E 0xBC #Lo HANGUL SYLLABLE SSYE
790 | 0xEC 0x8F 0x98 #Lo HANGUL SYLLABLE SSO
791 | 0xEC 0x8F 0xB4 #Lo HANGUL SYLLABLE SSWA
792 | 0xEC 0x90 0x90 #Lo HANGUL SYLLABLE SSWAE
793 | 0xEC 0x90 0xAC #Lo HANGUL SYLLABLE SSOE
794 | 0xEC 0x91 0x88 #Lo HANGUL SYLLABLE SSYO
795 | 0xEC 0x91 0xA4 #Lo HANGUL SYLLABLE SSU
796 | 0xEC 0x92 0x80 #Lo HANGUL SYLLABLE SSWEO
797 | 0xEC 0x92 0x9C #Lo HANGUL SYLLABLE SSWE
798 | 0xEC 0x92 0xB8 #Lo HANGUL SYLLABLE SSWI
799 | 0xEC 0x93 0x94 #Lo HANGUL SYLLABLE SSYU
800 | 0xEC 0x93 0xB0 #Lo HANGUL SYLLABLE SSEU
801 | 0xEC 0x94 0x8C #Lo HANGUL SYLLABLE SSYI
802 | 0xEC 0x94 0xA8 #Lo HANGUL SYLLABLE SSI
803 | 0xEC 0x95 0x84 #Lo HANGUL SYLLABLE A
804 | 0xEC 0x95 0xA0 #Lo HANGUL SYLLABLE AE
805 | 0xEC 0x95 0xBC #Lo HANGUL SYLLABLE YA
806 | 0xEC 0x96 0x98 #Lo HANGUL SYLLABLE YAE
807 | 0xEC 0x96 0xB4 #Lo HANGUL SYLLABLE EO
808 | 0xEC 0x97 0x90 #Lo HANGUL SYLLABLE E
809 | 0xEC 0x97 0xAC #Lo HANGUL SYLLABLE YEO
810 | 0xEC 0x98 0x88 #Lo HANGUL SYLLABLE YE
811 | 0xEC 0x98 0xA4 #Lo HANGUL SYLLABLE O
812 | 0xEC 0x99 0x80 #Lo HANGUL SYLLABLE WA
813 | 0xEC 0x99 0x9C #Lo HANGUL SYLLABLE WAE
814 | 0xEC 0x99 0xB8 #Lo HANGUL SYLLABLE OE
815 | 0xEC 0x9A 0x94 #Lo HANGUL SYLLABLE YO
816 | 0xEC 0x9A 0xB0 #Lo HANGUL SYLLABLE U
817 | 0xEC 0x9B 0x8C #Lo HANGUL SYLLABLE WEO
818 | 0xEC 0x9B 0xA8 #Lo HANGUL SYLLABLE WE
819 | 0xEC 0x9C 0x84 #Lo HANGUL SYLLABLE WI
820 | 0xEC 0x9C 0xA0 #Lo HANGUL SYLLABLE YU
821 | 0xEC 0x9C 0xBC #Lo HANGUL SYLLABLE EU
822 | 0xEC 0x9D 0x98 #Lo HANGUL SYLLABLE YI
823 | 0xEC 0x9D 0xB4 #Lo HANGUL SYLLABLE I
824 | 0xEC 0x9E 0x90 #Lo HANGUL SYLLABLE JA
825 | 0xEC 0x9E 0xAC #Lo HANGUL SYLLABLE JAE
826 | 0xEC 0x9F 0x88 #Lo HANGUL SYLLABLE JYA
827 | 0xEC 0x9F 0xA4 #Lo HANGUL SYLLABLE JYAE
828 | 0xEC 0xA0 0x80 #Lo HANGUL SYLLABLE JEO
829 | 0xEC 0xA0 0x9C #Lo HANGUL SYLLABLE JE
830 | 0xEC 0xA0 0xB8 #Lo HANGUL SYLLABLE JYEO
831 | 0xEC 0xA1 0x94 #Lo HANGUL SYLLABLE JYE
832 | 0xEC 0xA1 0xB0 #Lo HANGUL SYLLABLE JO
833 | 0xEC 0xA2 0x8C #Lo HANGUL SYLLABLE JWA
834 | 0xEC 0xA2 0xA8 #Lo HANGUL SYLLABLE JWAE
835 | 0xEC 0xA3 0x84 #Lo HANGUL SYLLABLE JOE
836 | 0xEC 0xA3 0xA0 #Lo HANGUL SYLLABLE JYO
837 | 0xEC 0xA3 0xBC #Lo HANGUL SYLLABLE JU
838 | 0xEC 0xA4 0x98 #Lo HANGUL SYLLABLE JWEO
839 | 0xEC 0xA4 0xB4 #Lo HANGUL SYLLABLE JWE
840 | 0xEC 0xA5 0x90 #Lo HANGUL SYLLABLE JWI
841 | 0xEC 0xA5 0xAC #Lo HANGUL SYLLABLE JYU
842 | 0xEC 0xA6 0x88 #Lo HANGUL SYLLABLE JEU
843 | 0xEC 0xA6 0xA4 #Lo HANGUL SYLLABLE JYI
844 | 0xEC 0xA7 0x80 #Lo HANGUL SYLLABLE JI
845 | 0xEC 0xA7 0x9C #Lo HANGUL SYLLABLE JJA
846 | 0xEC 0xA7 0xB8 #Lo HANGUL SYLLABLE JJAE
847 | 0xEC 0xA8 0x94 #Lo HANGUL SYLLABLE JJYA
848 | 0xEC 0xA8 0xB0 #Lo HANGUL SYLLABLE JJYAE
849 | 0xEC 0xA9 0x8C #Lo HANGUL SYLLABLE JJEO
850 | 0xEC 0xA9 0xA8 #Lo HANGUL SYLLABLE JJE
851 | 0xEC 0xAA 0x84 #Lo HANGUL SYLLABLE JJYEO
852 | 0xEC 0xAA 0xA0 #Lo HANGUL SYLLABLE JJYE
853 | 0xEC 0xAA 0xBC #Lo HANGUL SYLLABLE JJO
854 | 0xEC 0xAB 0x98 #Lo HANGUL SYLLABLE JJWA
855 | 0xEC 0xAB 0xB4 #Lo HANGUL SYLLABLE JJWAE
856 | 0xEC 0xAC 0x90 #Lo HANGUL SYLLABLE JJOE
857 | 0xEC 0xAC 0xAC #Lo HANGUL SYLLABLE JJYO
858 | 0xEC 0xAD 0x88 #Lo HANGUL SYLLABLE JJU
859 | 0xEC 0xAD 0xA4 #Lo HANGUL SYLLABLE JJWEO
860 | 0xEC 0xAE 0x80 #Lo HANGUL SYLLABLE JJWE
861 | 0xEC 0xAE 0x9C #Lo HANGUL SYLLABLE JJWI
862 | 0xEC 0xAE 0xB8 #Lo HANGUL SYLLABLE JJYU
863 | 0xEC 0xAF 0x94 #Lo HANGUL SYLLABLE JJEU
864 | 0xEC 0xAF 0xB0 #Lo HANGUL SYLLABLE JJYI
865 | 0xEC 0xB0 0x8C #Lo HANGUL SYLLABLE JJI
866 | 0xEC 0xB0 0xA8 #Lo HANGUL SYLLABLE CA
867 | 0xEC 0xB1 0x84 #Lo HANGUL SYLLABLE CAE
868 | 0xEC 0xB1 0xA0 #Lo HANGUL SYLLABLE CYA
869 | 0xEC 0xB1 0xBC #Lo HANGUL SYLLABLE CYAE
870 | 0xEC 0xB2 0x98 #Lo HANGUL SYLLABLE CEO
871 | 0xEC 0xB2 0xB4 #Lo HANGUL SYLLABLE CE
872 | 0xEC 0xB3 0x90 #Lo HANGUL SYLLABLE CYEO
873 | 0xEC 0xB3 0xAC #Lo HANGUL SYLLABLE CYE
874 | 0xEC 0xB4 0x88 #Lo HANGUL SYLLABLE CO
875 | 0xEC 0xB4 0xA4 #Lo HANGUL SYLLABLE CWA
876 | 0xEC 0xB5 0x80 #Lo HANGUL SYLLABLE CWAE
877 | 0xEC 0xB5 0x9C #Lo HANGUL SYLLABLE COE
878 | 0xEC 0xB5 0xB8 #Lo HANGUL SYLLABLE CYO
879 | 0xEC 0xB6 0x94 #Lo HANGUL SYLLABLE CU
880 | 0xEC 0xB6 0xB0 #Lo HANGUL SYLLABLE CWEO
881 | 0xEC 0xB7 0x8C #Lo HANGUL SYLLABLE CWE
882 | 0xEC 0xB7 0xA8 #Lo HANGUL SYLLABLE CWI
883 | 0xEC 0xB8 0x84 #Lo HANGUL SYLLABLE CYU
884 | 0xEC 0xB8 0xA0 #Lo HANGUL SYLLABLE CEU
885 | 0xEC 0xB8 0xBC #Lo HANGUL SYLLABLE CYI
886 | 0xEC 0xB9 0x98 #Lo HANGUL SYLLABLE CI
887 | 0xEC 0xB9 0xB4 #Lo HANGUL SYLLABLE KA
888 | 0xEC 0xBA 0x90 #Lo HANGUL SYLLABLE KAE
889 | 0xEC 0xBA 0xAC #Lo HANGUL SYLLABLE KYA
890 | 0xEC 0xBB 0x88 #Lo HANGUL SYLLABLE KYAE
891 | 0xEC 0xBB 0xA4 #Lo HANGUL SYLLABLE KEO
892 | 0xEC 0xBC 0x80 #Lo HANGUL SYLLABLE KE
893 | 0xEC 0xBC 0x9C #Lo HANGUL SYLLABLE KYEO
894 | 0xEC 0xBC 0xB8 #Lo HANGUL SYLLABLE KYE
895 | 0xEC 0xBD 0x94 #Lo HANGUL SYLLABLE KO
896 | 0xEC 0xBD 0xB0 #Lo HANGUL SYLLABLE KWA
897 | 0xEC 0xBE 0x8C #Lo HANGUL SYLLABLE KWAE
898 | 0xEC 0xBE 0xA8 #Lo HANGUL SYLLABLE KOE
899 | 0xEC 0xBF 0x84 #Lo HANGUL SYLLABLE KYO
900 | 0xEC 0xBF 0xA0 #Lo HANGUL SYLLABLE KU
901 | 0xEC 0xBF 0xBC #Lo HANGUL SYLLABLE KWEO
902 | 0xED 0x80 0x98 #Lo HANGUL SYLLABLE KWE
903 | 0xED 0x80 0xB4 #Lo HANGUL SYLLABLE KWI
904 | 0xED 0x81 0x90 #Lo HANGUL SYLLABLE KYU
905 | 0xED 0x81 0xAC #Lo HANGUL SYLLABLE KEU
906 | 0xED 0x82 0x88 #Lo HANGUL SYLLABLE KYI
907 | 0xED 0x82 0xA4 #Lo HANGUL SYLLABLE KI
908 | 0xED 0x83 0x80 #Lo HANGUL SYLLABLE TA
909 | 0xED 0x83 0x9C #Lo HANGUL SYLLABLE TAE
910 | 0xED 0x83 0xB8 #Lo HANGUL SYLLABLE TYA
911 | 0xED 0x84 0x94 #Lo HANGUL SYLLABLE TYAE
912 | 0xED 0x84 0xB0 #Lo HANGUL SYLLABLE TEO
913 | 0xED 0x85 0x8C #Lo HANGUL SYLLABLE TE
914 | 0xED 0x85 0xA8 #Lo HANGUL SYLLABLE TYEO
915 | 0xED 0x86 0x84 #Lo HANGUL SYLLABLE TYE
916 | 0xED 0x86 0xA0 #Lo HANGUL SYLLABLE TO
917 | 0xED 0x86 0xBC #Lo HANGUL SYLLABLE TWA
918 | 0xED 0x87 0x98 #Lo HANGUL SYLLABLE TWAE
919 | 0xED 0x87 0xB4 #Lo HANGUL SYLLABLE TOE
920 | 0xED 0x88 0x90 #Lo HANGUL SYLLABLE TYO
921 | 0xED 0x88 0xAC #Lo HANGUL SYLLABLE TU
922 | 0xED 0x89 0x88 #Lo HANGUL SYLLABLE TWEO
923 | 0xED 0x89 0xA4 #Lo HANGUL SYLLABLE TWE
924 | 0xED 0x8A 0x80 #Lo HANGUL SYLLABLE TWI
925 | 0xED 0x8A 0x9C #Lo HANGUL SYLLABLE TYU
926 | 0xED 0x8A 0xB8 #Lo HANGUL SYLLABLE TEU
927 | 0xED 0x8B 0x94 #Lo HANGUL SYLLABLE TYI
928 | 0xED 0x8B 0xB0 #Lo HANGUL SYLLABLE TI
929 | 0xED 0x8C 0x8C #Lo HANGUL SYLLABLE PA
930 | 0xED 0x8C 0xA8 #Lo HANGUL SYLLABLE PAE
931 | 0xED 0x8D 0x84 #Lo HANGUL SYLLABLE PYA
932 | 0xED 0x8D 0xA0 #Lo HANGUL SYLLABLE PYAE
933 | 0xED 0x8D 0xBC #Lo HANGUL SYLLABLE PEO
934 | 0xED 0x8E 0x98 #Lo HANGUL SYLLABLE PE
935 | 0xED 0x8E 0xB4 #Lo HANGUL SYLLABLE PYEO
936 | 0xED 0x8F 0x90 #Lo HANGUL SYLLABLE PYE
937 | 0xED 0x8F 0xAC #Lo HANGUL SYLLABLE PO
938 | 0xED 0x90 0x88 #Lo HANGUL SYLLABLE PWA
939 | 0xED 0x90 0xA4 #Lo HANGUL SYLLABLE PWAE
940 | 0xED 0x91 0x80 #Lo HANGUL SYLLABLE POE
941 | 0xED 0x91 0x9C #Lo HANGUL SYLLABLE PYO
942 | 0xED 0x91 0xB8 #Lo HANGUL SYLLABLE PU
943 | 0xED 0x92 0x94 #Lo HANGUL SYLLABLE PWEO
944 | 0xED 0x92 0xB0 #Lo HANGUL SYLLABLE PWE
945 | 0xED 0x93 0x8C #Lo HANGUL SYLLABLE PWI
946 | 0xED 0x93 0xA8 #Lo HANGUL SYLLABLE PYU
947 | 0xED 0x94 0x84 #Lo HANGUL SYLLABLE PEU
948 | 0xED 0x94 0xA0 #Lo HANGUL SYLLABLE PYI
949 | 0xED 0x94 0xBC #Lo HANGUL SYLLABLE PI
950 | 0xED 0x95 0x98 #Lo HANGUL SYLLABLE HA
951 | 0xED 0x95 0xB4 #Lo HANGUL SYLLABLE HAE
952 | 0xED 0x96 0x90 #Lo HANGUL SYLLABLE HYA
953 | 0xED 0x96 0xAC #Lo HANGUL SYLLABLE HYAE
954 | 0xED 0x97 0x88 #Lo HANGUL SYLLABLE HEO
955 | 0xED 0x97 0xA4 #Lo HANGUL SYLLABLE HE
956 | 0xED 0x98 0x80 #Lo HANGUL SYLLABLE HYEO
957 | 0xED 0x98 0x9C #Lo HANGUL SYLLABLE HYE
958 | 0xED 0x98 0xB8 #Lo HANGUL SYLLABLE HO
959 | 0xED 0x99 0x94 #Lo HANGUL SYLLABLE HWA
960 | 0xED 0x99 0xB0 #Lo HANGUL SYLLABLE HWAE
961 | 0xED 0x9A 0x8C #Lo HANGUL SYLLABLE HOE
962 | 0xED 0x9A 0xA8 #Lo HANGUL SYLLABLE HYO
963 | 0xED 0x9B 0x84 #Lo HANGUL SYLLABLE HU
964 | 0xED 0x9B 0xA0 #Lo HANGUL SYLLABLE HWEO
965 | 0xED 0x9B 0xBC #Lo HANGUL SYLLABLE HWE
966 | 0xED 0x9C 0x98 #Lo HANGUL SYLLABLE HWI
967 | 0xED 0x9C 0xB4 #Lo HANGUL SYLLABLE HYU
968 | 0xED 0x9D 0x90 #Lo HANGUL SYLLABLE HEU
969 | 0xED 0x9D 0xAC #Lo HANGUL SYLLABLE HYI
970 | 0xED 0x9E 0x88 #Lo HANGUL SYLLABLE HI
971 ;
972
973 LVT =
974 0xEA 0xB0 0x81..0x9B #Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLAB...
975 | 0xEA 0xB0 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLA...
976 | 0xEA 0xB0 0xB9..0xFF #Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLA...
977 | 0xEA 0xB1 0x00..0x93 #
978 | 0xEA 0xB1 0x95..0xAF #Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLL...
979 | 0xEA 0xB1 0xB1..0xFF #Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLA...
980 | 0xEA 0xB2 0x00..0x8B #
981 | 0xEA 0xB2 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLAB...
982 | 0xEA 0xB2 0xA9..0xFF #Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLL...
983 | 0xEA 0xB3 0x00..0x83 #
984 | 0xEA 0xB3 0x85..0x9F #Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLA...
985 | 0xEA 0xB3 0xA1..0xBB #Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLAB...
986 | 0xEA 0xB3 0xBD..0xFF #Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLA...
987 | 0xEA 0xB4 0x00..0x97 #
988 | 0xEA 0xB4 0x99..0xB3 #Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLL...
989 | 0xEA 0xB4 0xB5..0xFF #Lo [27] HANGUL SYLLABLE GOEG..HANGUL SYLLA...
990 | 0xEA 0xB5 0x00..0x8F #
991 | 0xEA 0xB5 0x91..0xAB #Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLA...
992 | 0xEA 0xB5 0xAD..0xFF #Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLAB...
993 | 0xEA 0xB6 0x00..0x87 #
994 | 0xEA 0xB6 0x89..0xA3 #Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLL...
995 | 0xEA 0xB6 0xA5..0xBF #Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLA...
996 | 0xEA 0xB7 0x81..0x9B #Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLA...
997 | 0xEA 0xB7 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLA...
998 | 0xEA 0xB7 0xB9..0xFF #Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLA...
999 | 0xEA 0xB8 0x00..0x93 #
1000 | 0xEA 0xB8 0x95..0xAF #Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLA...
1001 | 0xEA 0xB8 0xB1..0xFF #Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLAB...
1002 | 0xEA 0xB9 0x00..0x8B #
1003 | 0xEA 0xB9 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLA...
1004 | 0xEA 0xB9 0xA9..0xFF #Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLL...
1005 | 0xEA 0xBA 0x00..0x83 #
1006 | 0xEA 0xBA 0x85..0x9F #Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLL...
1007 | 0xEA 0xBA 0xA1..0xBB #Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYL...
1008 | 0xEA 0xBA 0xBD..0xFF #Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLL...
1009 | 0xEA 0xBB 0x00..0x97 #
1010 | 0xEA 0xBB 0x99..0xB3 #Lo [27] HANGUL SYLLABLE GGEG..HANGUL SYLLA...
1011 | 0xEA 0xBB 0xB5..0xFF #Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYL...
1012 | 0xEA 0xBC 0x00..0x8F #
1013 | 0xEA 0xBC 0x91..0xAB #Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLL...
1014 | 0xEA 0xBC 0xAD..0xFF #Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLA...
1015 | 0xEA 0xBD 0x00..0x87 #
1016 | 0xEA 0xBD 0x89..0xA3 #Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLL...
1017 | 0xEA 0xBD 0xA5..0xBF #Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYL...
1018 | 0xEA 0xBE 0x81..0x9B #Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLL...
1019 | 0xEA 0xBE 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLL...
1020 | 0xEA 0xBE 0xB9..0xFF #Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLA...
1021 | 0xEA 0xBF 0x00..0x93 #
1022 | 0xEA 0xBF 0x95..0xAF #Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYL...
1023 | 0xEA 0xBF 0xB1..0xFF #Lo [27] HANGUL SYLLABLE GGWEG..HANGUL ...
1024 | 0xEA 0xC0..0xFF 0x00..0xFF #
1025 | 0xEB 0x00 0x00..0xFF #
1026 | 0xEB 0x01..0x7F 0x00..0xFF #
1027 | 0xEB 0x80 0x00..0x8B #
1028 | 0xEB 0x80 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLL...
1029 | 0xEB 0x80 0xA9..0xFF #Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLL...
1030 | 0xEB 0x81 0x00..0x83 #
1031 | 0xEB 0x81 0x85..0x9F #Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLL...
1032 | 0xEB 0x81 0xA1..0xBB #Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLL...
1033 | 0xEB 0x81 0xBD..0xFF #Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLA...
1034 | 0xEB 0x82 0x00..0x97 #
1035 | 0xEB 0x82 0x99..0xB3 #Lo [27] HANGUL SYLLABLE NAG..HANGUL SYLLAB...
1036 | 0xEB 0x82 0xB5..0xFF #Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLA...
1037 | 0xEB 0x83 0x00..0x8F #
1038 | 0xEB 0x83 0x91..0xAB #Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLA...
1039 | 0xEB 0x83 0xAD..0xFF #Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLL...
1040 | 0xEB 0x84 0x00..0x87 #
1041 | 0xEB 0x84 0x89..0xA3 #Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLA...
1042 | 0xEB 0x84 0xA5..0xBF #Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLAB...
1043 | 0xEB 0x85 0x81..0x9B #Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLL...
1044 | 0xEB 0x85 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLA...
1045 | 0xEB 0x85 0xB9..0xFF #Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLAB...
1046 | 0xEB 0x86 0x00..0x93 #
1047 | 0xEB 0x86 0x95..0xAF #Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLA...
1048 | 0xEB 0x86 0xB1..0xFF #Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLL...
1049 | 0xEB 0x87 0x00..0x8B #
1050 | 0xEB 0x87 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLA...
1051 | 0xEB 0x87 0xA9..0xFF #Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLA...
1052 | 0xEB 0x88 0x00..0x83 #
1053 | 0xEB 0x88 0x85..0x9F #Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLAB...
1054 | 0xEB 0x88 0xA1..0xBB #Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLL...
1055 | 0xEB 0x88 0xBD..0xFF #Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLA...
1056 | 0xEB 0x89 0x00..0x97 #
1057 | 0xEB 0x89 0x99..0xB3 #Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLA...
1058 | 0xEB 0x89 0xB5..0xFF #Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLA...
1059 | 0xEB 0x8A 0x00..0x8F #
1060 | 0xEB 0x8A 0x91..0xAB #Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLA...
1061 | 0xEB 0x8A 0xAD..0xFF #Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLA...
1062 | 0xEB 0x8B 0x00..0x87 #
1063 | 0xEB 0x8B 0x89..0xA3 #Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLAB...
1064 | 0xEB 0x8B 0xA5..0xBF #Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLAB...
1065 | 0xEB 0x8C 0x81..0x9B #Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLA...
1066 | 0xEB 0x8C 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLA...
1067 | 0xEB 0x8C 0xB9..0xFF #Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLL...
1068 | 0xEB 0x8D 0x00..0x93 #
1069 | 0xEB 0x8D 0x95..0xAF #Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLA...
1070 | 0xEB 0x8D 0xB1..0xFF #Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLAB...
1071 | 0xEB 0x8E 0x00..0x8B #
1072 | 0xEB 0x8E 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLL...
1073 | 0xEB 0x8E 0xA9..0xFF #Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLA...
1074 | 0xEB 0x8F 0x00..0x83 #
1075 | 0xEB 0x8F 0x85..0x9F #Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLAB...
1076 | 0xEB 0x8F 0xA1..0xBB #Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLA...
1077 | 0xEB 0x8F 0xBD..0xFF #Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLL...
1078 | 0xEB 0x90 0x00..0x97 #
1079 | 0xEB 0x90 0x99..0xB3 #Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLA...
1080 | 0xEB 0x90 0xB5..0xFF #Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLA...
1081 | 0xEB 0x91 0x00..0x8F #
1082 | 0xEB 0x91 0x91..0xAB #Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLAB...
1083 | 0xEB 0x91 0xAD..0xFF #Lo [27] HANGUL SYLLABLE DWEOG..HANGUL SYLL...
1084 | 0xEB 0x92 0x00..0x87 #
1085 | 0xEB 0x92 0x89..0xA3 #Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLA...
1086 | 0xEB 0x92 0xA5..0xBF #Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLA...
1087 | 0xEB 0x93 0x81..0x9B #Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLA...
1088 | 0xEB 0x93 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLA...
1089 | 0xEB 0x93 0xB9..0xFF #Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLA...
1090 | 0xEB 0x94 0x00..0x93 #
1091 | 0xEB 0x94 0x95..0xAF #Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLAB...
1092 | 0xEB 0x94 0xB1..0xFF #Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLA...
1093 | 0xEB 0x95 0x00..0x8B #
1094 | 0xEB 0x95 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLL...
1095 | 0xEB 0x95 0xA9..0xFF #Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLL...
1096 | 0xEB 0x96 0x00..0x83 #
1097 | 0xEB 0x96 0x85..0x9F #Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYL...
1098 | 0xEB 0x96 0xA1..0xBB #Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLL...
1099 | 0xEB 0x96 0xBD..0xFF #Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLA...
1100 | 0xEB 0x97 0x00..0x97 #
1101 | 0xEB 0x97 0x99..0xB3 #Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYL...
1102 | 0xEB 0x97 0xB5..0xFF #Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLL...
1103 | 0xEB 0x98 0x00..0x8F #
1104 | 0xEB 0x98 0x91..0xAB #Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLA...
1105 | 0xEB 0x98 0xAD..0xFF #Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLL...
1106 | 0xEB 0x99 0x00..0x87 #
1107 | 0xEB 0x99 0x89..0xA3 #Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYL...
1108 | 0xEB 0x99 0xA5..0xBF #Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLL...
1109 | 0xEB 0x9A 0x81..0x9B #Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLL...
1110 | 0xEB 0x9A 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLA...
1111 | 0xEB 0x9A 0xB9..0xFF #Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYL...
1112 | 0xEB 0x9B 0x00..0x93 #
1113 | 0xEB 0x9B 0x95..0xAF #Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLL...
1114 | 0xEB 0x9B 0xB1..0xFF #Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLL...
1115 | 0xEB 0x9C 0x00..0x8B #
1116 | 0xEB 0x9C 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLL...
1117 | 0xEB 0x9C 0xA9..0xFF #Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLL...
1118 | 0xEB 0x9D 0x00..0x83 #
1119 | 0xEB 0x9D 0x85..0x9F #Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLL...
1120 | 0xEB 0x9D 0xA1..0xBB #Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLA...
1121 | 0xEB 0x9D 0xBD..0xFF #Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLAB...
1122 | 0xEB 0x9E 0x00..0x97 #
1123 | 0xEB 0x9E 0x99..0xB3 #Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLA...
1124 | 0xEB 0x9E 0xB5..0xFF #Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLA...
1125 | 0xEB 0x9F 0x00..0x8F #
1126 | 0xEB 0x9F 0x91..0xAB #Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLL...
1127 | 0xEB 0x9F 0xAD..0xFF #Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLA...
1128 | 0xEB 0xA0 0x00..0x87 #
1129 | 0xEB 0xA0 0x89..0xA3 #Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLAB...
1130 | 0xEB 0xA0 0xA5..0xBF #Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLL...
1131 | 0xEB 0xA1 0x81..0x9B #Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLA...
1132 | 0xEB 0xA1 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLAB...
1133 | 0xEB 0xA1 0xB9..0xFF #Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLA...
1134 | 0xEB 0xA2 0x00..0x93 #
1135 | 0xEB 0xA2 0x95..0xAF #Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLL...
1136 | 0xEB 0xA2 0xB1..0xFF #Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLA...
1137 | 0xEB 0xA3 0x00..0x8B #
1138 | 0xEB 0xA3 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLA...
1139 | 0xEB 0xA3 0xA9..0xFF #Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLAB...
1140 | 0xEB 0xA4 0x00..0x83 #
1141 | 0xEB 0xA4 0x85..0x9F #Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLL...
1142 | 0xEB 0xA4 0xA1..0xBB #Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLA...
1143 | 0xEB 0xA4 0xBD..0xFF #Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLA...
1144 | 0xEB 0xA5 0x00..0x97 #
1145 | 0xEB 0xA5 0x99..0xB3 #Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLA...
1146 | 0xEB 0xA5 0xB5..0xFF #Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLA...
1147 | 0xEB 0xA6 0x00..0x8F #
1148 | 0xEB 0xA6 0x91..0xAB #Lo [27] HANGUL SYLLABLE RYIG..HANGUL SYLLA...
1149 | 0xEB 0xA6 0xAD..0xFF #Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLAB...
1150 | 0xEB 0xA7 0x00..0x87 #
1151 | 0xEB 0xA7 0x89..0xA3 #Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLAB...
1152 | 0xEB 0xA7 0xA5..0xBF #Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLA...
1153 | 0xEB 0xA8 0x81..0x9B #Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLA...
1154 | 0xEB 0xA8 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLL...
1155 | 0xEB 0xA8 0xB9..0xFF #Lo [27] HANGUL SYLLABLE MEOG..HANGUL SYLLA...
1156 | 0xEB 0xA9 0x00..0x93 #
1157 | 0xEB 0xA9 0x95..0xAF #Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLAB...
1158 | 0xEB 0xA9 0xB1..0xFF #Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLL...
1159 | 0xEB 0xAA 0x00..0x8B #
1160 | 0xEB 0xAA 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLA...
1161 | 0xEB 0xAA 0xA9..0xFF #Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLAB...
1162 | 0xEB 0xAB 0x00..0x83 #
1163 | 0xEB 0xAB 0x85..0x9F #Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLA...
1164 | 0xEB 0xAB 0xA1..0xBB #Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLL...
1165 | 0xEB 0xAB 0xBD..0xFF #Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLA...
1166 | 0xEB 0xAC 0x00..0x97 #
1167 | 0xEB 0xAC 0x99..0xB3 #Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLA...
1168 | 0xEB 0xAC 0xB5..0xFF #Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLAB...
1169 | 0xEB 0xAD 0x00..0x8F #
1170 | 0xEB 0xAD 0x91..0xAB #Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLL...
1171 | 0xEB 0xAD 0xAD..0xFF #Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLA...
1172 | 0xEB 0xAE 0x00..0x87 #
1173 | 0xEB 0xAE 0x89..0xA3 #Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLA...
1174 | 0xEB 0xAE 0xA5..0xBF #Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLA...
1175 | 0xEB 0xAF 0x81..0x9B #Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLA...
1176 | 0xEB 0xAF 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLA...
1177 | 0xEB 0xAF 0xB9..0xFF #Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLAB...
1178 | 0xEB 0xB0 0x00..0x93 #
1179 | 0xEB 0xB0 0x95..0xAF #Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLAB...
1180 | 0xEB 0xB0 0xB1..0xFF #Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLA...
1181 | 0xEB 0xB1 0x00..0x8B #
1182 | 0xEB 0xB1 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLA...
1183 | 0xEB 0xB1 0xA9..0xFF #Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLL...
1184 | 0xEB 0xB2 0x00..0x83 #
1185 | 0xEB 0xB2 0x85..0x9F #Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLA...
1186 | 0xEB 0xB2 0xA1..0xBB #Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLAB...
1187 | 0xEB 0xB2 0xBD..0xFF #Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLL...
1188 | 0xEB 0xB3 0x00..0x97 #
1189 | 0xEB 0xB3 0x99..0xB3 #Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLA...
1190 | 0xEB 0xB3 0xB5..0xFF #Lo [27] HANGUL SYLLABLE BOG..HANGUL SYLLAB...
1191 | 0xEB 0xB4 0x00..0x8F #
1192 | 0xEB 0xB4 0x91..0xAB #Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLA...
1193 | 0xEB 0xB4 0xAD..0xFF #Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLL...
1194 | 0xEB 0xB5 0x00..0x87 #
1195 | 0xEB 0xB5 0x89..0xA3 #Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLA...
1196 | 0xEB 0xB5 0xA5..0xBF #Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLA...
1197 | 0xEB 0xB6 0x81..0x9B #Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLAB...
1198 | 0xEB 0xB6 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLL...
1199 | 0xEB 0xB6 0xB9..0xFF #Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLA...
1200 | 0xEB 0xB7 0x00..0x93 #
1201 | 0xEB 0xB7 0x95..0xAF #Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLA...
1202 | 0xEB 0xB7 0xB1..0xFF #Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLA...
1203 | 0xEB 0xB8 0x00..0x8B #
1204 | 0xEB 0xB8 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLA...
1205 | 0xEB 0xB8 0xA9..0xFF #Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLA...
1206 | 0xEB 0xB9 0x00..0x83 #
1207 | 0xEB 0xB9 0x85..0x9F #Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLAB...
1208 | 0xEB 0xB9 0xA1..0xBB #Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLA...
1209 | 0xEB 0xB9 0xBD..0xFF #Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLL...
1210 | 0xEB 0xBA 0x00..0x97 #
1211 | 0xEB 0xBA 0x99..0xB3 #Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLL...
1212 | 0xEB 0xBA 0xB5..0xFF #Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYL...
1213 | 0xEB 0xBB 0x00..0x8F #
1214 | 0xEB 0xBB 0x91..0xAB #Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLL...
1215 | 0xEB 0xBB 0xAD..0xFF #Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLA...
1216 | 0xEB 0xBC 0x00..0x87 #
1217 | 0xEB 0xBC 0x89..0xA3 #Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYL...
1218 | 0xEB 0xBC 0xA5..0xBF #Lo [27] HANGUL SYLLABLE BBYEG..HANGUL SYLL...
1219 | 0xEB 0xBD 0x81..0x9B #Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLA...
1220 | 0xEB 0xBD 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLL...
1221 | 0xEB 0xBD 0xB9..0xFF #Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYL...
1222 | 0xEB 0xBE 0x00..0x93 #
1223 | 0xEB 0xBE 0x95..0xAF #Lo [27] HANGUL SYLLABLE BBOEG..HANGUL SYLL...
1224 | 0xEB 0xBE 0xB1..0xFF #Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLL...
1225 | 0xEB 0xBF 0x00..0x8B #
1226 | 0xEB 0xBF 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLA...
1227 | 0xEB 0xBF 0xA9..0xFF #Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL...
1228 | 0xEB 0xC0..0xFF 0x00..0xFF #
1229 | 0xEC 0x00 0x00..0xFF #
1230 | 0xEC 0x01..0x7F 0x00..0xFF #
1231 | 0xEC 0x80 0x00..0x83 #
1232 | 0xEC 0x80 0x85..0x9F #Lo [27] HANGUL SYLLABLE BBWEG..HANGUL SYLL...
1233 | 0xEC 0x80 0xA1..0xBB #Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLL...
1234 | 0xEC 0x80 0xBD..0xFF #Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLL...
1235 | 0xEC 0x81 0x00..0x97 #
1236 | 0xEC 0x81 0x99..0xB3 #Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLL...
1237 | 0xEC 0x81 0xB5..0xFF #Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLL...
1238 | 0xEC 0x82 0x00..0x8F #
1239 | 0xEC 0x82 0x91..0xAB #Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLA...
1240 | 0xEC 0x82 0xAD..0xFF #Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLAB...
1241 | 0xEC 0x83 0x00..0x87 #
1242 | 0xEC 0x83 0x89..0xA3 #Lo [27] HANGUL SYLLABLE SAEG..HANGUL SYLLA...
1243 | 0xEC 0x83 0xA5..0xBF #Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLA...
1244 | 0xEC 0x84 0x81..0x9B #Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLL...
1245 | 0xEC 0x84 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLA...
1246 | 0xEC 0x84 0xB9..0xFF #Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLAB...
1247 | 0xEC 0x85 0x00..0x93 #
1248 | 0xEC 0x85 0x95..0xAF #Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLL...
1249 | 0xEC 0x85 0xB1..0xFF #Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLA...
1250 | 0xEC 0x86 0x00..0x8B #
1251 | 0xEC 0x86 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLAB...
1252 | 0xEC 0x86 0xA9..0xFF #Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLA...
1253 | 0xEC 0x87 0x00..0x83 #
1254 | 0xEC 0x87 0x85..0x9F #Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLL...
1255 | 0xEC 0x87 0xA1..0xBB #Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLA...
1256 | 0xEC 0x87 0xBD..0xFF #Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLA...
1257 | 0xEC 0x88 0x00..0x97 #
1258 | 0xEC 0x88 0x99..0xB3 #Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLAB...
1259 | 0xEC 0x88 0xB5..0xFF #Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLL...
1260 | 0xEC 0x89 0x00..0x8F #
1261 | 0xEC 0x89 0x91..0xAB #Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLA...
1262 | 0xEC 0x89 0xAD..0xFF #Lo [27] HANGUL SYLLABLE SWIG..HANGUL SYLLA...
1263 | 0xEC 0x8A 0x00..0x87 #
1264 | 0xEC 0x8A 0x89..0xA3 #Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLA...
1265 | 0xEC 0x8A 0xA5..0xBF #Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLA...
1266 | 0xEC 0x8B 0x81..0x9B #Lo [27] HANGUL SYLLABLE SYIG..HANGUL SYLLA...
1267 | 0xEC 0x8B 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLAB...
1268 | 0xEC 0x8B 0xB9..0xFF #Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLA...
1269 | 0xEC 0x8C 0x00..0x93 #
1270 | 0xEC 0x8C 0x95..0xAF #Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLL...
1271 | 0xEC 0x8C 0xB1..0xFF #Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLL...
1272 | 0xEC 0x8D 0x00..0x8B #
1273 | 0xEC 0x8D 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYL...
1274 | 0xEC 0x8D 0xA9..0xFF #Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLL...
1275 | 0xEC 0x8E 0x00..0x83 #
1276 | 0xEC 0x8E 0x85..0x9F #Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLA...
1277 | 0xEC 0x8E 0xA1..0xBB #Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYL...
1278 | 0xEC 0x8E 0xBD..0xFF #Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLL...
1279 | 0xEC 0x8F 0x00..0x97 #
1280 | 0xEC 0x8F 0x99..0xB3 #Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLA...
1281 | 0xEC 0x8F 0xB5..0xFF #Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLL...
1282 | 0xEC 0x90 0x00..0x8F #
1283 | 0xEC 0x90 0x91..0xAB #Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYL...
1284 | 0xEC 0x90 0xAD..0xFF #Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLL...
1285 | 0xEC 0x91 0x00..0x87 #
1286 | 0xEC 0x91 0x89..0xA3 #Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLL...
1287 | 0xEC 0x91 0xA5..0xBF #Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLA...
1288 | 0xEC 0x92 0x81..0x9B #Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYL...
1289 | 0xEC 0x92 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLL...
1290 | 0xEC 0x92 0xB9..0xFF #Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLL...
1291 | 0xEC 0x93 0x00..0x93 #
1292 | 0xEC 0x93 0x95..0xAF #Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLL...
1293 | 0xEC 0x93 0xB1..0xFF #Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLL...
1294 | 0xEC 0x94 0x00..0x8B #
1295 | 0xEC 0x94 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLL...
1296 | 0xEC 0x94 0xA9..0xFF #Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLA...
1297 | 0xEC 0x95 0x00..0x83 #
1298 | 0xEC 0x95 0x85..0x9F #Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABL...
1299 | 0xEC 0x95 0xA1..0xBB #Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLAB...
1300 | 0xEC 0x95 0xBD..0xFF #Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLAB...
1301 | 0xEC 0x96 0x00..0x97 #
1302 | 0xEC 0x96 0x99..0xB3 #Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLA...
1303 | 0xEC 0x96 0xB5..0xFF #Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLAB...
1304 | 0xEC 0x97 0x00..0x8F #
1305 | 0xEC 0x97 0x91..0xAB #Lo [27] HANGUL SYLLABLE EG..HANGUL SYLLABL...
1306 | 0xEC 0x97 0xAD..0xFF #Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLA...
1307 | 0xEC 0x98 0x00..0x87 #
1308 | 0xEC 0x98 0x89..0xA3 #Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLAB...
1309 | 0xEC 0x98 0xA5..0xBF #Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABL...
1310 | 0xEC 0x99 0x81..0x9B #Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLAB...
1311 | 0xEC 0x99 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLA...
1312 | 0xEC 0x99 0xB9..0xFF #Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLAB...
1313 | 0xEC 0x9A 0x00..0x93 #
1314 | 0xEC 0x9A 0x95..0xAF #Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLAB...
1315 | 0xEC 0x9A 0xB1..0xFF #Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABL...
1316 | 0xEC 0x9B 0x00..0x8B #
1317 | 0xEC 0x9B 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLA...
1318 | 0xEC 0x9B 0xA9..0xFF #Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLAB...
1319 | 0xEC 0x9C 0x00..0x83 #
1320 | 0xEC 0x9C 0x85..0x9F #Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLAB...
1321 | 0xEC 0x9C 0xA1..0xBB #Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLAB...
1322 | 0xEC 0x9C 0xBD..0xFF #Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLAB...
1323 | 0xEC 0x9D 0x00..0x97 #
1324 | 0xEC 0x9D 0x99..0xB3 #Lo [27] HANGUL SYLLABLE YIG..HANGUL SYLLAB...
1325 | 0xEC 0x9D 0xB5..0xFF #Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABL...
1326 | 0xEC 0x9E 0x00..0x8F #
1327 | 0xEC 0x9E 0x91..0xAB #Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLAB...
1328 | 0xEC 0x9E 0xAD..0xFF #Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLA...
1329 | 0xEC 0x9F 0x00..0x87 #
1330 | 0xEC 0x9F 0x89..0xA3 #Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLA...
1331 | 0xEC 0x9F 0xA5..0xBF #Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLL...
1332 | 0xEC 0xA0 0x81..0x9B #Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLA...
1333 | 0xEC 0xA0 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLAB...
1334 | 0xEC 0xA0 0xB9..0xFF #Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLL...
1335 | 0xEC 0xA1 0x00..0x93 #
1336 | 0xEC 0xA1 0x95..0xAF #Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLA...
1337 | 0xEC 0xA1 0xB1..0xFF #Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLAB...
1338 | 0xEC 0xA2 0x00..0x8B #
1339 | 0xEC 0xA2 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLA...
1340 | 0xEC 0xA2 0xA9..0xFF #Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLL...
1341 | 0xEC 0xA3 0x00..0x83 #
1342 | 0xEC 0xA3 0x85..0x9F #Lo [27] HANGUL SYLLABLE JOEG..HANGUL SYLLA...
1343 | 0xEC 0xA3 0xA1..0xBB #Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLA...
1344 | 0xEC 0xA3 0xBD..0xFF #Lo [27] HANGUL SYLLABLE JUG..HANGUL SYLLAB...
1345 | 0xEC 0xA4 0x00..0x97 #
1346 | 0xEC 0xA4 0x99..0xB3 #Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLL...
1347 | 0xEC 0xA4 0xB5..0xFF #Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLA...
1348 | 0xEC 0xA5 0x00..0x8F #
1349 | 0xEC 0xA5 0x91..0xAB #Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLA...
1350 | 0xEC 0xA5 0xAD..0xFF #Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLA...
1351 | 0xEC 0xA6 0x00..0x87 #
1352 | 0xEC 0xA6 0x89..0xA3 #Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLA...
1353 | 0xEC 0xA6 0xA5..0xBF #Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLA...
1354 | 0xEC 0xA7 0x81..0x9B #Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLAB...
1355 | 0xEC 0xA7 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLA...
1356 | 0xEC 0xA7 0xB9..0xFF #Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLL...
1357 | 0xEC 0xA8 0x00..0x93 #
1358 | 0xEC 0xA8 0x95..0xAF #Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLL...
1359 | 0xEC 0xA8 0xB1..0xFF #Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYL...
1360 | 0xEC 0xA9 0x00..0x8B #
1361 | 0xEC 0xA9 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLL...
1362 | 0xEC 0xA9 0xA9..0xFF #Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLA...
1363 | 0xEC 0xAA 0x00..0x83 #
1364 | 0xEC 0xAA 0x85..0x9F #Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYL...
1365 | 0xEC 0xAA 0xA1..0xBB #Lo [27] HANGUL SYLLABLE JJYEG..HANGUL SYLL...
1366 | 0xEC 0xAA 0xBD..0xFF #Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLA...
1367 | 0xEC 0xAB 0x00..0x97 #
1368 | 0xEC 0xAB 0x99..0xB3 #Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLL...
1369 | 0xEC 0xAB 0xB5..0xFF #Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYL...
1370 | 0xEC 0xAC 0x00..0x8F #
1371 | 0xEC 0xAC 0x91..0xAB #Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLL...
1372 | 0xEC 0xAC 0xAD..0xFF #Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLL...
1373 | 0xEC 0xAD 0x00..0x87 #
1374 | 0xEC 0xAD 0x89..0xA3 #Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLA...
1375 | 0xEC 0xAD 0xA5..0xBF #Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYL...
1376 | 0xEC 0xAE 0x81..0x9B #Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLL...
1377 | 0xEC 0xAE 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLL...
1378 | 0xEC 0xAE 0xB9..0xFF #Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLL...
1379 | 0xEC 0xAF 0x00..0x93 #
1380 | 0xEC 0xAF 0x95..0xAF #Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLL...
1381 | 0xEC 0xAF 0xB1..0xFF #Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLL...
1382 | 0xEC 0xB0 0x00..0x8B #
1383 | 0xEC 0xB0 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLA...
1384 | 0xEC 0xB0 0xA9..0xFF #Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLAB...
1385 | 0xEC 0xB1 0x00..0x83 #
1386 | 0xEC 0xB1 0x85..0x9F #Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLA...
1387 | 0xEC 0xB1 0xA1..0xBB #Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLA...
1388 | 0xEC 0xB1 0xBD..0xFF #Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLL...
1389 | 0xEC 0xB2 0x00..0x97 #
1390 | 0xEC 0xB2 0x99..0xB3 #Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLA...
1391 | 0xEC 0xB2 0xB5..0xFF #Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLAB...
1392 | 0xEC 0xB3 0x00..0x8F #
1393 | 0xEC 0xB3 0x91..0xAB #Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLL...
1394 | 0xEC 0xB3 0xAD..0xFF #Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLA...
1395 | 0xEC 0xB4 0x00..0x87 #
1396 | 0xEC 0xB4 0x89..0xA3 #Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLAB...
1397 | 0xEC 0xB4 0xA5..0xBF #Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLA...
1398 | 0xEC 0xB5 0x81..0x9B #Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLL...
1399 | 0xEC 0xB5 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLA...
1400 | 0xEC 0xB5 0xB9..0xFF #Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLA...
1401 | 0xEC 0xB6 0x00..0x93 #
1402 | 0xEC 0xB6 0x95..0xAF #Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLAB...
1403 | 0xEC 0xB6 0xB1..0xFF #Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLL...
1404 | 0xEC 0xB7 0x00..0x8B #
1405 | 0xEC 0xB7 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLA...
1406 | 0xEC 0xB7 0xA9..0xFF #Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLA...
1407 | 0xEC 0xB8 0x00..0x83 #
1408 | 0xEC 0xB8 0x85..0x9F #Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLA...
1409 | 0xEC 0xB8 0xA1..0xBB #Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLA...
1410 | 0xEC 0xB8 0xBD..0xFF #Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLA...
1411 | 0xEC 0xB9 0x00..0x97 #
1412 | 0xEC 0xB9 0x99..0xB3 #Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLAB...
1413 | 0xEC 0xB9 0xB5..0xFF #Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLAB...
1414 | 0xEC 0xBA 0x00..0x8F #
1415 | 0xEC 0xBA 0x91..0xAB #Lo [27] HANGUL SYLLABLE KAEG..HANGUL SYLLA...
1416 | 0xEC 0xBA 0xAD..0xFF #Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLA...
1417 | 0xEC 0xBB 0x00..0x87 #
1418 | 0xEC 0xBB 0x89..0xA3 #Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLL...
1419 | 0xEC 0xBB 0xA5..0xBF #Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLA...
1420 | 0xEC 0xBC 0x81..0x9B #Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLAB...
1421 | 0xEC 0xBC 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLL...
1422 | 0xEC 0xBC 0xB9..0xFF #Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLA...
1423 | 0xEC 0xBD 0x00..0x93 #
1424 | 0xEC 0xBD 0x95..0xAF #Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLAB...
1425 | 0xEC 0xBD 0xB1..0xFF #Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLA...
1426 | 0xEC 0xBE 0x00..0x8B #
1427 | 0xEC 0xBE 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLL...
1428 | 0xEC 0xBE 0xA9..0xFF #Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLA...
1429 | 0xEC 0xBF 0x00..0x83 #
1430 | 0xEC 0xBF 0x85..0x9F #Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLA...
1431 | 0xEC 0xBF 0xA1..0xBB #Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLAB...
1432 | 0xEC 0xBF 0xBD..0xFF #Lo [27] HANGUL SYLLABLE KWEOG..HANGUL ...
1433 | 0xEC 0xC0..0xFF 0x00..0xFF #
1434 | 0xED 0x00 0x00..0xFF #
1435 | 0xED 0x01..0x7F 0x00..0xFF #
1436 | 0xED 0x80 0x00..0x97 #
1437 | 0xED 0x80 0x99..0xB3 #Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLA...
1438 | 0xED 0x80 0xB5..0xFF #Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLA...
1439 | 0xED 0x81 0x00..0x8F #
1440 | 0xED 0x81 0x91..0xAB #Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLA...
1441 | 0xED 0x81 0xAD..0xFF #Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLA...
1442 | 0xED 0x82 0x00..0x87 #
1443 | 0xED 0x82 0x89..0xA3 #Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLA...
1444 | 0xED 0x82 0xA5..0xBF #Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLAB...
1445 | 0xED 0x83 0x81..0x9B #Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLAB...
1446 | 0xED 0x83 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLA...
1447 | 0xED 0x83 0xB9..0xFF #Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLA...
1448 | 0xED 0x84 0x00..0x93 #
1449 | 0xED 0x84 0x95..0xAF #Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLL...
1450 | 0xED 0x84 0xB1..0xFF #Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLA...
1451 | 0xED 0x85 0x00..0x8B #
1452 | 0xED 0x85 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE TEG..HANGUL SYLLAB...
1453 | 0xED 0x85 0xA9..0xFF #Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLL...
1454 | 0xED 0x86 0x00..0x83 #
1455 | 0xED 0x86 0x85..0x9F #Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLA...
1456 | 0xED 0x86 0xA1..0xBB #Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLAB...
1457 | 0xED 0x86 0xBD..0xFF #Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLA...
1458 | 0xED 0x87 0x00..0x97 #
1459 | 0xED 0x87 0x99..0xB3 #Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLL...
1460 | 0xED 0x87 0xB5..0xFF #Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLA...
1461 | 0xED 0x88 0x00..0x8F #
1462 | 0xED 0x88 0x91..0xAB #Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLA...
1463 | 0xED 0x88 0xAD..0xFF #Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLAB...
1464 | 0xED 0x89 0x00..0x87 #
1465 | 0xED 0x89 0x89..0xA3 #Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLL...
1466 | 0xED 0x89 0xA5..0xBF #Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLA...
1467 | 0xED 0x8A 0x81..0x9B #Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLA...
1468 | 0xED 0x8A 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLA...
1469 | 0xED 0x8A 0xB9..0xFF #Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLA...
1470 | 0xED 0x8B 0x00..0x93 #
1471 | 0xED 0x8B 0x95..0xAF #Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLA...
1472 | 0xED 0x8B 0xB1..0xFF #Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLAB...
1473 | 0xED 0x8C 0x00..0x8B #
1474 | 0xED 0x8C 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLAB...
1475 | 0xED 0x8C 0xA9..0xFF #Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLA...
1476 | 0xED 0x8D 0x00..0x83 #
1477 | 0xED 0x8D 0x85..0x9F #Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLA...
1478 | 0xED 0x8D 0xA1..0xBB #Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLL...
1479 | 0xED 0x8D 0xBD..0xFF #Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLA...
1480 | 0xED 0x8E 0x00..0x97 #
1481 | 0xED 0x8E 0x99..0xB3 #Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLAB...
1482 | 0xED 0x8E 0xB5..0xFF #Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLL...
1483 | 0xED 0x8F 0x00..0x8F #
1484 | 0xED 0x8F 0x91..0xAB #Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLA...
1485 | 0xED 0x8F 0xAD..0xFF #Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLAB...
1486 | 0xED 0x90 0x00..0x87 #
1487 | 0xED 0x90 0x89..0xA3 #Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLA...
1488 | 0xED 0x90 0xA5..0xBF #Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLL...
1489 | 0xED 0x91 0x81..0x9B #Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLA...
1490 | 0xED 0x91 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLA...
1491 | 0xED 0x91 0xB9..0xFF #Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLAB...
1492 | 0xED 0x92 0x00..0x93 #
1493 | 0xED 0x92 0x95..0xAF #Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLL...
1494 | 0xED 0x92 0xB1..0xFF #Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLA...
1495 | 0xED 0x93 0x00..0x8B #
1496 | 0xED 0x93 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLA...
1497 | 0xED 0x93 0xA9..0xFF #Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLA...
1498 | 0xED 0x94 0x00..0x83 #
1499 | 0xED 0x94 0x85..0x9F #Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLA...
1500 | 0xED 0x94 0xA1..0xBB #Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLA...
1501 | 0xED 0x94 0xBD..0xFF #Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLAB...
1502 | 0xED 0x95 0x00..0x97 #
1503 | 0xED 0x95 0x99..0xB3 #Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLAB...
1504 | 0xED 0x95 0xB5..0xFF #Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLA...
1505 | 0xED 0x96 0x00..0x8F #
1506 | 0xED 0x96 0x91..0xAB #Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLA...
1507 | 0xED 0x96 0xAD..0xFF #Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLL...
1508 | 0xED 0x97 0x00..0x87 #
1509 | 0xED 0x97 0x89..0xA3 #Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLA...
1510 | 0xED 0x97 0xA5..0xBF #Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLAB...
1511 | 0xED 0x98 0x81..0x9B #Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLL...
1512 | 0xED 0x98 0x9D..0xB7 #Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLA...
1513 | 0xED 0x98 0xB9..0xFF #Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLAB...
1514 | 0xED 0x99 0x00..0x93 #
1515 | 0xED 0x99 0x95..0xAF #Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLA...
1516 | 0xED 0x99 0xB1..0xFF #Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLL...
1517 | 0xED 0x9A 0x00..0x8B #
1518 | 0xED 0x9A 0x8D..0xA7 #Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLA...
1519 | 0xED 0x9A 0xA9..0xFF #Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLA...
1520 | 0xED 0x9B 0x00..0x83 #
1521 | 0xED 0x9B 0x85..0x9F #Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLAB...
1522 | 0xED 0x9B 0xA1..0xBB #Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLL...
1523 | 0xED 0x9B 0xBD..0xFF #Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLA...
1524 | 0xED 0x9C 0x00..0x97 #
1525 | 0xED 0x9C 0x99..0xB3 #Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLA...
1526 | 0xED 0x9C 0xB5..0xFF #Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLA...
1527 | 0xED 0x9D 0x00..0x8F #
1528 | 0xED 0x9D 0x91..0xAB #Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLA...
1529 | 0xED 0x9D 0xAD..0xFF #Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLA...
1530 | 0xED 0x9E 0x00..0x87 #
1531 | 0xED 0x9E 0x89..0xA3 #Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLAB...
1532 ;
1533
1534 E_Base =
1535 0xE2 0x98 0x9D #So WHITE UP POINTING INDEX
1536 | 0xE2 0x9B 0xB9 #So PERSON WITH BALL
1537 | 0xE2 0x9C 0x8A..0x8D #So [4] RAISED FIST..WRITING HAND
1538 | 0xF0 0x9F 0x8E 0x85 #So FATHER CHRISTMAS
1539 | 0xF0 0x9F 0x8F 0x83..0x84 #So [2] RUNNER..SURFER
1540 | 0xF0 0x9F 0x8F 0x8A..0x8B #So [2] SWIMMER..WEIGHT LIFTER
1541 | 0xF0 0x9F 0x91 0x82..0x83 #So [2] EAR..NOSE
1542 | 0xF0 0x9F 0x91 0x86..0x90 #So [11] WHITE UP POINTING BACKHAND INDE...
1543 | 0xF0 0x9F 0x91 0xAE #So POLICE OFFICER
1544 | 0xF0 0x9F 0x91 0xB0..0xB8 #So [9] BRIDE WITH VEIL..PRINCESS
1545 | 0xF0 0x9F 0x91 0xBC #So BABY ANGEL
1546 | 0xF0 0x9F 0x92 0x81..0x83 #So [3] INFORMATION DESK PERSON..DANCER
1547 | 0xF0 0x9F 0x92 0x85..0x87 #So [3] NAIL POLISH..HAIRCUT
1548 | 0xF0 0x9F 0x92 0xAA #So FLEXED BICEPS
1549 | 0xF0 0x9F 0x95 0xB5 #So SLEUTH OR SPY
1550 | 0xF0 0x9F 0x95 0xBA #So MAN DANCING
1551 | 0xF0 0x9F 0x96 0x90 #So RAISED HAND WITH FINGERS SPLAYED
1552 | 0xF0 0x9F 0x96 0x95..0x96 #So [2] REVERSED HAND WITH MIDDLE FINGE...
1553 | 0xF0 0x9F 0x99 0x85..0x87 #So [3] FACE WITH NO GOOD GESTURE..PERS...
1554 | 0xF0 0x9F 0x99 0x8B..0x8F #So [5] HAPPY PERSON RAISING ONE HAND.....
1555 | 0xF0 0x9F 0x9A 0xA3 #So ROWBOAT
1556 | 0xF0 0x9F 0x9A 0xB4..0xB6 #So [3] BICYCLIST..PEDESTRIAN
1557 | 0xF0 0x9F 0x9B 0x80 #So BATH
1558 | 0xF0 0x9F 0xA4 0x98..0x9E #So [7] SIGN OF THE HORNS..HAND WITH IN...
1559 | 0xF0 0x9F 0xA4 0xA6 #So FACE PALM
1560 | 0xF0 0x9F 0xA4 0xB0 #So PREGNANT WOMAN
1561 | 0xF0 0x9F 0xA4 0xB3..0xB9 #So [7] SELFIE..JUGGLING
1562 | 0xF0 0x9F 0xA4 0xBC..0xBE #So [3] WRESTLERS..HANDBALL
1563 ;
1564
1565 E_Modifier =
1566 0xF0 0x9F 0x8F 0xBB..0xBF #Sk [5] EMOJI MODIFIER FITZPATRICK TYPE...
1567 ;
1568
1569 ZWJ =
1570 0xE2 0x80 0x8D #Cf ZERO WIDTH JOINER
1571 ;
1572
1573 Glue_After_Zwj =
1574 0xE2 0x9D 0xA4 #So HEAVY BLACK HEART
1575 | 0xF0 0x9F 0x92 0x8B #So KISS MARK
1576 | 0xF0 0x9F 0x97 0xA8 #So LEFT SPEECH BUBBLE
1577 ;
1578
1579 E_Base_GAZ =
1580 0xF0 0x9F 0x91 0xA6..0xA9 #So [4] BOY..WOMAN
1581 ;
1582
1583}%%
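The machine classes defined above (L, V, T, LV, LVT, E_Base, E_Modifier, ZWJ, Glue_After_Zwj, E_Base_GAZ) are what Ragel compiles into the generated grapheme-cluster scanner shipped by the textseg package. A minimal consumption sketch follows; it is not part of the vendored sources and assumes the package exposes ScanGraphemeClusters as a standard bufio.SplitFunc:

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// Scan extended grapheme clusters rather than runes, so multi-codepoint
	// sequences (Hangul LV/LVT syllables, an emoji base plus Fitzpatrick
	// modifier, combining marks) come back as single tokens.
	sc := bufio.NewScanner(strings.NewReader("안녕 👍🏽"))
	sc.Split(textseg.ScanGraphemeClusters)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text())
	}
}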
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go
new file mode 100644
index 0000000..aad3d05
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go
@@ -0,0 +1,307 @@
1// Copyright (c) 2014 Couchbase, Inc.
2// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
3// except in compliance with the License. You may obtain a copy of the License at
4// http://www.apache.org/licenses/LICENSE-2.0
5// Unless required by applicable law or agreed to in writing, software distributed under the
6// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
7// either express or implied. See the License for the specific language governing permissions
8// and limitations under the License.
9
10// Modified by Martin Atkins to serve the needs of package textseg.
11
12// +build ignore
13
14package main
15
16import (
17 "bufio"
18 "flag"
19 "fmt"
20 "io"
21 "log"
22 "net/http"
23 "os"
24 "os/exec"
25 "sort"
26 "strconv"
27 "strings"
28 "unicode"
29)
30
31var url = flag.String("url",
32 "http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/",
33 "URL of Unicode database directory")
34var verbose = flag.Bool("verbose",
35 false,
36 "write data to stdout as it is parsed")
37var localFiles = flag.Bool("local",
38 false,
39 "data files have been copied to the current directory; for debugging only")
40var outputFile = flag.String("output",
41 "",
42 "output file for generated tables; default stdout")
43
44var output *bufio.Writer
45
46func main() {
47 flag.Parse()
48 setupOutput()
49
50 graphemePropertyRanges := make(map[string]*unicode.RangeTable)
51 loadUnicodeData("GraphemeBreakProperty.txt", graphemePropertyRanges)
52 wordPropertyRanges := make(map[string]*unicode.RangeTable)
53 loadUnicodeData("WordBreakProperty.txt", wordPropertyRanges)
54 sentencePropertyRanges := make(map[string]*unicode.RangeTable)
55 loadUnicodeData("SentenceBreakProperty.txt", sentencePropertyRanges)
56
57 fmt.Fprintf(output, fileHeader, *url)
58 generateTables("Grapheme", graphemePropertyRanges)
59 generateTables("Word", wordPropertyRanges)
60 generateTables("Sentence", sentencePropertyRanges)
61
62 flushOutput()
63}
64
65// WordBreakProperty.txt has the form:
66// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD
67// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ
68func openReader(file string) (input io.ReadCloser) {
69 if *localFiles {
70 f, err := os.Open(file)
71 if err != nil {
72 log.Fatal(err)
73 }
74 input = f
75 } else {
76 path := *url + file
77 resp, err := http.Get(path)
78 if err != nil {
79 log.Fatal(err)
80 }
81 if resp.StatusCode != 200 {
82 log.Fatal("bad GET status for "+file, resp.Status)
83 }
84 input = resp.Body
85 }
86 return
87}
88
89func loadUnicodeData(filename string, propertyRanges map[string]*unicode.RangeTable) {
90 f := openReader(filename)
91 defer f.Close()
92 bufioReader := bufio.NewReader(f)
93 line, err := bufioReader.ReadString('\n')
94 for err == nil {
95 parseLine(line, propertyRanges)
96 line, err = bufioReader.ReadString('\n')
97 }
98	// if the error was io.EOF, we still need to process the last value
99 if err == io.EOF {
100 parseLine(line, propertyRanges)
101 }
102}
103
104const comment = "#"
105const sep = ";"
106const rnge = ".."
107
108func parseLine(line string, propertyRanges map[string]*unicode.RangeTable) {
109 if strings.HasPrefix(line, comment) {
110 return
111 }
112 line = strings.TrimSpace(line)
113 if len(line) == 0 {
114 return
115 }
116 commentStart := strings.Index(line, comment)
117 if commentStart > 0 {
118 line = line[0:commentStart]
119 }
120 pieces := strings.Split(line, sep)
121 if len(pieces) != 2 {
122 log.Printf("unexpected %d pieces in %s", len(pieces), line)
123 return
124 }
125
126 propertyName := strings.TrimSpace(pieces[1])
127
128 rangeTable, ok := propertyRanges[propertyName]
129 if !ok {
130 rangeTable = &unicode.RangeTable{
131 LatinOffset: 0,
132 }
133 propertyRanges[propertyName] = rangeTable
134 }
135
136 codepointRange := strings.TrimSpace(pieces[0])
137 rngeIndex := strings.Index(codepointRange, rnge)
138
139 if rngeIndex < 0 {
140 // single codepoint, not range
141 codepointInt, err := strconv.ParseUint(codepointRange, 16, 64)
142 if err != nil {
143 log.Printf("error parsing int: %v", err)
144 return
145 }
146 if codepointInt < 0x10000 {
147 r16 := unicode.Range16{
148 Lo: uint16(codepointInt),
149 Hi: uint16(codepointInt),
150 Stride: 1,
151 }
152 addR16ToTable(rangeTable, r16)
153 } else {
154 r32 := unicode.Range32{
155 Lo: uint32(codepointInt),
156 Hi: uint32(codepointInt),
157 Stride: 1,
158 }
159 addR32ToTable(rangeTable, r32)
160 }
161 } else {
162 rngeStart := codepointRange[0:rngeIndex]
163 rngeEnd := codepointRange[rngeIndex+2:]
164 rngeStartInt, err := strconv.ParseUint(rngeStart, 16, 64)
165 if err != nil {
166 log.Printf("error parsing int: %v", err)
167 return
168 }
169 rngeEndInt, err := strconv.ParseUint(rngeEnd, 16, 64)
170 if err != nil {
171 log.Printf("error parsing int: %v", err)
172 return
173 }
174 if rngeStartInt < 0x10000 && rngeEndInt < 0x10000 {
175 r16 := unicode.Range16{
176 Lo: uint16(rngeStartInt),
177 Hi: uint16(rngeEndInt),
178 Stride: 1,
179 }
180 addR16ToTable(rangeTable, r16)
181 } else if rngeStartInt >= 0x10000 && rngeEndInt >= 0x10000 {
182 r32 := unicode.Range32{
183 Lo: uint32(rngeStartInt),
184 Hi: uint32(rngeEndInt),
185 Stride: 1,
186 }
187 addR32ToTable(rangeTable, r32)
188 } else {
189 log.Printf("unexpected range")
190 }
191 }
192}
193
194func addR16ToTable(r *unicode.RangeTable, r16 unicode.Range16) {
195 if r.R16 == nil {
196 r.R16 = make([]unicode.Range16, 0, 1)
197 }
198 r.R16 = append(r.R16, r16)
199 if r16.Hi <= unicode.MaxLatin1 {
200 r.LatinOffset++
201 }
202}
203
204func addR32ToTable(r *unicode.RangeTable, r32 unicode.Range32) {
205 if r.R32 == nil {
206 r.R32 = make([]unicode.Range32, 0, 1)
207 }
208 r.R32 = append(r.R32, r32)
209}
210
211func generateTables(prefix string, propertyRanges map[string]*unicode.RangeTable) {
212 prNames := make([]string, 0, len(propertyRanges))
213 for k := range propertyRanges {
214 prNames = append(prNames, k)
215 }
216 sort.Strings(prNames)
217 for _, key := range prNames {
218 rt := propertyRanges[key]
219 fmt.Fprintf(output, "var _%s%s = %s\n", prefix, key, generateRangeTable(rt))
220 }
221 fmt.Fprintf(output, "type _%sRuneRange unicode.RangeTable\n", prefix)
222
223 fmt.Fprintf(output, "func _%sRuneType(r rune) *_%sRuneRange {\n", prefix, prefix)
224 fmt.Fprintf(output, "\tswitch {\n")
225 for _, key := range prNames {
226 fmt.Fprintf(output, "\tcase unicode.Is(_%s%s, r):\n\t\treturn (*_%sRuneRange)(_%s%s)\n", prefix, key, prefix, prefix, key)
227 }
228 fmt.Fprintf(output, "\tdefault:\n\t\treturn nil\n")
229 fmt.Fprintf(output, "\t}\n")
230 fmt.Fprintf(output, "}\n")
231
232 fmt.Fprintf(output, "func (rng *_%sRuneRange) String() string {\n", prefix)
233 fmt.Fprintf(output, "\tswitch (*unicode.RangeTable)(rng) {\n")
234 for _, key := range prNames {
235 fmt.Fprintf(output, "\tcase _%s%s:\n\t\treturn %q\n", prefix, key, key)
236 }
237 fmt.Fprintf(output, "\tdefault:\n\t\treturn \"Other\"\n")
238 fmt.Fprintf(output, "\t}\n")
239 fmt.Fprintf(output, "}\n")
240}
241
242func generateRangeTable(rt *unicode.RangeTable) string {
243 rv := "&unicode.RangeTable{\n"
244 if rt.R16 != nil {
245 rv += "\tR16: []unicode.Range16{\n"
246 for _, r16 := range rt.R16 {
247 rv += fmt.Sprintf("\t\t%#v,\n", r16)
248 }
249 rv += "\t},\n"
250 }
251 if rt.R32 != nil {
252 rv += "\tR32: []unicode.Range32{\n"
253 for _, r32 := range rt.R32 {
254 rv += fmt.Sprintf("\t\t%#v,\n", r32)
255 }
256 rv += "\t},\n"
257 }
258 rv += fmt.Sprintf("\t\tLatinOffset: %d,\n", rt.LatinOffset)
259 rv += "}\n"
260 return rv
261}
262
263const fileHeader = `// Generated by running
264// maketables --url=%s
265// DO NOT EDIT
266
267package textseg
268
269import(
270 "unicode"
271)
272`
273
274func setupOutput() {
275 output = bufio.NewWriter(startGofmt())
276}
277
278// startGofmt connects output to a gofmt process if -output is set.
279func startGofmt() io.Writer {
280 if *outputFile == "" {
281 return os.Stdout
282 }
283 stdout, err := os.Create(*outputFile)
284 if err != nil {
285 log.Fatal(err)
286 }
287 // Pipe output to gofmt.
288 gofmt := exec.Command("gofmt")
289 fd, err := gofmt.StdinPipe()
290 if err != nil {
291 log.Fatal(err)
292 }
293 gofmt.Stdout = stdout
294 gofmt.Stderr = os.Stderr
295 err = gofmt.Start()
296 if err != nil {
297 log.Fatal(err)
298 }
299 return fd
300}
301
302func flushOutput() {
303 err := output.Flush()
304 if err != nil {
305 log.Fatal(err)
306 }
307}
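
make_tables.go above turns the *BreakProperty.txt data into *unicode.RangeTable values plus a per-property RuneType lookup, piping the result through gofmt; the generated output is the tables.go file added later in this diff. Because the emitted tables are ordinary *unicode.RangeTable values, membership can be tested with the standard unicode.Is. A minimal sketch (hand-written here, mirroring the generated _GraphemeLF table rather than importing it) of how such a table is queried:

package main

import (
	"fmt"
	"unicode"
)

// graphemeLF mirrors the generated _GraphemeLF table: a single-entry
// RangeTable covering U+000A (LINE FEED).
var graphemeLF = &unicode.RangeTable{
	R16:         []unicode.Range16{{Lo: 0x0a, Hi: 0x0a, Stride: 1}},
	LatinOffset: 1,
}

func main() {
	fmt.Println(unicode.Is(graphemeLF, '\n')) // true
	fmt.Println(unicode.Is(graphemeLF, 'x'))  // false
}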
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go
new file mode 100644
index 0000000..ac42002
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go
@@ -0,0 +1,212 @@
1// Copyright (c) 2014 Couchbase, Inc.
2// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
3// except in compliance with the License. You may obtain a copy of the License at
4// http://www.apache.org/licenses/LICENSE-2.0
5// Unless required by applicable law or agreed to in writing, software distributed under the
6// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
7// either express or implied. See the License for the specific language governing permissions
8// and limitations under the License.
9
10// +build ignore
11
12package main
13
14import (
15 "bufio"
16 "bytes"
17 "flag"
18 "fmt"
19 "io"
20 "log"
21 "net/http"
22 "os"
23 "os/exec"
24 "strconv"
25 "strings"
26 "unicode"
27)
28
29var url = flag.String("url",
30 "http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/",
31 "URL of Unicode database directory")
32var verbose = flag.Bool("verbose",
33 false,
34 "write data to stdout as it is parsed")
35var localFiles = flag.Bool("local",
36 false,
37 "data files have been copied to the current directory; for debugging only")
38
39var outputFile = flag.String("output",
40 "",
41 "output file for generated tables; default stdout")
42
43var output *bufio.Writer
44
45func main() {
46 flag.Parse()
47 setupOutput()
48
49 graphemeTests := make([]test, 0)
50 graphemeTests = loadUnicodeData("GraphemeBreakTest.txt", graphemeTests)
51 wordTests := make([]test, 0)
52 wordTests = loadUnicodeData("WordBreakTest.txt", wordTests)
53 sentenceTests := make([]test, 0)
54 sentenceTests = loadUnicodeData("SentenceBreakTest.txt", sentenceTests)
55
56 fmt.Fprintf(output, fileHeader, *url)
57 generateTestTables("Grapheme", graphemeTests)
58 generateTestTables("Word", wordTests)
59 generateTestTables("Sentence", sentenceTests)
60
61 flushOutput()
62}
63
64// GraphemeBreakTest.txt (and the other *BreakTest.txt files) has the form:
65// ÷ 0020 × 0308 ÷ 0020 ÷
66// where ÷ marks a position at which a break is expected and × marks a position where no break occurs.
67func openReader(file string) (input io.ReadCloser) {
68 if *localFiles {
69 f, err := os.Open(file)
70 if err != nil {
71 log.Fatal(err)
72 }
73 input = f
74 } else {
75 path := *url + file
76 resp, err := http.Get(path)
77 if err != nil {
78 log.Fatal(err)
79 }
80 if resp.StatusCode != 200 {
81 log.Fatal("bad GET status for "+file, resp.Status)
82 }
83 input = resp.Body
84 }
85 return
86}
87
88func loadUnicodeData(filename string, tests []test) []test {
89 f := openReader(filename)
90 defer f.Close()
91 bufioReader := bufio.NewReader(f)
92 line, err := bufioReader.ReadString('\n')
93 for err == nil {
94 tests = parseLine(line, tests)
95 line, err = bufioReader.ReadString('\n')
96 }
97	// if the error was io.EOF, we still need to process the last value
98 if err == io.EOF {
99 tests = parseLine(line, tests)
100 }
101 return tests
102}
103
104const comment = "#"
105const brk = "÷"
106const nbrk = "×"
107
108type test [][]byte
109
110func parseLine(line string, tests []test) []test {
111 if strings.HasPrefix(line, comment) {
112 return tests
113 }
114 line = strings.TrimSpace(line)
115 if len(line) == 0 {
116 return tests
117 }
118 commentStart := strings.Index(line, comment)
119 if commentStart > 0 {
120 line = line[0:commentStart]
121 }
122 pieces := strings.Split(line, brk)
123 t := make(test, 0)
124 for _, piece := range pieces {
125 piece = strings.TrimSpace(piece)
126 if len(piece) > 0 {
127 codePoints := strings.Split(piece, nbrk)
128 word := ""
129 for _, codePoint := range codePoints {
130 codePoint = strings.TrimSpace(codePoint)
131 r, err := strconv.ParseInt(codePoint, 16, 64)
132 if err != nil {
133 log.Printf("err: %v for '%s'", err, string(r))
134 return tests
135 }
136
137 word += string(r)
138 }
139 t = append(t, []byte(word))
140 }
141 }
142 tests = append(tests, t)
143 return tests
144}
145
146func generateTestTables(prefix string, tests []test) {
147 fmt.Fprintf(output, testHeader, prefix)
148 for _, t := range tests {
149 fmt.Fprintf(output, "\t\t{\n")
150 fmt.Fprintf(output, "\t\t\tinput: %#v,\n", bytes.Join(t, []byte{}))
151 fmt.Fprintf(output, "\t\t\toutput: %s,\n", generateTest(t))
152 fmt.Fprintf(output, "\t\t},\n")
153 }
154 fmt.Fprintf(output, "}\n")
155}
156
157func generateTest(t test) string {
158 rv := "[][]byte{"
159 for _, te := range t {
160 rv += fmt.Sprintf("%#v,", te)
161 }
162 rv += "}"
163 return rv
164}
165
166const fileHeader = `// Generated by running
167// maketesttables --url=%s
168// DO NOT EDIT
169
170package textseg
171`
172
173const testHeader = `var unicode%sTests = []struct {
174 input []byte
175 output [][]byte
176 }{
177`
178
179func setupOutput() {
180 output = bufio.NewWriter(startGofmt())
181}
182
183// startGofmt connects output to a gofmt process if -output is set.
184func startGofmt() io.Writer {
185 if *outputFile == "" {
186 return os.Stdout
187 }
188 stdout, err := os.Create(*outputFile)
189 if err != nil {
190 log.Fatal(err)
191 }
192 // Pipe output to gofmt.
193 gofmt := exec.Command("gofmt")
194 fd, err := gofmt.StdinPipe()
195 if err != nil {
196 log.Fatal(err)
197 }
198 gofmt.Stdout = stdout
199 gofmt.Stderr = os.Stderr
200 err = gofmt.Start()
201 if err != nil {
202 log.Fatal(err)
203 }
204 return fd
205}
206
207func flushOutput() {
208 err := output.Flush()
209 if err != nil {
210 log.Fatal(err)
211 }
212}
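
make_test_tables.go produces one slice of {input, output} cases per break type (unicodeGraphemeTests and so on), built from the UCD *BreakTest.txt files. The test code that consumes those tables is not part of this diff; the sketch below (hand-written, with an assumed single entry) only illustrates the shape of the generated data and a trivial consistency check, not the real segmenter:

package main

import (
	"bytes"
	"fmt"
)

// graphemeTests has the same shape as the generated unicodeGraphemeTests
// table; the single entry here is hand-written for illustration.
var graphemeTests = []struct {
	input  []byte
	output [][]byte
}{
	{input: []byte("a\u0308b"), output: [][]byte{[]byte("a\u0308"), []byte("b")}},
}

func main() {
	for i, tc := range graphemeTests {
		// A real test would run the grapheme segmenter on tc.input and compare
		// against tc.output; this sketch only checks that the expected segments
		// reassemble into the input.
		if !bytes.Equal(bytes.Join(tc.output, nil), tc.input) {
			fmt.Printf("case %d: segments do not reassemble the input\n", i)
		}
	}
}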
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go
new file mode 100644
index 0000000..fab7e84
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go
@@ -0,0 +1,5700 @@
1// Generated by running
2// maketables --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/
3// DO NOT EDIT
4
5package textseg
6
7import (
8 "unicode"
9)
10
11var _GraphemeCR = &unicode.RangeTable{
12 R16: []unicode.Range16{
13 unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
14 },
15 LatinOffset: 1,
16}
17
18var _GraphemeControl = &unicode.RangeTable{
19 R16: []unicode.Range16{
20 unicode.Range16{Lo: 0x0, Hi: 0x9, Stride: 0x1},
21 unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
22 unicode.Range16{Lo: 0xe, Hi: 0x1f, Stride: 0x1},
23 unicode.Range16{Lo: 0x7f, Hi: 0x9f, Stride: 0x1},
24 unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
25 unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
26 unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
27 unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1},
28 unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
29 unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
30 unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
31 unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
32 unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
33 unicode.Range16{Lo: 0x2065, Hi: 0x2065, Stride: 0x1},
34 unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
35 unicode.Range16{Lo: 0xd800, Hi: 0xdfff, Stride: 0x1},
36 unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
37 unicode.Range16{Lo: 0xfff0, Hi: 0xfff8, Stride: 0x1},
38 unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
39 },
40 R32: []unicode.Range32{
41 unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
42 unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
43 unicode.Range32{Lo: 0xe0000, Hi: 0xe0000, Stride: 0x1},
44 unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
45 unicode.Range32{Lo: 0xe0002, Hi: 0xe001f, Stride: 0x1},
46 unicode.Range32{Lo: 0xe0080, Hi: 0xe00ff, Stride: 0x1},
47 unicode.Range32{Lo: 0xe01f0, Hi: 0xe0fff, Stride: 0x1},
48 },
49 LatinOffset: 5,
50}
51
52var _GraphemeE_Base = &unicode.RangeTable{
53 R16: []unicode.Range16{
54 unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1},
55 unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1},
56 unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1},
57 },
58 R32: []unicode.Range32{
59 unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1},
60 unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1},
61 unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1},
62 unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1},
63 unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1},
64 unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1},
65 unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1},
66 unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1},
67 unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1},
68 unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1},
69 unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1},
70 unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1},
71 unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1},
72 unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1},
73 unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1},
74 unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1},
75 unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1},
76 unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1},
77 unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1},
78 unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1},
79 unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1},
80 unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1},
81 unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1},
82 unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1},
83 unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1},
84 },
85 LatinOffset: 0,
86}
87
88var _GraphemeE_Base_GAZ = &unicode.RangeTable{
89 R32: []unicode.Range32{
90 unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1},
91 },
92 LatinOffset: 0,
93}
94
95var _GraphemeE_Modifier = &unicode.RangeTable{
96 R32: []unicode.Range32{
97 unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1},
98 },
99 LatinOffset: 0,
100}
101
102var _GraphemeExtend = &unicode.RangeTable{
103 R16: []unicode.Range16{
104 unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
105 unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
106 unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
107 unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
108 unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
109 unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
110 unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
111 unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
112 unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
113 unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
114 unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
115 unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
116 unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
117 unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
118 unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
119 unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
120 unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
121 unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
122 unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
123 unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
124 unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
125 unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
126 unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
127 unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
128 unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1},
129 unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
130 unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
131 unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
132 unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
133 unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
134 unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
135 unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1},
136 unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1},
137 unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1},
138 unicode.Range16{Lo: 0x9be, Hi: 0x9be, Stride: 0x1},
139 unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1},
140 unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1},
141 unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1},
142 unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1},
143 unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1},
144 unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1},
145 unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1},
146 unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1},
147 unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1},
148 unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1},
149 unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1},
150 unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1},
151 unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1},
152 unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1},
153 unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1},
154 unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1},
155 unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1},
156 unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1},
157 unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1},
158 unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1},
159 unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1},
160 unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1},
161 unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1},
162 unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1},
163 unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1},
164 unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1},
165 unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1},
166 unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1},
167 unicode.Range16{Lo: 0xbbe, Hi: 0xbbe, Stride: 0x1},
168 unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1},
169 unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1},
170 unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1},
171 unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1},
172 unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1},
173 unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1},
174 unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1},
175 unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1},
176 unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1},
177 unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1},
178 unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1},
179 unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1},
180 unicode.Range16{Lo: 0xcc2, Hi: 0xcc2, Stride: 0x1},
181 unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1},
182 unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1},
183 unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1},
184 unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1},
185 unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1},
186 unicode.Range16{Lo: 0xd3e, Hi: 0xd3e, Stride: 0x1},
187 unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1},
188 unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1},
189 unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1},
190 unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1},
191 unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1},
192 unicode.Range16{Lo: 0xdcf, Hi: 0xdcf, Stride: 0x1},
193 unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1},
194 unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1},
195 unicode.Range16{Lo: 0xddf, Hi: 0xddf, Stride: 0x1},
196 unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1},
197 unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1},
198 unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1},
199 unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1},
200 unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1},
201 unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1},
202 unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1},
203 unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1},
204 unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1},
205 unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1},
206 unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1},
207 unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1},
208 unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1},
209 unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1},
210 unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1},
211 unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1},
212 unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1},
213 unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1},
214 unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1},
215 unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1},
216 unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1},
217 unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1},
218 unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1},
219 unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1},
220 unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1},
221 unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1},
222 unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1},
223 unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1},
224 unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1},
225 unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1},
226 unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1},
227 unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1},
228 unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1},
229 unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1},
230 unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1},
231 unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1},
232 unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1},
233 unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1},
234 unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1},
235 unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1},
236 unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1},
237 unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1},
238 unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1},
239 unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1},
240 unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1},
241 unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1},
242 unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1},
243 unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1},
244 unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1},
245 unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1},
246 unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1},
247 unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1},
248 unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1},
249 unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1},
250 unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1},
251 unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1},
252 unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1},
253 unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1},
254 unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1},
255 unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1},
256 unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1},
257 unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1},
258 unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1},
259 unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1},
260 unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1},
261 unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1},
262 unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1},
263 unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1},
264 unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1},
265 unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1},
266 unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1},
267 unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1},
268 unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1},
269 unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1},
270 unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1},
271 unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1},
272 unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1},
273 unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1},
274 unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1},
275 unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1},
276 unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1},
277 unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1},
278 unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1},
279 unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1},
280 unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1},
281 unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1},
282 unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1},
283 unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1},
284 unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1},
285 unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1},
286 unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1},
287 unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1},
288 unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1},
289 unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1},
290 unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1},
291 unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1},
292 unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1},
293 unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1},
294 unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1},
295 unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1},
296 unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1},
297 unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1},
298 unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1},
299 unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1},
300 unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1},
301 unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1},
302 unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1},
303 unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1},
304 unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1},
305 unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1},
306 unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1},
307 unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1},
308 unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1},
309 unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1},
310 unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1},
311 unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1},
312 unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1},
313 unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1},
314 unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1},
315 unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1},
316 unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1},
317 unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1},
318 unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1},
319 unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1},
320 unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1},
321 unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1},
322 unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1},
323 unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1},
324 unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1},
325 unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1},
326 },
327 R32: []unicode.Range32{
328 unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1},
329 unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1},
330 unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1},
331 unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1},
332 unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1},
333 unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1},
334 unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1},
335 unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1},
336 unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1},
337 unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1},
338 unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1},
339 unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1},
340 unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1},
341 unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1},
342 unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1},
343 unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1},
344 unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1},
345 unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1},
346 unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1},
347 unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1},
348 unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1},
349 unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1},
350 unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1},
351 unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1},
352 unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1},
353 unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1},
354 unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1},
355 unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1},
356 unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1},
357 unicode.Range32{Lo: 0x1133e, Hi: 0x1133e, Stride: 0x1},
358 unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1},
359 unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1},
360 unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1},
361 unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1},
362 unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1},
363 unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1},
364 unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1},
365 unicode.Range32{Lo: 0x114b0, Hi: 0x114b0, Stride: 0x1},
366 unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1},
367 unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1},
368 unicode.Range32{Lo: 0x114bd, Hi: 0x114bd, Stride: 0x1},
369 unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1},
370 unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1},
371 unicode.Range32{Lo: 0x115af, Hi: 0x115af, Stride: 0x1},
372 unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1},
373 unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1},
374 unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1},
375 unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1},
376 unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1},
377 unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1},
378 unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1},
379 unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1},
380 unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1},
381 unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1},
382 unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1},
383 unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1},
384 unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1},
385 unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1},
386 unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1},
387 unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1},
388 unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1},
389 unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1},
390 unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1},
391 unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
392 unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
393 unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
394 unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
395 unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
396 unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
397 unicode.Range32{Lo: 0x1d165, Hi: 0x1d165, Stride: 0x1},
398 unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
399 unicode.Range32{Lo: 0x1d16e, Hi: 0x1d172, Stride: 0x1},
400 unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
401 unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
402 unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
403 unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
404 unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
405 unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
406 unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
407 unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
408 unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
409 unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
410 unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
411 unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
412 unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
413 unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
414 unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
415 unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
416 unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
417 unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
418 unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
419 },
420 LatinOffset: 0,
421}
422
423var _GraphemeGlue_After_Zwj = &unicode.RangeTable{
424 R16: []unicode.Range16{
425 unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1},
426 },
427 R32: []unicode.Range32{
428 unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1},
429 unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1},
430 },
431 LatinOffset: 0,
432}
433
434var _GraphemeL = &unicode.RangeTable{
435 R16: []unicode.Range16{
436 unicode.Range16{Lo: 0x1100, Hi: 0x115f, Stride: 0x1},
437 unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1},
438 },
439 LatinOffset: 0,
440}
441
442var _GraphemeLF = &unicode.RangeTable{
443 R16: []unicode.Range16{
444 unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
445 },
446 LatinOffset: 1,
447}
448
449var _GraphemeLV = &unicode.RangeTable{
450 R16: []unicode.Range16{
451 unicode.Range16{Lo: 0xac00, Hi: 0xac00, Stride: 0x1},
452 unicode.Range16{Lo: 0xac1c, Hi: 0xac1c, Stride: 0x1},
453 unicode.Range16{Lo: 0xac38, Hi: 0xac38, Stride: 0x1},
454 unicode.Range16{Lo: 0xac54, Hi: 0xac54, Stride: 0x1},
455 unicode.Range16{Lo: 0xac70, Hi: 0xac70, Stride: 0x1},
456 unicode.Range16{Lo: 0xac8c, Hi: 0xac8c, Stride: 0x1},
457 unicode.Range16{Lo: 0xaca8, Hi: 0xaca8, Stride: 0x1},
458 unicode.Range16{Lo: 0xacc4, Hi: 0xacc4, Stride: 0x1},
459 unicode.Range16{Lo: 0xace0, Hi: 0xace0, Stride: 0x1},
460 unicode.Range16{Lo: 0xacfc, Hi: 0xacfc, Stride: 0x1},
461 unicode.Range16{Lo: 0xad18, Hi: 0xad18, Stride: 0x1},
462 unicode.Range16{Lo: 0xad34, Hi: 0xad34, Stride: 0x1},
463 unicode.Range16{Lo: 0xad50, Hi: 0xad50, Stride: 0x1},
464 unicode.Range16{Lo: 0xad6c, Hi: 0xad6c, Stride: 0x1},
465 unicode.Range16{Lo: 0xad88, Hi: 0xad88, Stride: 0x1},
466 unicode.Range16{Lo: 0xada4, Hi: 0xada4, Stride: 0x1},
467 unicode.Range16{Lo: 0xadc0, Hi: 0xadc0, Stride: 0x1},
468 unicode.Range16{Lo: 0xaddc, Hi: 0xaddc, Stride: 0x1},
469 unicode.Range16{Lo: 0xadf8, Hi: 0xadf8, Stride: 0x1},
470 unicode.Range16{Lo: 0xae14, Hi: 0xae14, Stride: 0x1},
471 unicode.Range16{Lo: 0xae30, Hi: 0xae30, Stride: 0x1},
472 unicode.Range16{Lo: 0xae4c, Hi: 0xae4c, Stride: 0x1},
473 unicode.Range16{Lo: 0xae68, Hi: 0xae68, Stride: 0x1},
474 unicode.Range16{Lo: 0xae84, Hi: 0xae84, Stride: 0x1},
475 unicode.Range16{Lo: 0xaea0, Hi: 0xaea0, Stride: 0x1},
476 unicode.Range16{Lo: 0xaebc, Hi: 0xaebc, Stride: 0x1},
477 unicode.Range16{Lo: 0xaed8, Hi: 0xaed8, Stride: 0x1},
478 unicode.Range16{Lo: 0xaef4, Hi: 0xaef4, Stride: 0x1},
479 unicode.Range16{Lo: 0xaf10, Hi: 0xaf10, Stride: 0x1},
480 unicode.Range16{Lo: 0xaf2c, Hi: 0xaf2c, Stride: 0x1},
481 unicode.Range16{Lo: 0xaf48, Hi: 0xaf48, Stride: 0x1},
482 unicode.Range16{Lo: 0xaf64, Hi: 0xaf64, Stride: 0x1},
483 unicode.Range16{Lo: 0xaf80, Hi: 0xaf80, Stride: 0x1},
484 unicode.Range16{Lo: 0xaf9c, Hi: 0xaf9c, Stride: 0x1},
485 unicode.Range16{Lo: 0xafb8, Hi: 0xafb8, Stride: 0x1},
486 unicode.Range16{Lo: 0xafd4, Hi: 0xafd4, Stride: 0x1},
487 unicode.Range16{Lo: 0xaff0, Hi: 0xaff0, Stride: 0x1},
488 unicode.Range16{Lo: 0xb00c, Hi: 0xb00c, Stride: 0x1},
489 unicode.Range16{Lo: 0xb028, Hi: 0xb028, Stride: 0x1},
490 unicode.Range16{Lo: 0xb044, Hi: 0xb044, Stride: 0x1},
491 unicode.Range16{Lo: 0xb060, Hi: 0xb060, Stride: 0x1},
492 unicode.Range16{Lo: 0xb07c, Hi: 0xb07c, Stride: 0x1},
493 unicode.Range16{Lo: 0xb098, Hi: 0xb098, Stride: 0x1},
494 unicode.Range16{Lo: 0xb0b4, Hi: 0xb0b4, Stride: 0x1},
495 unicode.Range16{Lo: 0xb0d0, Hi: 0xb0d0, Stride: 0x1},
496 unicode.Range16{Lo: 0xb0ec, Hi: 0xb0ec, Stride: 0x1},
497 unicode.Range16{Lo: 0xb108, Hi: 0xb108, Stride: 0x1},
498 unicode.Range16{Lo: 0xb124, Hi: 0xb124, Stride: 0x1},
499 unicode.Range16{Lo: 0xb140, Hi: 0xb140, Stride: 0x1},
500 unicode.Range16{Lo: 0xb15c, Hi: 0xb15c, Stride: 0x1},
501 unicode.Range16{Lo: 0xb178, Hi: 0xb178, Stride: 0x1},
502 unicode.Range16{Lo: 0xb194, Hi: 0xb194, Stride: 0x1},
503 unicode.Range16{Lo: 0xb1b0, Hi: 0xb1b0, Stride: 0x1},
504 unicode.Range16{Lo: 0xb1cc, Hi: 0xb1cc, Stride: 0x1},
505 unicode.Range16{Lo: 0xb1e8, Hi: 0xb1e8, Stride: 0x1},
506 unicode.Range16{Lo: 0xb204, Hi: 0xb204, Stride: 0x1},
507 unicode.Range16{Lo: 0xb220, Hi: 0xb220, Stride: 0x1},
508 unicode.Range16{Lo: 0xb23c, Hi: 0xb23c, Stride: 0x1},
509 unicode.Range16{Lo: 0xb258, Hi: 0xb258, Stride: 0x1},
510 unicode.Range16{Lo: 0xb274, Hi: 0xb274, Stride: 0x1},
511 unicode.Range16{Lo: 0xb290, Hi: 0xb290, Stride: 0x1},
512 unicode.Range16{Lo: 0xb2ac, Hi: 0xb2ac, Stride: 0x1},
513 unicode.Range16{Lo: 0xb2c8, Hi: 0xb2c8, Stride: 0x1},
514 unicode.Range16{Lo: 0xb2e4, Hi: 0xb2e4, Stride: 0x1},
515 unicode.Range16{Lo: 0xb300, Hi: 0xb300, Stride: 0x1},
516 unicode.Range16{Lo: 0xb31c, Hi: 0xb31c, Stride: 0x1},
517 unicode.Range16{Lo: 0xb338, Hi: 0xb338, Stride: 0x1},
518 unicode.Range16{Lo: 0xb354, Hi: 0xb354, Stride: 0x1},
519 unicode.Range16{Lo: 0xb370, Hi: 0xb370, Stride: 0x1},
520 unicode.Range16{Lo: 0xb38c, Hi: 0xb38c, Stride: 0x1},
521 unicode.Range16{Lo: 0xb3a8, Hi: 0xb3a8, Stride: 0x1},
522 unicode.Range16{Lo: 0xb3c4, Hi: 0xb3c4, Stride: 0x1},
523 unicode.Range16{Lo: 0xb3e0, Hi: 0xb3e0, Stride: 0x1},
524 unicode.Range16{Lo: 0xb3fc, Hi: 0xb3fc, Stride: 0x1},
525 unicode.Range16{Lo: 0xb418, Hi: 0xb418, Stride: 0x1},
526 unicode.Range16{Lo: 0xb434, Hi: 0xb434, Stride: 0x1},
527 unicode.Range16{Lo: 0xb450, Hi: 0xb450, Stride: 0x1},
528 unicode.Range16{Lo: 0xb46c, Hi: 0xb46c, Stride: 0x1},
529 unicode.Range16{Lo: 0xb488, Hi: 0xb488, Stride: 0x1},
530 unicode.Range16{Lo: 0xb4a4, Hi: 0xb4a4, Stride: 0x1},
531 unicode.Range16{Lo: 0xb4c0, Hi: 0xb4c0, Stride: 0x1},
532 unicode.Range16{Lo: 0xb4dc, Hi: 0xb4dc, Stride: 0x1},
533 unicode.Range16{Lo: 0xb4f8, Hi: 0xb4f8, Stride: 0x1},
534 unicode.Range16{Lo: 0xb514, Hi: 0xb514, Stride: 0x1},
535 unicode.Range16{Lo: 0xb530, Hi: 0xb530, Stride: 0x1},
536 unicode.Range16{Lo: 0xb54c, Hi: 0xb54c, Stride: 0x1},
537 unicode.Range16{Lo: 0xb568, Hi: 0xb568, Stride: 0x1},
538 unicode.Range16{Lo: 0xb584, Hi: 0xb584, Stride: 0x1},
539 unicode.Range16{Lo: 0xb5a0, Hi: 0xb5a0, Stride: 0x1},
540 unicode.Range16{Lo: 0xb5bc, Hi: 0xb5bc, Stride: 0x1},
541 unicode.Range16{Lo: 0xb5d8, Hi: 0xb5d8, Stride: 0x1},
542 unicode.Range16{Lo: 0xb5f4, Hi: 0xb5f4, Stride: 0x1},
543 unicode.Range16{Lo: 0xb610, Hi: 0xb610, Stride: 0x1},
544 unicode.Range16{Lo: 0xb62c, Hi: 0xb62c, Stride: 0x1},
545 unicode.Range16{Lo: 0xb648, Hi: 0xb648, Stride: 0x1},
546 unicode.Range16{Lo: 0xb664, Hi: 0xb664, Stride: 0x1},
547 unicode.Range16{Lo: 0xb680, Hi: 0xb680, Stride: 0x1},
548 unicode.Range16{Lo: 0xb69c, Hi: 0xb69c, Stride: 0x1},
549 unicode.Range16{Lo: 0xb6b8, Hi: 0xb6b8, Stride: 0x1},
550 unicode.Range16{Lo: 0xb6d4, Hi: 0xb6d4, Stride: 0x1},
551 unicode.Range16{Lo: 0xb6f0, Hi: 0xb6f0, Stride: 0x1},
552 unicode.Range16{Lo: 0xb70c, Hi: 0xb70c, Stride: 0x1},
553 unicode.Range16{Lo: 0xb728, Hi: 0xb728, Stride: 0x1},
554 unicode.Range16{Lo: 0xb744, Hi: 0xb744, Stride: 0x1},
555 unicode.Range16{Lo: 0xb760, Hi: 0xb760, Stride: 0x1},
556 unicode.Range16{Lo: 0xb77c, Hi: 0xb77c, Stride: 0x1},
557 unicode.Range16{Lo: 0xb798, Hi: 0xb798, Stride: 0x1},
558 unicode.Range16{Lo: 0xb7b4, Hi: 0xb7b4, Stride: 0x1},
559 unicode.Range16{Lo: 0xb7d0, Hi: 0xb7d0, Stride: 0x1},
560 unicode.Range16{Lo: 0xb7ec, Hi: 0xb7ec, Stride: 0x1},
561 unicode.Range16{Lo: 0xb808, Hi: 0xb808, Stride: 0x1},
562 unicode.Range16{Lo: 0xb824, Hi: 0xb824, Stride: 0x1},
563 unicode.Range16{Lo: 0xb840, Hi: 0xb840, Stride: 0x1},
564 unicode.Range16{Lo: 0xb85c, Hi: 0xb85c, Stride: 0x1},
565 unicode.Range16{Lo: 0xb878, Hi: 0xb878, Stride: 0x1},
566 unicode.Range16{Lo: 0xb894, Hi: 0xb894, Stride: 0x1},
567 unicode.Range16{Lo: 0xb8b0, Hi: 0xb8b0, Stride: 0x1},
568 unicode.Range16{Lo: 0xb8cc, Hi: 0xb8cc, Stride: 0x1},
569 unicode.Range16{Lo: 0xb8e8, Hi: 0xb8e8, Stride: 0x1},
570 unicode.Range16{Lo: 0xb904, Hi: 0xb904, Stride: 0x1},
571 unicode.Range16{Lo: 0xb920, Hi: 0xb920, Stride: 0x1},
572 unicode.Range16{Lo: 0xb93c, Hi: 0xb93c, Stride: 0x1},
573 unicode.Range16{Lo: 0xb958, Hi: 0xb958, Stride: 0x1},
574 unicode.Range16{Lo: 0xb974, Hi: 0xb974, Stride: 0x1},
575 unicode.Range16{Lo: 0xb990, Hi: 0xb990, Stride: 0x1},
576 unicode.Range16{Lo: 0xb9ac, Hi: 0xb9ac, Stride: 0x1},
577 unicode.Range16{Lo: 0xb9c8, Hi: 0xb9c8, Stride: 0x1},
578 unicode.Range16{Lo: 0xb9e4, Hi: 0xb9e4, Stride: 0x1},
579 unicode.Range16{Lo: 0xba00, Hi: 0xba00, Stride: 0x1},
580 unicode.Range16{Lo: 0xba1c, Hi: 0xba1c, Stride: 0x1},
581 unicode.Range16{Lo: 0xba38, Hi: 0xba38, Stride: 0x1},
582 unicode.Range16{Lo: 0xba54, Hi: 0xba54, Stride: 0x1},
583 unicode.Range16{Lo: 0xba70, Hi: 0xba70, Stride: 0x1},
584 unicode.Range16{Lo: 0xba8c, Hi: 0xba8c, Stride: 0x1},
585 unicode.Range16{Lo: 0xbaa8, Hi: 0xbaa8, Stride: 0x1},
586 unicode.Range16{Lo: 0xbac4, Hi: 0xbac4, Stride: 0x1},
587 unicode.Range16{Lo: 0xbae0, Hi: 0xbae0, Stride: 0x1},
588 unicode.Range16{Lo: 0xbafc, Hi: 0xbafc, Stride: 0x1},
589 unicode.Range16{Lo: 0xbb18, Hi: 0xbb18, Stride: 0x1},
590 unicode.Range16{Lo: 0xbb34, Hi: 0xbb34, Stride: 0x1},
591 unicode.Range16{Lo: 0xbb50, Hi: 0xbb50, Stride: 0x1},
592 unicode.Range16{Lo: 0xbb6c, Hi: 0xbb6c, Stride: 0x1},
593 unicode.Range16{Lo: 0xbb88, Hi: 0xbb88, Stride: 0x1},
594 unicode.Range16{Lo: 0xbba4, Hi: 0xbba4, Stride: 0x1},
595 unicode.Range16{Lo: 0xbbc0, Hi: 0xbbc0, Stride: 0x1},
596 unicode.Range16{Lo: 0xbbdc, Hi: 0xbbdc, Stride: 0x1},
597 unicode.Range16{Lo: 0xbbf8, Hi: 0xbbf8, Stride: 0x1},
598 unicode.Range16{Lo: 0xbc14, Hi: 0xbc14, Stride: 0x1},
599 unicode.Range16{Lo: 0xbc30, Hi: 0xbc30, Stride: 0x1},
600 unicode.Range16{Lo: 0xbc4c, Hi: 0xbc4c, Stride: 0x1},
601 unicode.Range16{Lo: 0xbc68, Hi: 0xbc68, Stride: 0x1},
602 unicode.Range16{Lo: 0xbc84, Hi: 0xbc84, Stride: 0x1},
603 unicode.Range16{Lo: 0xbca0, Hi: 0xbca0, Stride: 0x1},
604 unicode.Range16{Lo: 0xbcbc, Hi: 0xbcbc, Stride: 0x1},
605 unicode.Range16{Lo: 0xbcd8, Hi: 0xbcd8, Stride: 0x1},
606 unicode.Range16{Lo: 0xbcf4, Hi: 0xbcf4, Stride: 0x1},
607 unicode.Range16{Lo: 0xbd10, Hi: 0xbd10, Stride: 0x1},
608 unicode.Range16{Lo: 0xbd2c, Hi: 0xbd2c, Stride: 0x1},
609 unicode.Range16{Lo: 0xbd48, Hi: 0xbd48, Stride: 0x1},
610 unicode.Range16{Lo: 0xbd64, Hi: 0xbd64, Stride: 0x1},
611 unicode.Range16{Lo: 0xbd80, Hi: 0xbd80, Stride: 0x1},
612 unicode.Range16{Lo: 0xbd9c, Hi: 0xbd9c, Stride: 0x1},
613 unicode.Range16{Lo: 0xbdb8, Hi: 0xbdb8, Stride: 0x1},
614 unicode.Range16{Lo: 0xbdd4, Hi: 0xbdd4, Stride: 0x1},
615 unicode.Range16{Lo: 0xbdf0, Hi: 0xbdf0, Stride: 0x1},
616 unicode.Range16{Lo: 0xbe0c, Hi: 0xbe0c, Stride: 0x1},
617 unicode.Range16{Lo: 0xbe28, Hi: 0xbe28, Stride: 0x1},
618 unicode.Range16{Lo: 0xbe44, Hi: 0xbe44, Stride: 0x1},
619 unicode.Range16{Lo: 0xbe60, Hi: 0xbe60, Stride: 0x1},
620 unicode.Range16{Lo: 0xbe7c, Hi: 0xbe7c, Stride: 0x1},
621 unicode.Range16{Lo: 0xbe98, Hi: 0xbe98, Stride: 0x1},
622 unicode.Range16{Lo: 0xbeb4, Hi: 0xbeb4, Stride: 0x1},
623 unicode.Range16{Lo: 0xbed0, Hi: 0xbed0, Stride: 0x1},
624 unicode.Range16{Lo: 0xbeec, Hi: 0xbeec, Stride: 0x1},
625 unicode.Range16{Lo: 0xbf08, Hi: 0xbf08, Stride: 0x1},
626 unicode.Range16{Lo: 0xbf24, Hi: 0xbf24, Stride: 0x1},
627 unicode.Range16{Lo: 0xbf40, Hi: 0xbf40, Stride: 0x1},
628 unicode.Range16{Lo: 0xbf5c, Hi: 0xbf5c, Stride: 0x1},
629 unicode.Range16{Lo: 0xbf78, Hi: 0xbf78, Stride: 0x1},
630 unicode.Range16{Lo: 0xbf94, Hi: 0xbf94, Stride: 0x1},
631 unicode.Range16{Lo: 0xbfb0, Hi: 0xbfb0, Stride: 0x1},
632 unicode.Range16{Lo: 0xbfcc, Hi: 0xbfcc, Stride: 0x1},
633 unicode.Range16{Lo: 0xbfe8, Hi: 0xbfe8, Stride: 0x1},
634 unicode.Range16{Lo: 0xc004, Hi: 0xc004, Stride: 0x1},
635 unicode.Range16{Lo: 0xc020, Hi: 0xc020, Stride: 0x1},
636 unicode.Range16{Lo: 0xc03c, Hi: 0xc03c, Stride: 0x1},
637 unicode.Range16{Lo: 0xc058, Hi: 0xc058, Stride: 0x1},
638 unicode.Range16{Lo: 0xc074, Hi: 0xc074, Stride: 0x1},
639 unicode.Range16{Lo: 0xc090, Hi: 0xc090, Stride: 0x1},
640 unicode.Range16{Lo: 0xc0ac, Hi: 0xc0ac, Stride: 0x1},
641 unicode.Range16{Lo: 0xc0c8, Hi: 0xc0c8, Stride: 0x1},
642 unicode.Range16{Lo: 0xc0e4, Hi: 0xc0e4, Stride: 0x1},
643 unicode.Range16{Lo: 0xc100, Hi: 0xc100, Stride: 0x1},
644 unicode.Range16{Lo: 0xc11c, Hi: 0xc11c, Stride: 0x1},
645 unicode.Range16{Lo: 0xc138, Hi: 0xc138, Stride: 0x1},
646 unicode.Range16{Lo: 0xc154, Hi: 0xc154, Stride: 0x1},
647 unicode.Range16{Lo: 0xc170, Hi: 0xc170, Stride: 0x1},
648 unicode.Range16{Lo: 0xc18c, Hi: 0xc18c, Stride: 0x1},
649 unicode.Range16{Lo: 0xc1a8, Hi: 0xc1a8, Stride: 0x1},
650 unicode.Range16{Lo: 0xc1c4, Hi: 0xc1c4, Stride: 0x1},
651 unicode.Range16{Lo: 0xc1e0, Hi: 0xc1e0, Stride: 0x1},
652 unicode.Range16{Lo: 0xc1fc, Hi: 0xc1fc, Stride: 0x1},
653 unicode.Range16{Lo: 0xc218, Hi: 0xc218, Stride: 0x1},
654 unicode.Range16{Lo: 0xc234, Hi: 0xc234, Stride: 0x1},
655 unicode.Range16{Lo: 0xc250, Hi: 0xc250, Stride: 0x1},
656 unicode.Range16{Lo: 0xc26c, Hi: 0xc26c, Stride: 0x1},
657 unicode.Range16{Lo: 0xc288, Hi: 0xc288, Stride: 0x1},
658 unicode.Range16{Lo: 0xc2a4, Hi: 0xc2a4, Stride: 0x1},
659 unicode.Range16{Lo: 0xc2c0, Hi: 0xc2c0, Stride: 0x1},
660 unicode.Range16{Lo: 0xc2dc, Hi: 0xc2dc, Stride: 0x1},
661 unicode.Range16{Lo: 0xc2f8, Hi: 0xc2f8, Stride: 0x1},
662 unicode.Range16{Lo: 0xc314, Hi: 0xc314, Stride: 0x1},
663 unicode.Range16{Lo: 0xc330, Hi: 0xc330, Stride: 0x1},
664 unicode.Range16{Lo: 0xc34c, Hi: 0xc34c, Stride: 0x1},
665 unicode.Range16{Lo: 0xc368, Hi: 0xc368, Stride: 0x1},
666 unicode.Range16{Lo: 0xc384, Hi: 0xc384, Stride: 0x1},
667 unicode.Range16{Lo: 0xc3a0, Hi: 0xc3a0, Stride: 0x1},
668 unicode.Range16{Lo: 0xc3bc, Hi: 0xc3bc, Stride: 0x1},
669 unicode.Range16{Lo: 0xc3d8, Hi: 0xc3d8, Stride: 0x1},
670 unicode.Range16{Lo: 0xc3f4, Hi: 0xc3f4, Stride: 0x1},
671 unicode.Range16{Lo: 0xc410, Hi: 0xc410, Stride: 0x1},
672 unicode.Range16{Lo: 0xc42c, Hi: 0xc42c, Stride: 0x1},
673 unicode.Range16{Lo: 0xc448, Hi: 0xc448, Stride: 0x1},
674 unicode.Range16{Lo: 0xc464, Hi: 0xc464, Stride: 0x1},
675 unicode.Range16{Lo: 0xc480, Hi: 0xc480, Stride: 0x1},
676 unicode.Range16{Lo: 0xc49c, Hi: 0xc49c, Stride: 0x1},
677 unicode.Range16{Lo: 0xc4b8, Hi: 0xc4b8, Stride: 0x1},
678 unicode.Range16{Lo: 0xc4d4, Hi: 0xc4d4, Stride: 0x1},
679 unicode.Range16{Lo: 0xc4f0, Hi: 0xc4f0, Stride: 0x1},
680 unicode.Range16{Lo: 0xc50c, Hi: 0xc50c, Stride: 0x1},
681 unicode.Range16{Lo: 0xc528, Hi: 0xc528, Stride: 0x1},
682 unicode.Range16{Lo: 0xc544, Hi: 0xc544, Stride: 0x1},
683 unicode.Range16{Lo: 0xc560, Hi: 0xc560, Stride: 0x1},
684 unicode.Range16{Lo: 0xc57c, Hi: 0xc57c, Stride: 0x1},
685 unicode.Range16{Lo: 0xc598, Hi: 0xc598, Stride: 0x1},
686 unicode.Range16{Lo: 0xc5b4, Hi: 0xc5b4, Stride: 0x1},
687 unicode.Range16{Lo: 0xc5d0, Hi: 0xc5d0, Stride: 0x1},
688 unicode.Range16{Lo: 0xc5ec, Hi: 0xc5ec, Stride: 0x1},
689 unicode.Range16{Lo: 0xc608, Hi: 0xc608, Stride: 0x1},
690 unicode.Range16{Lo: 0xc624, Hi: 0xc624, Stride: 0x1},
691 unicode.Range16{Lo: 0xc640, Hi: 0xc640, Stride: 0x1},
692 unicode.Range16{Lo: 0xc65c, Hi: 0xc65c, Stride: 0x1},
693 unicode.Range16{Lo: 0xc678, Hi: 0xc678, Stride: 0x1},
694 unicode.Range16{Lo: 0xc694, Hi: 0xc694, Stride: 0x1},
695 unicode.Range16{Lo: 0xc6b0, Hi: 0xc6b0, Stride: 0x1},
696 unicode.Range16{Lo: 0xc6cc, Hi: 0xc6cc, Stride: 0x1},
697 unicode.Range16{Lo: 0xc6e8, Hi: 0xc6e8, Stride: 0x1},
698 unicode.Range16{Lo: 0xc704, Hi: 0xc704, Stride: 0x1},
699 unicode.Range16{Lo: 0xc720, Hi: 0xc720, Stride: 0x1},
700 unicode.Range16{Lo: 0xc73c, Hi: 0xc73c, Stride: 0x1},
701 unicode.Range16{Lo: 0xc758, Hi: 0xc758, Stride: 0x1},
702 unicode.Range16{Lo: 0xc774, Hi: 0xc774, Stride: 0x1},
703 unicode.Range16{Lo: 0xc790, Hi: 0xc790, Stride: 0x1},
704 unicode.Range16{Lo: 0xc7ac, Hi: 0xc7ac, Stride: 0x1},
705 unicode.Range16{Lo: 0xc7c8, Hi: 0xc7c8, Stride: 0x1},
706 unicode.Range16{Lo: 0xc7e4, Hi: 0xc7e4, Stride: 0x1},
707 unicode.Range16{Lo: 0xc800, Hi: 0xc800, Stride: 0x1},
708 unicode.Range16{Lo: 0xc81c, Hi: 0xc81c, Stride: 0x1},
709 unicode.Range16{Lo: 0xc838, Hi: 0xc838, Stride: 0x1},
710 unicode.Range16{Lo: 0xc854, Hi: 0xc854, Stride: 0x1},
711 unicode.Range16{Lo: 0xc870, Hi: 0xc870, Stride: 0x1},
712 unicode.Range16{Lo: 0xc88c, Hi: 0xc88c, Stride: 0x1},
713 unicode.Range16{Lo: 0xc8a8, Hi: 0xc8a8, Stride: 0x1},
714 unicode.Range16{Lo: 0xc8c4, Hi: 0xc8c4, Stride: 0x1},
715 unicode.Range16{Lo: 0xc8e0, Hi: 0xc8e0, Stride: 0x1},
716 unicode.Range16{Lo: 0xc8fc, Hi: 0xc8fc, Stride: 0x1},
717 unicode.Range16{Lo: 0xc918, Hi: 0xc918, Stride: 0x1},
718 unicode.Range16{Lo: 0xc934, Hi: 0xc934, Stride: 0x1},
719 unicode.Range16{Lo: 0xc950, Hi: 0xc950, Stride: 0x1},
720 unicode.Range16{Lo: 0xc96c, Hi: 0xc96c, Stride: 0x1},
721 unicode.Range16{Lo: 0xc988, Hi: 0xc988, Stride: 0x1},
722 unicode.Range16{Lo: 0xc9a4, Hi: 0xc9a4, Stride: 0x1},
723 unicode.Range16{Lo: 0xc9c0, Hi: 0xc9c0, Stride: 0x1},
724 unicode.Range16{Lo: 0xc9dc, Hi: 0xc9dc, Stride: 0x1},
725 unicode.Range16{Lo: 0xc9f8, Hi: 0xc9f8, Stride: 0x1},
726 unicode.Range16{Lo: 0xca14, Hi: 0xca14, Stride: 0x1},
727 unicode.Range16{Lo: 0xca30, Hi: 0xca30, Stride: 0x1},
728 unicode.Range16{Lo: 0xca4c, Hi: 0xca4c, Stride: 0x1},
729 unicode.Range16{Lo: 0xca68, Hi: 0xca68, Stride: 0x1},
730 unicode.Range16{Lo: 0xca84, Hi: 0xca84, Stride: 0x1},
731 unicode.Range16{Lo: 0xcaa0, Hi: 0xcaa0, Stride: 0x1},
732 unicode.Range16{Lo: 0xcabc, Hi: 0xcabc, Stride: 0x1},
733 unicode.Range16{Lo: 0xcad8, Hi: 0xcad8, Stride: 0x1},
734 unicode.Range16{Lo: 0xcaf4, Hi: 0xcaf4, Stride: 0x1},
735 unicode.Range16{Lo: 0xcb10, Hi: 0xcb10, Stride: 0x1},
736 unicode.Range16{Lo: 0xcb2c, Hi: 0xcb2c, Stride: 0x1},
737 unicode.Range16{Lo: 0xcb48, Hi: 0xcb48, Stride: 0x1},
738 unicode.Range16{Lo: 0xcb64, Hi: 0xcb64, Stride: 0x1},
739 unicode.Range16{Lo: 0xcb80, Hi: 0xcb80, Stride: 0x1},
740 unicode.Range16{Lo: 0xcb9c, Hi: 0xcb9c, Stride: 0x1},
741 unicode.Range16{Lo: 0xcbb8, Hi: 0xcbb8, Stride: 0x1},
742 unicode.Range16{Lo: 0xcbd4, Hi: 0xcbd4, Stride: 0x1},
743 unicode.Range16{Lo: 0xcbf0, Hi: 0xcbf0, Stride: 0x1},
744 unicode.Range16{Lo: 0xcc0c, Hi: 0xcc0c, Stride: 0x1},
745 unicode.Range16{Lo: 0xcc28, Hi: 0xcc28, Stride: 0x1},
746 unicode.Range16{Lo: 0xcc44, Hi: 0xcc44, Stride: 0x1},
747 unicode.Range16{Lo: 0xcc60, Hi: 0xcc60, Stride: 0x1},
748 unicode.Range16{Lo: 0xcc7c, Hi: 0xcc7c, Stride: 0x1},
749 unicode.Range16{Lo: 0xcc98, Hi: 0xcc98, Stride: 0x1},
750 unicode.Range16{Lo: 0xccb4, Hi: 0xccb4, Stride: 0x1},
751 unicode.Range16{Lo: 0xccd0, Hi: 0xccd0, Stride: 0x1},
752 unicode.Range16{Lo: 0xccec, Hi: 0xccec, Stride: 0x1},
753 unicode.Range16{Lo: 0xcd08, Hi: 0xcd08, Stride: 0x1},
754 unicode.Range16{Lo: 0xcd24, Hi: 0xcd24, Stride: 0x1},
755 unicode.Range16{Lo: 0xcd40, Hi: 0xcd40, Stride: 0x1},
756 unicode.Range16{Lo: 0xcd5c, Hi: 0xcd5c, Stride: 0x1},
757 unicode.Range16{Lo: 0xcd78, Hi: 0xcd78, Stride: 0x1},
758 unicode.Range16{Lo: 0xcd94, Hi: 0xcd94, Stride: 0x1},
759 unicode.Range16{Lo: 0xcdb0, Hi: 0xcdb0, Stride: 0x1},
760 unicode.Range16{Lo: 0xcdcc, Hi: 0xcdcc, Stride: 0x1},
761 unicode.Range16{Lo: 0xcde8, Hi: 0xcde8, Stride: 0x1},
762 unicode.Range16{Lo: 0xce04, Hi: 0xce04, Stride: 0x1},
763 unicode.Range16{Lo: 0xce20, Hi: 0xce20, Stride: 0x1},
764 unicode.Range16{Lo: 0xce3c, Hi: 0xce3c, Stride: 0x1},
765 unicode.Range16{Lo: 0xce58, Hi: 0xce58, Stride: 0x1},
766 unicode.Range16{Lo: 0xce74, Hi: 0xce74, Stride: 0x1},
767 unicode.Range16{Lo: 0xce90, Hi: 0xce90, Stride: 0x1},
768 unicode.Range16{Lo: 0xceac, Hi: 0xceac, Stride: 0x1},
769 unicode.Range16{Lo: 0xcec8, Hi: 0xcec8, Stride: 0x1},
770 unicode.Range16{Lo: 0xcee4, Hi: 0xcee4, Stride: 0x1},
771 unicode.Range16{Lo: 0xcf00, Hi: 0xcf00, Stride: 0x1},
772 unicode.Range16{Lo: 0xcf1c, Hi: 0xcf1c, Stride: 0x1},
773 unicode.Range16{Lo: 0xcf38, Hi: 0xcf38, Stride: 0x1},
774 unicode.Range16{Lo: 0xcf54, Hi: 0xcf54, Stride: 0x1},
775 unicode.Range16{Lo: 0xcf70, Hi: 0xcf70, Stride: 0x1},
776 unicode.Range16{Lo: 0xcf8c, Hi: 0xcf8c, Stride: 0x1},
777 unicode.Range16{Lo: 0xcfa8, Hi: 0xcfa8, Stride: 0x1},
778 unicode.Range16{Lo: 0xcfc4, Hi: 0xcfc4, Stride: 0x1},
779 unicode.Range16{Lo: 0xcfe0, Hi: 0xcfe0, Stride: 0x1},
780 unicode.Range16{Lo: 0xcffc, Hi: 0xcffc, Stride: 0x1},
781 unicode.Range16{Lo: 0xd018, Hi: 0xd018, Stride: 0x1},
782 unicode.Range16{Lo: 0xd034, Hi: 0xd034, Stride: 0x1},
783 unicode.Range16{Lo: 0xd050, Hi: 0xd050, Stride: 0x1},
784 unicode.Range16{Lo: 0xd06c, Hi: 0xd06c, Stride: 0x1},
785 unicode.Range16{Lo: 0xd088, Hi: 0xd088, Stride: 0x1},
786 unicode.Range16{Lo: 0xd0a4, Hi: 0xd0a4, Stride: 0x1},
787 unicode.Range16{Lo: 0xd0c0, Hi: 0xd0c0, Stride: 0x1},
788 unicode.Range16{Lo: 0xd0dc, Hi: 0xd0dc, Stride: 0x1},
789 unicode.Range16{Lo: 0xd0f8, Hi: 0xd0f8, Stride: 0x1},
790 unicode.Range16{Lo: 0xd114, Hi: 0xd114, Stride: 0x1},
791 unicode.Range16{Lo: 0xd130, Hi: 0xd130, Stride: 0x1},
792 unicode.Range16{Lo: 0xd14c, Hi: 0xd14c, Stride: 0x1},
793 unicode.Range16{Lo: 0xd168, Hi: 0xd168, Stride: 0x1},
794 unicode.Range16{Lo: 0xd184, Hi: 0xd184, Stride: 0x1},
795 unicode.Range16{Lo: 0xd1a0, Hi: 0xd1a0, Stride: 0x1},
796 unicode.Range16{Lo: 0xd1bc, Hi: 0xd1bc, Stride: 0x1},
797 unicode.Range16{Lo: 0xd1d8, Hi: 0xd1d8, Stride: 0x1},
798 unicode.Range16{Lo: 0xd1f4, Hi: 0xd1f4, Stride: 0x1},
799 unicode.Range16{Lo: 0xd210, Hi: 0xd210, Stride: 0x1},
800 unicode.Range16{Lo: 0xd22c, Hi: 0xd22c, Stride: 0x1},
801 unicode.Range16{Lo: 0xd248, Hi: 0xd248, Stride: 0x1},
802 unicode.Range16{Lo: 0xd264, Hi: 0xd264, Stride: 0x1},
803 unicode.Range16{Lo: 0xd280, Hi: 0xd280, Stride: 0x1},
804 unicode.Range16{Lo: 0xd29c, Hi: 0xd29c, Stride: 0x1},
805 unicode.Range16{Lo: 0xd2b8, Hi: 0xd2b8, Stride: 0x1},
806 unicode.Range16{Lo: 0xd2d4, Hi: 0xd2d4, Stride: 0x1},
807 unicode.Range16{Lo: 0xd2f0, Hi: 0xd2f0, Stride: 0x1},
808 unicode.Range16{Lo: 0xd30c, Hi: 0xd30c, Stride: 0x1},
809 unicode.Range16{Lo: 0xd328, Hi: 0xd328, Stride: 0x1},
810 unicode.Range16{Lo: 0xd344, Hi: 0xd344, Stride: 0x1},
811 unicode.Range16{Lo: 0xd360, Hi: 0xd360, Stride: 0x1},
812 unicode.Range16{Lo: 0xd37c, Hi: 0xd37c, Stride: 0x1},
813 unicode.Range16{Lo: 0xd398, Hi: 0xd398, Stride: 0x1},
814 unicode.Range16{Lo: 0xd3b4, Hi: 0xd3b4, Stride: 0x1},
815 unicode.Range16{Lo: 0xd3d0, Hi: 0xd3d0, Stride: 0x1},
816 unicode.Range16{Lo: 0xd3ec, Hi: 0xd3ec, Stride: 0x1},
817 unicode.Range16{Lo: 0xd408, Hi: 0xd408, Stride: 0x1},
818 unicode.Range16{Lo: 0xd424, Hi: 0xd424, Stride: 0x1},
819 unicode.Range16{Lo: 0xd440, Hi: 0xd440, Stride: 0x1},
820 unicode.Range16{Lo: 0xd45c, Hi: 0xd45c, Stride: 0x1},
821 unicode.Range16{Lo: 0xd478, Hi: 0xd478, Stride: 0x1},
822 unicode.Range16{Lo: 0xd494, Hi: 0xd494, Stride: 0x1},
823 unicode.Range16{Lo: 0xd4b0, Hi: 0xd4b0, Stride: 0x1},
824 unicode.Range16{Lo: 0xd4cc, Hi: 0xd4cc, Stride: 0x1},
825 unicode.Range16{Lo: 0xd4e8, Hi: 0xd4e8, Stride: 0x1},
826 unicode.Range16{Lo: 0xd504, Hi: 0xd504, Stride: 0x1},
827 unicode.Range16{Lo: 0xd520, Hi: 0xd520, Stride: 0x1},
828 unicode.Range16{Lo: 0xd53c, Hi: 0xd53c, Stride: 0x1},
829 unicode.Range16{Lo: 0xd558, Hi: 0xd558, Stride: 0x1},
830 unicode.Range16{Lo: 0xd574, Hi: 0xd574, Stride: 0x1},
831 unicode.Range16{Lo: 0xd590, Hi: 0xd590, Stride: 0x1},
832 unicode.Range16{Lo: 0xd5ac, Hi: 0xd5ac, Stride: 0x1},
833 unicode.Range16{Lo: 0xd5c8, Hi: 0xd5c8, Stride: 0x1},
834 unicode.Range16{Lo: 0xd5e4, Hi: 0xd5e4, Stride: 0x1},
835 unicode.Range16{Lo: 0xd600, Hi: 0xd600, Stride: 0x1},
836 unicode.Range16{Lo: 0xd61c, Hi: 0xd61c, Stride: 0x1},
837 unicode.Range16{Lo: 0xd638, Hi: 0xd638, Stride: 0x1},
838 unicode.Range16{Lo: 0xd654, Hi: 0xd654, Stride: 0x1},
839 unicode.Range16{Lo: 0xd670, Hi: 0xd670, Stride: 0x1},
840 unicode.Range16{Lo: 0xd68c, Hi: 0xd68c, Stride: 0x1},
841 unicode.Range16{Lo: 0xd6a8, Hi: 0xd6a8, Stride: 0x1},
842 unicode.Range16{Lo: 0xd6c4, Hi: 0xd6c4, Stride: 0x1},
843 unicode.Range16{Lo: 0xd6e0, Hi: 0xd6e0, Stride: 0x1},
844 unicode.Range16{Lo: 0xd6fc, Hi: 0xd6fc, Stride: 0x1},
845 unicode.Range16{Lo: 0xd718, Hi: 0xd718, Stride: 0x1},
846 unicode.Range16{Lo: 0xd734, Hi: 0xd734, Stride: 0x1},
847 unicode.Range16{Lo: 0xd750, Hi: 0xd750, Stride: 0x1},
848 unicode.Range16{Lo: 0xd76c, Hi: 0xd76c, Stride: 0x1},
849 unicode.Range16{Lo: 0xd788, Hi: 0xd788, Stride: 0x1},
850 },
851 LatinOffset: 0,
852}
853
854var _GraphemeLVT = &unicode.RangeTable{
855 R16: []unicode.Range16{
856 unicode.Range16{Lo: 0xac01, Hi: 0xac1b, Stride: 0x1},
857 unicode.Range16{Lo: 0xac1d, Hi: 0xac37, Stride: 0x1},
858 unicode.Range16{Lo: 0xac39, Hi: 0xac53, Stride: 0x1},
859 unicode.Range16{Lo: 0xac55, Hi: 0xac6f, Stride: 0x1},
860 unicode.Range16{Lo: 0xac71, Hi: 0xac8b, Stride: 0x1},
861 unicode.Range16{Lo: 0xac8d, Hi: 0xaca7, Stride: 0x1},
862 unicode.Range16{Lo: 0xaca9, Hi: 0xacc3, Stride: 0x1},
863 unicode.Range16{Lo: 0xacc5, Hi: 0xacdf, Stride: 0x1},
864 unicode.Range16{Lo: 0xace1, Hi: 0xacfb, Stride: 0x1},
865 unicode.Range16{Lo: 0xacfd, Hi: 0xad17, Stride: 0x1},
866 unicode.Range16{Lo: 0xad19, Hi: 0xad33, Stride: 0x1},
867 unicode.Range16{Lo: 0xad35, Hi: 0xad4f, Stride: 0x1},
868 unicode.Range16{Lo: 0xad51, Hi: 0xad6b, Stride: 0x1},
869 unicode.Range16{Lo: 0xad6d, Hi: 0xad87, Stride: 0x1},
870 unicode.Range16{Lo: 0xad89, Hi: 0xada3, Stride: 0x1},
871 unicode.Range16{Lo: 0xada5, Hi: 0xadbf, Stride: 0x1},
872 unicode.Range16{Lo: 0xadc1, Hi: 0xaddb, Stride: 0x1},
873 unicode.Range16{Lo: 0xaddd, Hi: 0xadf7, Stride: 0x1},
874 unicode.Range16{Lo: 0xadf9, Hi: 0xae13, Stride: 0x1},
875 unicode.Range16{Lo: 0xae15, Hi: 0xae2f, Stride: 0x1},
876 unicode.Range16{Lo: 0xae31, Hi: 0xae4b, Stride: 0x1},
877 unicode.Range16{Lo: 0xae4d, Hi: 0xae67, Stride: 0x1},
878 unicode.Range16{Lo: 0xae69, Hi: 0xae83, Stride: 0x1},
879 unicode.Range16{Lo: 0xae85, Hi: 0xae9f, Stride: 0x1},
880 unicode.Range16{Lo: 0xaea1, Hi: 0xaebb, Stride: 0x1},
881 unicode.Range16{Lo: 0xaebd, Hi: 0xaed7, Stride: 0x1},
882 unicode.Range16{Lo: 0xaed9, Hi: 0xaef3, Stride: 0x1},
883 unicode.Range16{Lo: 0xaef5, Hi: 0xaf0f, Stride: 0x1},
884 unicode.Range16{Lo: 0xaf11, Hi: 0xaf2b, Stride: 0x1},
885 unicode.Range16{Lo: 0xaf2d, Hi: 0xaf47, Stride: 0x1},
886 unicode.Range16{Lo: 0xaf49, Hi: 0xaf63, Stride: 0x1},
887 unicode.Range16{Lo: 0xaf65, Hi: 0xaf7f, Stride: 0x1},
888 unicode.Range16{Lo: 0xaf81, Hi: 0xaf9b, Stride: 0x1},
889 unicode.Range16{Lo: 0xaf9d, Hi: 0xafb7, Stride: 0x1},
890 unicode.Range16{Lo: 0xafb9, Hi: 0xafd3, Stride: 0x1},
891 unicode.Range16{Lo: 0xafd5, Hi: 0xafef, Stride: 0x1},
892 unicode.Range16{Lo: 0xaff1, Hi: 0xb00b, Stride: 0x1},
893 unicode.Range16{Lo: 0xb00d, Hi: 0xb027, Stride: 0x1},
894 unicode.Range16{Lo: 0xb029, Hi: 0xb043, Stride: 0x1},
895 unicode.Range16{Lo: 0xb045, Hi: 0xb05f, Stride: 0x1},
896 unicode.Range16{Lo: 0xb061, Hi: 0xb07b, Stride: 0x1},
897 unicode.Range16{Lo: 0xb07d, Hi: 0xb097, Stride: 0x1},
898 unicode.Range16{Lo: 0xb099, Hi: 0xb0b3, Stride: 0x1},
899 unicode.Range16{Lo: 0xb0b5, Hi: 0xb0cf, Stride: 0x1},
900 unicode.Range16{Lo: 0xb0d1, Hi: 0xb0eb, Stride: 0x1},
901 unicode.Range16{Lo: 0xb0ed, Hi: 0xb107, Stride: 0x1},
902 unicode.Range16{Lo: 0xb109, Hi: 0xb123, Stride: 0x1},
903 unicode.Range16{Lo: 0xb125, Hi: 0xb13f, Stride: 0x1},
904 unicode.Range16{Lo: 0xb141, Hi: 0xb15b, Stride: 0x1},
905 unicode.Range16{Lo: 0xb15d, Hi: 0xb177, Stride: 0x1},
906 unicode.Range16{Lo: 0xb179, Hi: 0xb193, Stride: 0x1},
907 unicode.Range16{Lo: 0xb195, Hi: 0xb1af, Stride: 0x1},
908 unicode.Range16{Lo: 0xb1b1, Hi: 0xb1cb, Stride: 0x1},
909 unicode.Range16{Lo: 0xb1cd, Hi: 0xb1e7, Stride: 0x1},
910 unicode.Range16{Lo: 0xb1e9, Hi: 0xb203, Stride: 0x1},
911 unicode.Range16{Lo: 0xb205, Hi: 0xb21f, Stride: 0x1},
912 unicode.Range16{Lo: 0xb221, Hi: 0xb23b, Stride: 0x1},
913 unicode.Range16{Lo: 0xb23d, Hi: 0xb257, Stride: 0x1},
914 unicode.Range16{Lo: 0xb259, Hi: 0xb273, Stride: 0x1},
915 unicode.Range16{Lo: 0xb275, Hi: 0xb28f, Stride: 0x1},
916 unicode.Range16{Lo: 0xb291, Hi: 0xb2ab, Stride: 0x1},
917 unicode.Range16{Lo: 0xb2ad, Hi: 0xb2c7, Stride: 0x1},
918 unicode.Range16{Lo: 0xb2c9, Hi: 0xb2e3, Stride: 0x1},
919 unicode.Range16{Lo: 0xb2e5, Hi: 0xb2ff, Stride: 0x1},
920 unicode.Range16{Lo: 0xb301, Hi: 0xb31b, Stride: 0x1},
921 unicode.Range16{Lo: 0xb31d, Hi: 0xb337, Stride: 0x1},
922 unicode.Range16{Lo: 0xb339, Hi: 0xb353, Stride: 0x1},
923 unicode.Range16{Lo: 0xb355, Hi: 0xb36f, Stride: 0x1},
924 unicode.Range16{Lo: 0xb371, Hi: 0xb38b, Stride: 0x1},
925 unicode.Range16{Lo: 0xb38d, Hi: 0xb3a7, Stride: 0x1},
926 unicode.Range16{Lo: 0xb3a9, Hi: 0xb3c3, Stride: 0x1},
927 unicode.Range16{Lo: 0xb3c5, Hi: 0xb3df, Stride: 0x1},
928 unicode.Range16{Lo: 0xb3e1, Hi: 0xb3fb, Stride: 0x1},
929 unicode.Range16{Lo: 0xb3fd, Hi: 0xb417, Stride: 0x1},
930 unicode.Range16{Lo: 0xb419, Hi: 0xb433, Stride: 0x1},
931 unicode.Range16{Lo: 0xb435, Hi: 0xb44f, Stride: 0x1},
932 unicode.Range16{Lo: 0xb451, Hi: 0xb46b, Stride: 0x1},
933 unicode.Range16{Lo: 0xb46d, Hi: 0xb487, Stride: 0x1},
934 unicode.Range16{Lo: 0xb489, Hi: 0xb4a3, Stride: 0x1},
935 unicode.Range16{Lo: 0xb4a5, Hi: 0xb4bf, Stride: 0x1},
936 unicode.Range16{Lo: 0xb4c1, Hi: 0xb4db, Stride: 0x1},
937 unicode.Range16{Lo: 0xb4dd, Hi: 0xb4f7, Stride: 0x1},
938 unicode.Range16{Lo: 0xb4f9, Hi: 0xb513, Stride: 0x1},
939 unicode.Range16{Lo: 0xb515, Hi: 0xb52f, Stride: 0x1},
940 unicode.Range16{Lo: 0xb531, Hi: 0xb54b, Stride: 0x1},
941 unicode.Range16{Lo: 0xb54d, Hi: 0xb567, Stride: 0x1},
942 unicode.Range16{Lo: 0xb569, Hi: 0xb583, Stride: 0x1},
943 unicode.Range16{Lo: 0xb585, Hi: 0xb59f, Stride: 0x1},
944 unicode.Range16{Lo: 0xb5a1, Hi: 0xb5bb, Stride: 0x1},
945 unicode.Range16{Lo: 0xb5bd, Hi: 0xb5d7, Stride: 0x1},
946 unicode.Range16{Lo: 0xb5d9, Hi: 0xb5f3, Stride: 0x1},
947 unicode.Range16{Lo: 0xb5f5, Hi: 0xb60f, Stride: 0x1},
948 unicode.Range16{Lo: 0xb611, Hi: 0xb62b, Stride: 0x1},
949 unicode.Range16{Lo: 0xb62d, Hi: 0xb647, Stride: 0x1},
950 unicode.Range16{Lo: 0xb649, Hi: 0xb663, Stride: 0x1},
951 unicode.Range16{Lo: 0xb665, Hi: 0xb67f, Stride: 0x1},
952 unicode.Range16{Lo: 0xb681, Hi: 0xb69b, Stride: 0x1},
953 unicode.Range16{Lo: 0xb69d, Hi: 0xb6b7, Stride: 0x1},
954 unicode.Range16{Lo: 0xb6b9, Hi: 0xb6d3, Stride: 0x1},
955 unicode.Range16{Lo: 0xb6d5, Hi: 0xb6ef, Stride: 0x1},
956 unicode.Range16{Lo: 0xb6f1, Hi: 0xb70b, Stride: 0x1},
957 unicode.Range16{Lo: 0xb70d, Hi: 0xb727, Stride: 0x1},
958 unicode.Range16{Lo: 0xb729, Hi: 0xb743, Stride: 0x1},
959 unicode.Range16{Lo: 0xb745, Hi: 0xb75f, Stride: 0x1},
960 unicode.Range16{Lo: 0xb761, Hi: 0xb77b, Stride: 0x1},
961 unicode.Range16{Lo: 0xb77d, Hi: 0xb797, Stride: 0x1},
962 unicode.Range16{Lo: 0xb799, Hi: 0xb7b3, Stride: 0x1},
963 unicode.Range16{Lo: 0xb7b5, Hi: 0xb7cf, Stride: 0x1},
964 unicode.Range16{Lo: 0xb7d1, Hi: 0xb7eb, Stride: 0x1},
965 unicode.Range16{Lo: 0xb7ed, Hi: 0xb807, Stride: 0x1},
966 unicode.Range16{Lo: 0xb809, Hi: 0xb823, Stride: 0x1},
967 unicode.Range16{Lo: 0xb825, Hi: 0xb83f, Stride: 0x1},
968 unicode.Range16{Lo: 0xb841, Hi: 0xb85b, Stride: 0x1},
969 unicode.Range16{Lo: 0xb85d, Hi: 0xb877, Stride: 0x1},
970 unicode.Range16{Lo: 0xb879, Hi: 0xb893, Stride: 0x1},
971 unicode.Range16{Lo: 0xb895, Hi: 0xb8af, Stride: 0x1},
972 unicode.Range16{Lo: 0xb8b1, Hi: 0xb8cb, Stride: 0x1},
973 unicode.Range16{Lo: 0xb8cd, Hi: 0xb8e7, Stride: 0x1},
974 unicode.Range16{Lo: 0xb8e9, Hi: 0xb903, Stride: 0x1},
975 unicode.Range16{Lo: 0xb905, Hi: 0xb91f, Stride: 0x1},
976 unicode.Range16{Lo: 0xb921, Hi: 0xb93b, Stride: 0x1},
977 unicode.Range16{Lo: 0xb93d, Hi: 0xb957, Stride: 0x1},
978 unicode.Range16{Lo: 0xb959, Hi: 0xb973, Stride: 0x1},
979 unicode.Range16{Lo: 0xb975, Hi: 0xb98f, Stride: 0x1},
980 unicode.Range16{Lo: 0xb991, Hi: 0xb9ab, Stride: 0x1},
981 unicode.Range16{Lo: 0xb9ad, Hi: 0xb9c7, Stride: 0x1},
982 unicode.Range16{Lo: 0xb9c9, Hi: 0xb9e3, Stride: 0x1},
983 unicode.Range16{Lo: 0xb9e5, Hi: 0xb9ff, Stride: 0x1},
984 unicode.Range16{Lo: 0xba01, Hi: 0xba1b, Stride: 0x1},
985 unicode.Range16{Lo: 0xba1d, Hi: 0xba37, Stride: 0x1},
986 unicode.Range16{Lo: 0xba39, Hi: 0xba53, Stride: 0x1},
987 unicode.Range16{Lo: 0xba55, Hi: 0xba6f, Stride: 0x1},
988 unicode.Range16{Lo: 0xba71, Hi: 0xba8b, Stride: 0x1},
989 unicode.Range16{Lo: 0xba8d, Hi: 0xbaa7, Stride: 0x1},
990 unicode.Range16{Lo: 0xbaa9, Hi: 0xbac3, Stride: 0x1},
991 unicode.Range16{Lo: 0xbac5, Hi: 0xbadf, Stride: 0x1},
992 unicode.Range16{Lo: 0xbae1, Hi: 0xbafb, Stride: 0x1},
993 unicode.Range16{Lo: 0xbafd, Hi: 0xbb17, Stride: 0x1},
994 unicode.Range16{Lo: 0xbb19, Hi: 0xbb33, Stride: 0x1},
995 unicode.Range16{Lo: 0xbb35, Hi: 0xbb4f, Stride: 0x1},
996 unicode.Range16{Lo: 0xbb51, Hi: 0xbb6b, Stride: 0x1},
997 unicode.Range16{Lo: 0xbb6d, Hi: 0xbb87, Stride: 0x1},
998 unicode.Range16{Lo: 0xbb89, Hi: 0xbba3, Stride: 0x1},
999 unicode.Range16{Lo: 0xbba5, Hi: 0xbbbf, Stride: 0x1},
1000 unicode.Range16{Lo: 0xbbc1, Hi: 0xbbdb, Stride: 0x1},
1001 unicode.Range16{Lo: 0xbbdd, Hi: 0xbbf7, Stride: 0x1},
1002 unicode.Range16{Lo: 0xbbf9, Hi: 0xbc13, Stride: 0x1},
1003 unicode.Range16{Lo: 0xbc15, Hi: 0xbc2f, Stride: 0x1},
1004 unicode.Range16{Lo: 0xbc31, Hi: 0xbc4b, Stride: 0x1},
1005 unicode.Range16{Lo: 0xbc4d, Hi: 0xbc67, Stride: 0x1},
1006 unicode.Range16{Lo: 0xbc69, Hi: 0xbc83, Stride: 0x1},
1007 unicode.Range16{Lo: 0xbc85, Hi: 0xbc9f, Stride: 0x1},
1008 unicode.Range16{Lo: 0xbca1, Hi: 0xbcbb, Stride: 0x1},
1009 unicode.Range16{Lo: 0xbcbd, Hi: 0xbcd7, Stride: 0x1},
1010 unicode.Range16{Lo: 0xbcd9, Hi: 0xbcf3, Stride: 0x1},
1011 unicode.Range16{Lo: 0xbcf5, Hi: 0xbd0f, Stride: 0x1},
1012 unicode.Range16{Lo: 0xbd11, Hi: 0xbd2b, Stride: 0x1},
1013 unicode.Range16{Lo: 0xbd2d, Hi: 0xbd47, Stride: 0x1},
1014 unicode.Range16{Lo: 0xbd49, Hi: 0xbd63, Stride: 0x1},
1015 unicode.Range16{Lo: 0xbd65, Hi: 0xbd7f, Stride: 0x1},
1016 unicode.Range16{Lo: 0xbd81, Hi: 0xbd9b, Stride: 0x1},
1017 unicode.Range16{Lo: 0xbd9d, Hi: 0xbdb7, Stride: 0x1},
1018 unicode.Range16{Lo: 0xbdb9, Hi: 0xbdd3, Stride: 0x1},
1019 unicode.Range16{Lo: 0xbdd5, Hi: 0xbdef, Stride: 0x1},
1020 unicode.Range16{Lo: 0xbdf1, Hi: 0xbe0b, Stride: 0x1},
1021 unicode.Range16{Lo: 0xbe0d, Hi: 0xbe27, Stride: 0x1},
1022 unicode.Range16{Lo: 0xbe29, Hi: 0xbe43, Stride: 0x1},
1023 unicode.Range16{Lo: 0xbe45, Hi: 0xbe5f, Stride: 0x1},
1024 unicode.Range16{Lo: 0xbe61, Hi: 0xbe7b, Stride: 0x1},
1025 unicode.Range16{Lo: 0xbe7d, Hi: 0xbe97, Stride: 0x1},
1026 unicode.Range16{Lo: 0xbe99, Hi: 0xbeb3, Stride: 0x1},
1027 unicode.Range16{Lo: 0xbeb5, Hi: 0xbecf, Stride: 0x1},
1028 unicode.Range16{Lo: 0xbed1, Hi: 0xbeeb, Stride: 0x1},
1029 unicode.Range16{Lo: 0xbeed, Hi: 0xbf07, Stride: 0x1},
1030 unicode.Range16{Lo: 0xbf09, Hi: 0xbf23, Stride: 0x1},
1031 unicode.Range16{Lo: 0xbf25, Hi: 0xbf3f, Stride: 0x1},
1032 unicode.Range16{Lo: 0xbf41, Hi: 0xbf5b, Stride: 0x1},
1033 unicode.Range16{Lo: 0xbf5d, Hi: 0xbf77, Stride: 0x1},
1034 unicode.Range16{Lo: 0xbf79, Hi: 0xbf93, Stride: 0x1},
1035 unicode.Range16{Lo: 0xbf95, Hi: 0xbfaf, Stride: 0x1},
1036 unicode.Range16{Lo: 0xbfb1, Hi: 0xbfcb, Stride: 0x1},
1037 unicode.Range16{Lo: 0xbfcd, Hi: 0xbfe7, Stride: 0x1},
1038 unicode.Range16{Lo: 0xbfe9, Hi: 0xc003, Stride: 0x1},
1039 unicode.Range16{Lo: 0xc005, Hi: 0xc01f, Stride: 0x1},
1040 unicode.Range16{Lo: 0xc021, Hi: 0xc03b, Stride: 0x1},
1041 unicode.Range16{Lo: 0xc03d, Hi: 0xc057, Stride: 0x1},
1042 unicode.Range16{Lo: 0xc059, Hi: 0xc073, Stride: 0x1},
1043 unicode.Range16{Lo: 0xc075, Hi: 0xc08f, Stride: 0x1},
1044 unicode.Range16{Lo: 0xc091, Hi: 0xc0ab, Stride: 0x1},
1045 unicode.Range16{Lo: 0xc0ad, Hi: 0xc0c7, Stride: 0x1},
1046 unicode.Range16{Lo: 0xc0c9, Hi: 0xc0e3, Stride: 0x1},
1047 unicode.Range16{Lo: 0xc0e5, Hi: 0xc0ff, Stride: 0x1},
1048 unicode.Range16{Lo: 0xc101, Hi: 0xc11b, Stride: 0x1},
1049 unicode.Range16{Lo: 0xc11d, Hi: 0xc137, Stride: 0x1},
1050 unicode.Range16{Lo: 0xc139, Hi: 0xc153, Stride: 0x1},
1051 unicode.Range16{Lo: 0xc155, Hi: 0xc16f, Stride: 0x1},
1052 unicode.Range16{Lo: 0xc171, Hi: 0xc18b, Stride: 0x1},
1053 unicode.Range16{Lo: 0xc18d, Hi: 0xc1a7, Stride: 0x1},
1054 unicode.Range16{Lo: 0xc1a9, Hi: 0xc1c3, Stride: 0x1},
1055 unicode.Range16{Lo: 0xc1c5, Hi: 0xc1df, Stride: 0x1},
1056 unicode.Range16{Lo: 0xc1e1, Hi: 0xc1fb, Stride: 0x1},
1057 unicode.Range16{Lo: 0xc1fd, Hi: 0xc217, Stride: 0x1},
1058 unicode.Range16{Lo: 0xc219, Hi: 0xc233, Stride: 0x1},
1059 unicode.Range16{Lo: 0xc235, Hi: 0xc24f, Stride: 0x1},
1060 unicode.Range16{Lo: 0xc251, Hi: 0xc26b, Stride: 0x1},
1061 unicode.Range16{Lo: 0xc26d, Hi: 0xc287, Stride: 0x1},
1062 unicode.Range16{Lo: 0xc289, Hi: 0xc2a3, Stride: 0x1},
1063 unicode.Range16{Lo: 0xc2a5, Hi: 0xc2bf, Stride: 0x1},
1064 unicode.Range16{Lo: 0xc2c1, Hi: 0xc2db, Stride: 0x1},
1065 unicode.Range16{Lo: 0xc2dd, Hi: 0xc2f7, Stride: 0x1},
1066 unicode.Range16{Lo: 0xc2f9, Hi: 0xc313, Stride: 0x1},
1067 unicode.Range16{Lo: 0xc315, Hi: 0xc32f, Stride: 0x1},
1068 unicode.Range16{Lo: 0xc331, Hi: 0xc34b, Stride: 0x1},
1069 unicode.Range16{Lo: 0xc34d, Hi: 0xc367, Stride: 0x1},
1070 unicode.Range16{Lo: 0xc369, Hi: 0xc383, Stride: 0x1},
1071 unicode.Range16{Lo: 0xc385, Hi: 0xc39f, Stride: 0x1},
1072 unicode.Range16{Lo: 0xc3a1, Hi: 0xc3bb, Stride: 0x1},
1073 unicode.Range16{Lo: 0xc3bd, Hi: 0xc3d7, Stride: 0x1},
1074 unicode.Range16{Lo: 0xc3d9, Hi: 0xc3f3, Stride: 0x1},
1075 unicode.Range16{Lo: 0xc3f5, Hi: 0xc40f, Stride: 0x1},
1076 unicode.Range16{Lo: 0xc411, Hi: 0xc42b, Stride: 0x1},
1077 unicode.Range16{Lo: 0xc42d, Hi: 0xc447, Stride: 0x1},
1078 unicode.Range16{Lo: 0xc449, Hi: 0xc463, Stride: 0x1},
1079 unicode.Range16{Lo: 0xc465, Hi: 0xc47f, Stride: 0x1},
1080 unicode.Range16{Lo: 0xc481, Hi: 0xc49b, Stride: 0x1},
1081 unicode.Range16{Lo: 0xc49d, Hi: 0xc4b7, Stride: 0x1},
1082 unicode.Range16{Lo: 0xc4b9, Hi: 0xc4d3, Stride: 0x1},
1083 unicode.Range16{Lo: 0xc4d5, Hi: 0xc4ef, Stride: 0x1},
1084 unicode.Range16{Lo: 0xc4f1, Hi: 0xc50b, Stride: 0x1},
1085 unicode.Range16{Lo: 0xc50d, Hi: 0xc527, Stride: 0x1},
1086 unicode.Range16{Lo: 0xc529, Hi: 0xc543, Stride: 0x1},
1087 unicode.Range16{Lo: 0xc545, Hi: 0xc55f, Stride: 0x1},
1088 unicode.Range16{Lo: 0xc561, Hi: 0xc57b, Stride: 0x1},
1089 unicode.Range16{Lo: 0xc57d, Hi: 0xc597, Stride: 0x1},
1090 unicode.Range16{Lo: 0xc599, Hi: 0xc5b3, Stride: 0x1},
1091 unicode.Range16{Lo: 0xc5b5, Hi: 0xc5cf, Stride: 0x1},
1092 unicode.Range16{Lo: 0xc5d1, Hi: 0xc5eb, Stride: 0x1},
1093 unicode.Range16{Lo: 0xc5ed, Hi: 0xc607, Stride: 0x1},
1094 unicode.Range16{Lo: 0xc609, Hi: 0xc623, Stride: 0x1},
1095 unicode.Range16{Lo: 0xc625, Hi: 0xc63f, Stride: 0x1},
1096 unicode.Range16{Lo: 0xc641, Hi: 0xc65b, Stride: 0x1},
1097 unicode.Range16{Lo: 0xc65d, Hi: 0xc677, Stride: 0x1},
1098 unicode.Range16{Lo: 0xc679, Hi: 0xc693, Stride: 0x1},
1099 unicode.Range16{Lo: 0xc695, Hi: 0xc6af, Stride: 0x1},
1100 unicode.Range16{Lo: 0xc6b1, Hi: 0xc6cb, Stride: 0x1},
1101 unicode.Range16{Lo: 0xc6cd, Hi: 0xc6e7, Stride: 0x1},
1102 unicode.Range16{Lo: 0xc6e9, Hi: 0xc703, Stride: 0x1},
1103 unicode.Range16{Lo: 0xc705, Hi: 0xc71f, Stride: 0x1},
1104 unicode.Range16{Lo: 0xc721, Hi: 0xc73b, Stride: 0x1},
1105 unicode.Range16{Lo: 0xc73d, Hi: 0xc757, Stride: 0x1},
1106 unicode.Range16{Lo: 0xc759, Hi: 0xc773, Stride: 0x1},
1107 unicode.Range16{Lo: 0xc775, Hi: 0xc78f, Stride: 0x1},
1108 unicode.Range16{Lo: 0xc791, Hi: 0xc7ab, Stride: 0x1},
1109 unicode.Range16{Lo: 0xc7ad, Hi: 0xc7c7, Stride: 0x1},
1110 unicode.Range16{Lo: 0xc7c9, Hi: 0xc7e3, Stride: 0x1},
1111 unicode.Range16{Lo: 0xc7e5, Hi: 0xc7ff, Stride: 0x1},
1112 unicode.Range16{Lo: 0xc801, Hi: 0xc81b, Stride: 0x1},
1113 unicode.Range16{Lo: 0xc81d, Hi: 0xc837, Stride: 0x1},
1114 unicode.Range16{Lo: 0xc839, Hi: 0xc853, Stride: 0x1},
1115 unicode.Range16{Lo: 0xc855, Hi: 0xc86f, Stride: 0x1},
1116 unicode.Range16{Lo: 0xc871, Hi: 0xc88b, Stride: 0x1},
1117 unicode.Range16{Lo: 0xc88d, Hi: 0xc8a7, Stride: 0x1},
1118 unicode.Range16{Lo: 0xc8a9, Hi: 0xc8c3, Stride: 0x1},
1119 unicode.Range16{Lo: 0xc8c5, Hi: 0xc8df, Stride: 0x1},
1120 unicode.Range16{Lo: 0xc8e1, Hi: 0xc8fb, Stride: 0x1},
1121 unicode.Range16{Lo: 0xc8fd, Hi: 0xc917, Stride: 0x1},
1122 unicode.Range16{Lo: 0xc919, Hi: 0xc933, Stride: 0x1},
1123 unicode.Range16{Lo: 0xc935, Hi: 0xc94f, Stride: 0x1},
1124 unicode.Range16{Lo: 0xc951, Hi: 0xc96b, Stride: 0x1},
1125 unicode.Range16{Lo: 0xc96d, Hi: 0xc987, Stride: 0x1},
1126 unicode.Range16{Lo: 0xc989, Hi: 0xc9a3, Stride: 0x1},
1127 unicode.Range16{Lo: 0xc9a5, Hi: 0xc9bf, Stride: 0x1},
1128 unicode.Range16{Lo: 0xc9c1, Hi: 0xc9db, Stride: 0x1},
1129 unicode.Range16{Lo: 0xc9dd, Hi: 0xc9f7, Stride: 0x1},
1130 unicode.Range16{Lo: 0xc9f9, Hi: 0xca13, Stride: 0x1},
1131 unicode.Range16{Lo: 0xca15, Hi: 0xca2f, Stride: 0x1},
1132 unicode.Range16{Lo: 0xca31, Hi: 0xca4b, Stride: 0x1},
1133 unicode.Range16{Lo: 0xca4d, Hi: 0xca67, Stride: 0x1},
1134 unicode.Range16{Lo: 0xca69, Hi: 0xca83, Stride: 0x1},
1135 unicode.Range16{Lo: 0xca85, Hi: 0xca9f, Stride: 0x1},
1136 unicode.Range16{Lo: 0xcaa1, Hi: 0xcabb, Stride: 0x1},
1137 unicode.Range16{Lo: 0xcabd, Hi: 0xcad7, Stride: 0x1},
1138 unicode.Range16{Lo: 0xcad9, Hi: 0xcaf3, Stride: 0x1},
1139 unicode.Range16{Lo: 0xcaf5, Hi: 0xcb0f, Stride: 0x1},
1140 unicode.Range16{Lo: 0xcb11, Hi: 0xcb2b, Stride: 0x1},
1141 unicode.Range16{Lo: 0xcb2d, Hi: 0xcb47, Stride: 0x1},
1142 unicode.Range16{Lo: 0xcb49, Hi: 0xcb63, Stride: 0x1},
1143 unicode.Range16{Lo: 0xcb65, Hi: 0xcb7f, Stride: 0x1},
1144 unicode.Range16{Lo: 0xcb81, Hi: 0xcb9b, Stride: 0x1},
1145 unicode.Range16{Lo: 0xcb9d, Hi: 0xcbb7, Stride: 0x1},
1146 unicode.Range16{Lo: 0xcbb9, Hi: 0xcbd3, Stride: 0x1},
1147 unicode.Range16{Lo: 0xcbd5, Hi: 0xcbef, Stride: 0x1},
1148 unicode.Range16{Lo: 0xcbf1, Hi: 0xcc0b, Stride: 0x1},
1149 unicode.Range16{Lo: 0xcc0d, Hi: 0xcc27, Stride: 0x1},
1150 unicode.Range16{Lo: 0xcc29, Hi: 0xcc43, Stride: 0x1},
1151 unicode.Range16{Lo: 0xcc45, Hi: 0xcc5f, Stride: 0x1},
1152 unicode.Range16{Lo: 0xcc61, Hi: 0xcc7b, Stride: 0x1},
1153 unicode.Range16{Lo: 0xcc7d, Hi: 0xcc97, Stride: 0x1},
1154 unicode.Range16{Lo: 0xcc99, Hi: 0xccb3, Stride: 0x1},
1155 unicode.Range16{Lo: 0xccb5, Hi: 0xcccf, Stride: 0x1},
1156 unicode.Range16{Lo: 0xccd1, Hi: 0xcceb, Stride: 0x1},
1157 unicode.Range16{Lo: 0xcced, Hi: 0xcd07, Stride: 0x1},
1158 unicode.Range16{Lo: 0xcd09, Hi: 0xcd23, Stride: 0x1},
1159 unicode.Range16{Lo: 0xcd25, Hi: 0xcd3f, Stride: 0x1},
1160 unicode.Range16{Lo: 0xcd41, Hi: 0xcd5b, Stride: 0x1},
1161 unicode.Range16{Lo: 0xcd5d, Hi: 0xcd77, Stride: 0x1},
1162 unicode.Range16{Lo: 0xcd79, Hi: 0xcd93, Stride: 0x1},
1163 unicode.Range16{Lo: 0xcd95, Hi: 0xcdaf, Stride: 0x1},
1164 unicode.Range16{Lo: 0xcdb1, Hi: 0xcdcb, Stride: 0x1},
1165 unicode.Range16{Lo: 0xcdcd, Hi: 0xcde7, Stride: 0x1},
1166 unicode.Range16{Lo: 0xcde9, Hi: 0xce03, Stride: 0x1},
1167 unicode.Range16{Lo: 0xce05, Hi: 0xce1f, Stride: 0x1},
1168 unicode.Range16{Lo: 0xce21, Hi: 0xce3b, Stride: 0x1},
1169 unicode.Range16{Lo: 0xce3d, Hi: 0xce57, Stride: 0x1},
1170 unicode.Range16{Lo: 0xce59, Hi: 0xce73, Stride: 0x1},
1171 unicode.Range16{Lo: 0xce75, Hi: 0xce8f, Stride: 0x1},
1172 unicode.Range16{Lo: 0xce91, Hi: 0xceab, Stride: 0x1},
1173 unicode.Range16{Lo: 0xcead, Hi: 0xcec7, Stride: 0x1},
1174 unicode.Range16{Lo: 0xcec9, Hi: 0xcee3, Stride: 0x1},
1175 unicode.Range16{Lo: 0xcee5, Hi: 0xceff, Stride: 0x1},
1176 unicode.Range16{Lo: 0xcf01, Hi: 0xcf1b, Stride: 0x1},
1177 unicode.Range16{Lo: 0xcf1d, Hi: 0xcf37, Stride: 0x1},
1178 unicode.Range16{Lo: 0xcf39, Hi: 0xcf53, Stride: 0x1},
1179 unicode.Range16{Lo: 0xcf55, Hi: 0xcf6f, Stride: 0x1},
1180 unicode.Range16{Lo: 0xcf71, Hi: 0xcf8b, Stride: 0x1},
1181 unicode.Range16{Lo: 0xcf8d, Hi: 0xcfa7, Stride: 0x1},
1182 unicode.Range16{Lo: 0xcfa9, Hi: 0xcfc3, Stride: 0x1},
1183 unicode.Range16{Lo: 0xcfc5, Hi: 0xcfdf, Stride: 0x1},
1184 unicode.Range16{Lo: 0xcfe1, Hi: 0xcffb, Stride: 0x1},
1185 unicode.Range16{Lo: 0xcffd, Hi: 0xd017, Stride: 0x1},
1186 unicode.Range16{Lo: 0xd019, Hi: 0xd033, Stride: 0x1},
1187 unicode.Range16{Lo: 0xd035, Hi: 0xd04f, Stride: 0x1},
1188 unicode.Range16{Lo: 0xd051, Hi: 0xd06b, Stride: 0x1},
1189 unicode.Range16{Lo: 0xd06d, Hi: 0xd087, Stride: 0x1},
1190 unicode.Range16{Lo: 0xd089, Hi: 0xd0a3, Stride: 0x1},
1191 unicode.Range16{Lo: 0xd0a5, Hi: 0xd0bf, Stride: 0x1},
1192 unicode.Range16{Lo: 0xd0c1, Hi: 0xd0db, Stride: 0x1},
1193 unicode.Range16{Lo: 0xd0dd, Hi: 0xd0f7, Stride: 0x1},
1194 unicode.Range16{Lo: 0xd0f9, Hi: 0xd113, Stride: 0x1},
1195 unicode.Range16{Lo: 0xd115, Hi: 0xd12f, Stride: 0x1},
1196 unicode.Range16{Lo: 0xd131, Hi: 0xd14b, Stride: 0x1},
1197 unicode.Range16{Lo: 0xd14d, Hi: 0xd167, Stride: 0x1},
1198 unicode.Range16{Lo: 0xd169, Hi: 0xd183, Stride: 0x1},
1199 unicode.Range16{Lo: 0xd185, Hi: 0xd19f, Stride: 0x1},
1200 unicode.Range16{Lo: 0xd1a1, Hi: 0xd1bb, Stride: 0x1},
1201 unicode.Range16{Lo: 0xd1bd, Hi: 0xd1d7, Stride: 0x1},
1202 unicode.Range16{Lo: 0xd1d9, Hi: 0xd1f3, Stride: 0x1},
1203 unicode.Range16{Lo: 0xd1f5, Hi: 0xd20f, Stride: 0x1},
1204 unicode.Range16{Lo: 0xd211, Hi: 0xd22b, Stride: 0x1},
1205 unicode.Range16{Lo: 0xd22d, Hi: 0xd247, Stride: 0x1},
1206 unicode.Range16{Lo: 0xd249, Hi: 0xd263, Stride: 0x1},
1207 unicode.Range16{Lo: 0xd265, Hi: 0xd27f, Stride: 0x1},
1208 unicode.Range16{Lo: 0xd281, Hi: 0xd29b, Stride: 0x1},
1209 unicode.Range16{Lo: 0xd29d, Hi: 0xd2b7, Stride: 0x1},
1210 unicode.Range16{Lo: 0xd2b9, Hi: 0xd2d3, Stride: 0x1},
1211 unicode.Range16{Lo: 0xd2d5, Hi: 0xd2ef, Stride: 0x1},
1212 unicode.Range16{Lo: 0xd2f1, Hi: 0xd30b, Stride: 0x1},
1213 unicode.Range16{Lo: 0xd30d, Hi: 0xd327, Stride: 0x1},
1214 unicode.Range16{Lo: 0xd329, Hi: 0xd343, Stride: 0x1},
1215 unicode.Range16{Lo: 0xd345, Hi: 0xd35f, Stride: 0x1},
1216 unicode.Range16{Lo: 0xd361, Hi: 0xd37b, Stride: 0x1},
1217 unicode.Range16{Lo: 0xd37d, Hi: 0xd397, Stride: 0x1},
1218 unicode.Range16{Lo: 0xd399, Hi: 0xd3b3, Stride: 0x1},
1219 unicode.Range16{Lo: 0xd3b5, Hi: 0xd3cf, Stride: 0x1},
1220 unicode.Range16{Lo: 0xd3d1, Hi: 0xd3eb, Stride: 0x1},
1221 unicode.Range16{Lo: 0xd3ed, Hi: 0xd407, Stride: 0x1},
1222 unicode.Range16{Lo: 0xd409, Hi: 0xd423, Stride: 0x1},
1223 unicode.Range16{Lo: 0xd425, Hi: 0xd43f, Stride: 0x1},
1224 unicode.Range16{Lo: 0xd441, Hi: 0xd45b, Stride: 0x1},
1225 unicode.Range16{Lo: 0xd45d, Hi: 0xd477, Stride: 0x1},
1226 unicode.Range16{Lo: 0xd479, Hi: 0xd493, Stride: 0x1},
1227 unicode.Range16{Lo: 0xd495, Hi: 0xd4af, Stride: 0x1},
1228 unicode.Range16{Lo: 0xd4b1, Hi: 0xd4cb, Stride: 0x1},
1229 unicode.Range16{Lo: 0xd4cd, Hi: 0xd4e7, Stride: 0x1},
1230 unicode.Range16{Lo: 0xd4e9, Hi: 0xd503, Stride: 0x1},
1231 unicode.Range16{Lo: 0xd505, Hi: 0xd51f, Stride: 0x1},
1232 unicode.Range16{Lo: 0xd521, Hi: 0xd53b, Stride: 0x1},
1233 unicode.Range16{Lo: 0xd53d, Hi: 0xd557, Stride: 0x1},
1234 unicode.Range16{Lo: 0xd559, Hi: 0xd573, Stride: 0x1},
1235 unicode.Range16{Lo: 0xd575, Hi: 0xd58f, Stride: 0x1},
1236 unicode.Range16{Lo: 0xd591, Hi: 0xd5ab, Stride: 0x1},
1237 unicode.Range16{Lo: 0xd5ad, Hi: 0xd5c7, Stride: 0x1},
1238 unicode.Range16{Lo: 0xd5c9, Hi: 0xd5e3, Stride: 0x1},
1239 unicode.Range16{Lo: 0xd5e5, Hi: 0xd5ff, Stride: 0x1},
1240 unicode.Range16{Lo: 0xd601, Hi: 0xd61b, Stride: 0x1},
1241 unicode.Range16{Lo: 0xd61d, Hi: 0xd637, Stride: 0x1},
1242 unicode.Range16{Lo: 0xd639, Hi: 0xd653, Stride: 0x1},
1243 unicode.Range16{Lo: 0xd655, Hi: 0xd66f, Stride: 0x1},
1244 unicode.Range16{Lo: 0xd671, Hi: 0xd68b, Stride: 0x1},
1245 unicode.Range16{Lo: 0xd68d, Hi: 0xd6a7, Stride: 0x1},
1246 unicode.Range16{Lo: 0xd6a9, Hi: 0xd6c3, Stride: 0x1},
1247 unicode.Range16{Lo: 0xd6c5, Hi: 0xd6df, Stride: 0x1},
1248 unicode.Range16{Lo: 0xd6e1, Hi: 0xd6fb, Stride: 0x1},
1249 unicode.Range16{Lo: 0xd6fd, Hi: 0xd717, Stride: 0x1},
1250 unicode.Range16{Lo: 0xd719, Hi: 0xd733, Stride: 0x1},
1251 unicode.Range16{Lo: 0xd735, Hi: 0xd74f, Stride: 0x1},
1252 unicode.Range16{Lo: 0xd751, Hi: 0xd76b, Stride: 0x1},
1253 unicode.Range16{Lo: 0xd76d, Hi: 0xd787, Stride: 0x1},
1254 unicode.Range16{Lo: 0xd789, Hi: 0xd7a3, Stride: 0x1},
1255 },
1256 LatinOffset: 0,
1257}
1258
1259var _GraphemePrepend = &unicode.RangeTable{
1260 R16: []unicode.Range16{
1261 unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
1262 unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
1263 unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
1264 unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
1265 unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
1266 },
1267 R32: []unicode.Range32{
1268 unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
1269 unicode.Range32{Lo: 0x111c2, Hi: 0x111c3, Stride: 0x1},
1270 },
1271 LatinOffset: 0,
1272}
1273
1274var _GraphemeRegional_Indicator = &unicode.RangeTable{
1275 R32: []unicode.Range32{
1276 unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
1277 },
1278 LatinOffset: 0,
1279}
1280
1281var _GraphemeSpacingMark = &unicode.RangeTable{
1282 R16: []unicode.Range16{
1283 unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
1284 unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
1285 unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
1286 unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
1287 unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
1288 unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
1289 unicode.Range16{Lo: 0x9bf, Hi: 0x9c0, Stride: 0x1},
1290 unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
1291 unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
1292 unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
1293 unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
1294 unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
1295 unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
1296 unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
1297 unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
1298 unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
1299 unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
1300 unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
1301 unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
1302 unicode.Range16{Lo: 0xbbf, Hi: 0xbbf, Stride: 0x1},
1303 unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
1304 unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
1305 unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
1306 unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
1307 unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
1308 unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
1309 unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
1310 unicode.Range16{Lo: 0xcc0, Hi: 0xcc1, Stride: 0x1},
1311 unicode.Range16{Lo: 0xcc3, Hi: 0xcc4, Stride: 0x1},
1312 unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
1313 unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
1314 unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
1315 unicode.Range16{Lo: 0xd3f, Hi: 0xd40, Stride: 0x1},
1316 unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
1317 unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1},
1318 unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1},
1319 unicode.Range16{Lo: 0xdd0, Hi: 0xdd1, Stride: 0x1},
1320 unicode.Range16{Lo: 0xdd8, Hi: 0xdde, Stride: 0x1},
1321 unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1},
1322 unicode.Range16{Lo: 0xe33, Hi: 0xe33, Stride: 0x1},
1323 unicode.Range16{Lo: 0xeb3, Hi: 0xeb3, Stride: 0x1},
1324 unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1},
1325 unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1},
1326 unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1},
1327 unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1},
1328 unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1},
1329 unicode.Range16{Lo: 0x1084, Hi: 0x1084, Stride: 0x1},
1330 unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1},
1331 unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1},
1332 unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1},
1333 unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1},
1334 unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1},
1335 unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1},
1336 unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1},
1337 unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1},
1338 unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1},
1339 unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1},
1340 unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1},
1341 unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1},
1342 unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1},
1343 unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1},
1344 unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1},
1345 unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1},
1346 unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1},
1347 unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1},
1348 unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1},
1349 unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1},
1350 unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1},
1351 unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1},
1352 unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1},
1353 unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1},
1354 unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1},
1355 unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1},
1356 unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1},
1357 unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1},
1358 unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1},
1359 unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1},
1360 unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1},
1361 unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1},
1362 unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1},
1363 unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1},
1364 unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1},
1365 unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1},
1366 unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1},
1367 unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1},
1368 unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1},
1369 unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1},
1370 unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1},
1371 unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1},
1372 unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1},
1373 unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1},
1374 unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1},
1375 unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1},
1376 unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1},
1377 },
1378 R32: []unicode.Range32{
1379 unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1},
1380 unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1},
1381 unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
1382 unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
1383 unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
1384 unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
1385 unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
1386 unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
1387 unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
1388 unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
1389 unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
1390 unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
1391 unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
1392 unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
1393 unicode.Range32{Lo: 0x1133f, Hi: 0x1133f, Stride: 0x1},
1394 unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
1395 unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
1396 unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
1397 unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
1398 unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
1399 unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
1400 unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
1401 unicode.Range32{Lo: 0x114b1, Hi: 0x114b2, Stride: 0x1},
1402 unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
1403 unicode.Range32{Lo: 0x114bb, Hi: 0x114bc, Stride: 0x1},
1404 unicode.Range32{Lo: 0x114be, Hi: 0x114be, Stride: 0x1},
1405 unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
1406 unicode.Range32{Lo: 0x115b0, Hi: 0x115b1, Stride: 0x1},
1407 unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
1408 unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
1409 unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
1410 unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
1411 unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
1412 unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
1413 unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
1414 unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
1415 unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
1416 unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
1417 unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
1418 unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
1419 unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
1420 unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
1421 unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
1422 unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
1423 unicode.Range32{Lo: 0x1d166, Hi: 0x1d166, Stride: 0x1},
1424 unicode.Range32{Lo: 0x1d16d, Hi: 0x1d16d, Stride: 0x1},
1425 },
1426 LatinOffset: 0,
1427}
1428
1429var _GraphemeT = &unicode.RangeTable{
1430 R16: []unicode.Range16{
1431 unicode.Range16{Lo: 0x11a8, Hi: 0x11ff, Stride: 0x1},
1432 unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
1433 },
1434 LatinOffset: 0,
1435}
1436
1437var _GraphemeV = &unicode.RangeTable{
1438 R16: []unicode.Range16{
1439 unicode.Range16{Lo: 0x1160, Hi: 0x11a7, Stride: 0x1},
1440 unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
1441 },
1442 LatinOffset: 0,
1443}
1444
1445var _GraphemeZWJ = &unicode.RangeTable{
1446 R16: []unicode.Range16{
1447 unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
1448 },
1449 LatinOffset: 0,
1450}
1451
1452type _GraphemeRuneRange unicode.RangeTable
1453
1454func _GraphemeRuneType(r rune) *_GraphemeRuneRange {
1455 switch {
1456 case unicode.Is(_GraphemeCR, r):
1457 return (*_GraphemeRuneRange)(_GraphemeCR)
1458 case unicode.Is(_GraphemeControl, r):
1459 return (*_GraphemeRuneRange)(_GraphemeControl)
1460 case unicode.Is(_GraphemeE_Base, r):
1461 return (*_GraphemeRuneRange)(_GraphemeE_Base)
1462 case unicode.Is(_GraphemeE_Base_GAZ, r):
1463 return (*_GraphemeRuneRange)(_GraphemeE_Base_GAZ)
1464 case unicode.Is(_GraphemeE_Modifier, r):
1465 return (*_GraphemeRuneRange)(_GraphemeE_Modifier)
1466 case unicode.Is(_GraphemeExtend, r):
1467 return (*_GraphemeRuneRange)(_GraphemeExtend)
1468 case unicode.Is(_GraphemeGlue_After_Zwj, r):
1469 return (*_GraphemeRuneRange)(_GraphemeGlue_After_Zwj)
1470 case unicode.Is(_GraphemeL, r):
1471 return (*_GraphemeRuneRange)(_GraphemeL)
1472 case unicode.Is(_GraphemeLF, r):
1473 return (*_GraphemeRuneRange)(_GraphemeLF)
1474 case unicode.Is(_GraphemeLV, r):
1475 return (*_GraphemeRuneRange)(_GraphemeLV)
1476 case unicode.Is(_GraphemeLVT, r):
1477 return (*_GraphemeRuneRange)(_GraphemeLVT)
1478 case unicode.Is(_GraphemePrepend, r):
1479 return (*_GraphemeRuneRange)(_GraphemePrepend)
1480 case unicode.Is(_GraphemeRegional_Indicator, r):
1481 return (*_GraphemeRuneRange)(_GraphemeRegional_Indicator)
1482 case unicode.Is(_GraphemeSpacingMark, r):
1483 return (*_GraphemeRuneRange)(_GraphemeSpacingMark)
1484 case unicode.Is(_GraphemeT, r):
1485 return (*_GraphemeRuneRange)(_GraphemeT)
1486 case unicode.Is(_GraphemeV, r):
1487 return (*_GraphemeRuneRange)(_GraphemeV)
1488 case unicode.Is(_GraphemeZWJ, r):
1489 return (*_GraphemeRuneRange)(_GraphemeZWJ)
1490 default:
1491 return nil
1492 }
1493}
1494func (rng *_GraphemeRuneRange) String() string {
1495 switch (*unicode.RangeTable)(rng) {
1496 case _GraphemeCR:
1497 return "CR"
1498 case _GraphemeControl:
1499 return "Control"
1500 case _GraphemeE_Base:
1501 return "E_Base"
1502 case _GraphemeE_Base_GAZ:
1503 return "E_Base_GAZ"
1504 case _GraphemeE_Modifier:
1505 return "E_Modifier"
1506 case _GraphemeExtend:
1507 return "Extend"
1508 case _GraphemeGlue_After_Zwj:
1509 return "Glue_After_Zwj"
1510 case _GraphemeL:
1511 return "L"
1512 case _GraphemeLF:
1513 return "LF"
1514 case _GraphemeLV:
1515 return "LV"
1516 case _GraphemeLVT:
1517 return "LVT"
1518 case _GraphemePrepend:
1519 return "Prepend"
1520 case _GraphemeRegional_Indicator:
1521 return "Regional_Indicator"
1522 case _GraphemeSpacingMark:
1523 return "SpacingMark"
1524 case _GraphemeT:
1525 return "T"
1526 case _GraphemeV:
1527 return "V"
1528 case _GraphemeZWJ:
1529 return "ZWJ"
1530 default:
1531 return "Other"
1532 }
1533}
1534
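// Illustrative sketch only (not part of the generated tables): assuming it is
// placed in this same package, _GraphemeRuneType can be used to look up the
// Grapheme_Cluster_Break property of each rune in a string, and the String
// method of the returned *_GraphemeRuneRange yields the property name. The
// helper graphemeClassesOf below is a hypothetical name introduced here for
// illustration; it is not defined elsewhere in this package.
func graphemeClassesOf(s string) []string {
	classes := make([]string, 0, len(s))
	for _, r := range s {
		// _GraphemeRuneType returns nil for runes with no listed property;
		// report those as "Other", matching the String method's default.
		if rng := _GraphemeRuneType(r); rng != nil {
			classes = append(classes, rng.String())
		} else {
			classes = append(classes, "Other")
		}
	}
	return classes
}
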
1535var _WordALetter = &unicode.RangeTable{
1536 R16: []unicode.Range16{
1537 unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
1538 unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
1539 unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
1540 unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
1541 unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
1542 unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
1543 unicode.Range16{Lo: 0xd8, Hi: 0xf6, Stride: 0x1},
1544 unicode.Range16{Lo: 0xf8, Hi: 0x1ba, Stride: 0x1},
1545 unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
1546 unicode.Range16{Lo: 0x1bc, Hi: 0x1bf, Stride: 0x1},
1547 unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
1548 unicode.Range16{Lo: 0x1c4, Hi: 0x293, Stride: 0x1},
1549 unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
1550 unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
1551 unicode.Range16{Lo: 0x2b0, Hi: 0x2c1, Stride: 0x1},
1552 unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
1553 unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
1554 unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
1555 unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
1556 unicode.Range16{Lo: 0x370, Hi: 0x373, Stride: 0x1},
1557 unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
1558 unicode.Range16{Lo: 0x376, Hi: 0x377, Stride: 0x1},
1559 unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
1560 unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
1561 unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
1562 unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
1563 unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1},
1564 unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1},
1565 unicode.Range16{Lo: 0x38e, Hi: 0x3a1, Stride: 0x1},
1566 unicode.Range16{Lo: 0x3a3, Hi: 0x3f5, Stride: 0x1},
1567 unicode.Range16{Lo: 0x3f7, Hi: 0x481, Stride: 0x1},
1568 unicode.Range16{Lo: 0x48a, Hi: 0x52f, Stride: 0x1},
1569 unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1},
1570 unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1},
1571 unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1},
1572 unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1},
1573 unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1},
1574 unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1},
1575 unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1},
1576 unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1},
1577 unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1},
1578 unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1},
1579 unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1},
1580 unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1},
1581 unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1},
1582 unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1},
1583 unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1},
1584 unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1},
1585 unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1},
1586 unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1},
1587 unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1},
1588 unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1},
1589 unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1},
1590 unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1},
1591 unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1},
1592 unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1},
1593 unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1},
1594 unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1},
1595 unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1},
1596 unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1},
1597 unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1},
1598 unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1},
1599 unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1},
1600 unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1},
1601 unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1},
1602 unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1},
1603 unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1},
1604 unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1},
1605 unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1},
1606 unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1},
1607 unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1},
1608 unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1},
1609 unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1},
1610 unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1},
1611 unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1},
1612 unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1},
1613 unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1},
1614 unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1},
1615 unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1},
1616 unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1},
1617 unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1},
1618 unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1},
1619 unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1},
1620 unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1},
1621 unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1},
1622 unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1},
1623 unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1},
1624 unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1},
1625 unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1},
1626 unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1},
1627 unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1},
1628 unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1},
1629 unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1},
1630 unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1},
1631 unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1},
1632 unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1},
1633 unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1},
1634 unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1},
1635 unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1},
1636 unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1},
1637 unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1},
1638 unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1},
1639 unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1},
1640 unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1},
1641 unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1},
1642 unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1},
1643 unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1},
1644 unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1},
1645 unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1},
1646 unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1},
1647 unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1},
1648 unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1},
1649 unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1},
1650 unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1},
1651 unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1},
1652 unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1},
1653 unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1},
1654 unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1},
1655 unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1},
1656 unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1},
1657 unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1},
1658 unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1},
1659 unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1},
1660 unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1},
1661 unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1},
1662 unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1},
1663 unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1},
1664 unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1},
1665 unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1},
1666 unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1},
1667 unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1},
1668 unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1},
1669 unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1},
1670 unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1},
1671 unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1},
1672 unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1},
1673 unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1},
1674 unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1},
1675 unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1},
1676 unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
1677 unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1},
1678 unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1},
1679 unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1},
1680 unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1},
1681 unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1},
1682 unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1},
1683 unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1},
1684 unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1},
1685 unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1},
1686 unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1},
1687 unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1},
1688 unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1},
1689 unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1},
1690 unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1},
1691 unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1},
1692 unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1},
1693 unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1},
1694 unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1},
1695 unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1},
1696 unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1},
1697 unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1},
1698 unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1},
1699 unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1},
1700 unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1},
1701 unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1},
1702 unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1},
1703 unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1},
1704 unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1},
1705 unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1},
1706 unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1},
1707 unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1},
1708 unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1},
1709 unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1},
1710 unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1},
1711 unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1},
1712 unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1},
1713 unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1},
1714 unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1},
1715 unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1},
1716 unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1},
1717 unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1},
1718 unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1},
1719 unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1},
1720 unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1},
1721 unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1},
1722 unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1},
1723 unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1},
1724 unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1},
1725 unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1},
1726 unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1},
1727 unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1},
1728 unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1},
1729 unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1},
1730 unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1},
1731 unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1},
1732 unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1},
1733 unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1},
1734 unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1},
1735 unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1},
1736 unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1},
1737 unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1},
1738 unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1},
1739 unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1},
1740 unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1},
1741 unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1},
1742 unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1},
1743 unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1},
1744 unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1},
1745 unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1},
1746 unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1},
1747 unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1},
1748 unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1},
1749 unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1},
1750 unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1},
1751 unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1},
1752 unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1},
1753 unicode.Range16{Lo: 0x1e00, Hi: 0x1f15, Stride: 0x1},
1754 unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1},
1755 unicode.Range16{Lo: 0x1f20, Hi: 0x1f45, Stride: 0x1},
1756 unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1},
1757 unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1},
1758 unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1},
1759 unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1},
1760 unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1},
1761 unicode.Range16{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 0x1},
1762 unicode.Range16{Lo: 0x1f80, Hi: 0x1fb4, Stride: 0x1},
1763 unicode.Range16{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 0x1},
1764 unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1},
1765 unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1},
1766 unicode.Range16{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 0x1},
1767 unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1},
1768 unicode.Range16{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 0x1},
1769 unicode.Range16{Lo: 0x1fe0, Hi: 0x1fec, Stride: 0x1},
1770 unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1},
1771 unicode.Range16{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 0x1},
1772 unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1},
1773 unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1},
1774 unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1},
1775 unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1},
1776 unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1},
1777 unicode.Range16{Lo: 0x210a, Hi: 0x2113, Stride: 0x1},
1778 unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1},
1779 unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1},
1780 unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1},
1781 unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1},
1782 unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1},
1783 unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1},
1784 unicode.Range16{Lo: 0x212f, Hi: 0x2134, Stride: 0x1},
1785 unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1},
1786 unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1},
1787 unicode.Range16{Lo: 0x213c, Hi: 0x213f, Stride: 0x1},
1788 unicode.Range16{Lo: 0x2145, Hi: 0x2149, Stride: 0x1},
1789 unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1},
1790 unicode.Range16{Lo: 0x2160, Hi: 0x2182, Stride: 0x1},
1791 unicode.Range16{Lo: 0x2183, Hi: 0x2184, Stride: 0x1},
1792 unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1},
1793 unicode.Range16{Lo: 0x24b6, Hi: 0x24e9, Stride: 0x1},
1794 unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1},
1795 unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1},
1796 unicode.Range16{Lo: 0x2c60, Hi: 0x2c7b, Stride: 0x1},
1797 unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1},
1798 unicode.Range16{Lo: 0x2c7e, Hi: 0x2ce4, Stride: 0x1},
1799 unicode.Range16{Lo: 0x2ceb, Hi: 0x2cee, Stride: 0x1},
1800 unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf3, Stride: 0x1},
1801 unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1},
1802 unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1},
1803 unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1},
1804 unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1},
1805 unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1},
1806 unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1},
1807 unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1},
1808 unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1},
1809 unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1},
1810 unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1},
1811 unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1},
1812 unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1},
1813 unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1},
1814 unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1},
1815 unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1},
1816 unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1},
1817 unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1},
1818 unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1},
1819 unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1},
1820 unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1},
1821 unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1},
1822 unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1},
1823 unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1},
1824 unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1},
1825 unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1},
1826 unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1},
1827 unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1},
1828 unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1},
1829 unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1},
1830 unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1},
1831 unicode.Range16{Lo: 0xa640, Hi: 0xa66d, Stride: 0x1},
1832 unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1},
1833 unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1},
1834 unicode.Range16{Lo: 0xa680, Hi: 0xa69b, Stride: 0x1},
1835 unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1},
1836 unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1},
1837 unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1},
1838 unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1},
1839 unicode.Range16{Lo: 0xa722, Hi: 0xa76f, Stride: 0x1},
1840 unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1},
1841 unicode.Range16{Lo: 0xa771, Hi: 0xa787, Stride: 0x1},
1842 unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1},
1843 unicode.Range16{Lo: 0xa78b, Hi: 0xa78e, Stride: 0x1},
1844 unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1},
1845 unicode.Range16{Lo: 0xa790, Hi: 0xa7ae, Stride: 0x1},
1846 unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b7, Stride: 0x1},
1847 unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1},
1848 unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1},
1849 unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1},
1850 unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1},
1851 unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1},
1852 unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1},
1853 unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1},
1854 unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1},
1855 unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1},
1856 unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1},
1857 unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1},
1858 unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1},
1859 unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1},
1860 unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1},
1861 unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1},
1862 unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1},
1863 unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1},
1864 unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1},
1865 unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1},
1866 unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1},
1867 unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1},
1868 unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1},
1869 unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1},
1870 unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1},
1871 unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1},
1872 unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1},
1873 unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1},
1874 unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1},
1875 unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1},
1876 unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1},
1877 unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1},
1878 unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1},
1879 unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1},
1880 unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1},
1881 unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
1882 unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
1883 unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1},
1884 unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1},
1885 unicode.Range16{Lo: 0xfb50, Hi: 0xfbb1, Stride: 0x1},
1886 unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1},
1887 unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1},
1888 unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1},
1889 unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1},
1890 unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1},
1891 unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1},
1892 unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1},
1893 unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1},
1894 unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1},
1895 unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1},
1896 unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1},
1897 unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1},
1898 unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1},
1899 },
1900 R32: []unicode.Range32{
1901 unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1},
1902 unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1},
1903 unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1},
1904 unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1},
1905 unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1},
1906 unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1},
1907 unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1},
1908 unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1},
1909 unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1},
1910 unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1},
1911 unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1},
1912 unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1},
1913 unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1},
1914 unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1},
1915 unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1},
1916 unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1},
1917 unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1},
1918 unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1},
1919 unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1},
1920 unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1},
1921 unicode.Range32{Lo: 0x10400, Hi: 0x1044f, Stride: 0x1},
1922 unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1},
1923 unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1},
1924 unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1},
1925 unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1},
1926 unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1},
1927 unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1},
1928 unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1},
1929 unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1},
1930 unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1},
1931 unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1},
1932 unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1},
1933 unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1},
1934 unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1},
1935 unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1},
1936 unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1},
1937 unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1},
1938 unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1},
1939 unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1},
1940 unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1},
1941 unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1},
1942 unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1},
1943 unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1},
1944 unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1},
1945 unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1},
1946 unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1},
1947 unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1},
1948 unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1},
1949 unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 0x1},
1950 unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1},
1951 unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1},
1952 unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1},
1953 unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1},
1954 unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1},
1955 unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1},
1956 unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1},
1957 unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1},
1958 unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1},
1959 unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1},
1960 unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1},
1961 unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1},
1962 unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1},
1963 unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1},
1964 unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1},
1965 unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1},
1966 unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1},
1967 unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1},
1968 unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1},
1969 unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1},
1970 unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1},
1971 unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1},
1972 unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1},
1973 unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1},
1974 unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1},
1975 unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1},
1976 unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1},
1977 unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1},
1978 unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1},
1979 unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1},
1980 unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1},
1981 unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1},
1982 unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1},
1983 unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1},
1984 unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1},
1985 unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1},
1986 unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1},
1987 unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1},
1988 unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1},
1989 unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1},
1990 unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1},
1991 unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1},
1992 unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1},
1993 unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1},
1994 unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1},
1995 unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1},
1996 unicode.Range32{Lo: 0x118a0, Hi: 0x118df, Stride: 0x1},
1997 unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1},
1998 unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1},
1999 unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1},
2000 unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1},
2001 unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1},
2002 unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1},
2003 unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1},
2004 unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1},
2005 unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1},
2006 unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1},
2007 unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1},
2008 unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1},
2009 unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1},
2010 unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1},
2011 unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1},
2012 unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1},
2013 unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1},
2014 unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1},
2015 unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1},
2016 unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1},
2017 unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1},
2018 unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1},
2019 unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1},
2020 unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1},
2021 unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1},
2022 unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1},
2023 unicode.Range32{Lo: 0x1d400, Hi: 0x1d454, Stride: 0x1},
2024 unicode.Range32{Lo: 0x1d456, Hi: 0x1d49c, Stride: 0x1},
2025 unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1},
2026 unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1},
2027 unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1},
2028 unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1},
2029 unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 0x1},
2030 unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1},
2031 unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1},
2032 unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 0x1},
2033 unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1},
2034 unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1},
2035 unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1},
2036 unicode.Range32{Lo: 0x1d51e, Hi: 0x1d539, Stride: 0x1},
2037 unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1},
2038 unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1},
2039 unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1},
2040 unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1},
2041 unicode.Range32{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 0x1},
2042 unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1},
2043 unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1},
2044 unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 0x1},
2045 unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1},
2046 unicode.Range32{Lo: 0x1d716, Hi: 0x1d734, Stride: 0x1},
2047 unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1},
2048 unicode.Range32{Lo: 0x1d750, Hi: 0x1d76e, Stride: 0x1},
2049 unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1},
2050 unicode.Range32{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 0x1},
2051 unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1},
2052 unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 0x1},
2053 unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1},
2054 unicode.Range32{Lo: 0x1e900, Hi: 0x1e943, Stride: 0x1},
2055 unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1},
2056 unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1},
2057 unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1},
2058 unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1},
2059 unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1},
2060 unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1},
2061 unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1},
2062 unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1},
2063 unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1},
2064 unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1},
2065 unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1},
2066 unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1},
2067 unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1},
2068 unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1},
2069 unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1},
2070 unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1},
2071 unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1},
2072 unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
2073 unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
2074 unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
2075 unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
2076 unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
2077 unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
2078 unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
2079 unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
2080 unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
2081 unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
2082 unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
2083 unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
2084 unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
2085 unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
2086 unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
2087 unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
2088 unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
2089 unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
2090 unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
2091 },
2092 LatinOffset: 7,
2093}
2094
2095var _WordCR = &unicode.RangeTable{
2096 R16: []unicode.Range16{
2097 unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
2098 },
2099 LatinOffset: 1,
2100}
2101
2102var _WordDouble_Quote = &unicode.RangeTable{
2103 R16: []unicode.Range16{
2104 unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
2105 },
2106 LatinOffset: 1,
2107}
2108
2109var _WordE_Base = &unicode.RangeTable{
2110 R16: []unicode.Range16{
2111 unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1},
2112 unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1},
2113 unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1},
2114 },
2115 R32: []unicode.Range32{
2116 unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1},
2117 unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1},
2118 unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1},
2119 unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1},
2120 unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1},
2121 unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1},
2122 unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1},
2123 unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1},
2124 unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1},
2125 unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1},
2126 unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1},
2127 unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1},
2128 unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1},
2129 unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1},
2130 unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1},
2131 unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1},
2132 unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1},
2133 unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1},
2134 unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1},
2135 unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1},
2136 unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1},
2137 unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1},
2138 unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1},
2139 unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1},
2140 unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1},
2141 },
2142 LatinOffset: 0,
2143}
2144
2145var _WordE_Base_GAZ = &unicode.RangeTable{
2146 R32: []unicode.Range32{
2147 unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1},
2148 },
2149 LatinOffset: 0,
2150}
2151
2152var _WordE_Modifier = &unicode.RangeTable{
2153 R32: []unicode.Range32{
2154 unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1},
2155 },
2156 LatinOffset: 0,
2157}
2158
2159var _WordExtend = &unicode.RangeTable{
2160 R16: []unicode.Range16{
2161 unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
2162 unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
2163 unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
2164 unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
2165 unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
2166 unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
2167 unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
2168 unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
2169 unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
2170 unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
2171 unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
2172 unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
2173 unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
2174 unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
2175 unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
2176 unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
2177 unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
2178 unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
2179 unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
2180 unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
2181 unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
2182 unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
2183 unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
2184 unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
2185 unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1},
2186 unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
2187 unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
2188 unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
2189 unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
2190 unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
2191 unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
2192 unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
2193 unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
2194 unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
2195 unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
2196 unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
2197 unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1},
2198 unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1},
2199 unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
2200 unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1},
2201 unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1},
2202 unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1},
2203 unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
2204 unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
2205 unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1},
2206 unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1},
2207 unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1},
2208 unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1},
2209 unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
2210 unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1},
2211 unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
2212 unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1},
2213 unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1},
2214 unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1},
2215 unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1},
2216 unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1},
2217 unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1},
2218 unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1},
2219 unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
2220 unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1},
2221 unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
2222 unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1},
2223 unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1},
2224 unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
2225 unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
2226 unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1},
2227 unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1},
2228 unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1},
2229 unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
2230 unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1},
2231 unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1},
2232 unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1},
2233 unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
2234 unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1},
2235 unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
2236 unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
2237 unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1},
2238 unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1},
2239 unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1},
2240 unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1},
2241 unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1},
2242 unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1},
2243 unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1},
2244 unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
2245 unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
2246 unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
2247 unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1},
2248 unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1},
2249 unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1},
2250 unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
2251 unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1},
2252 unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
2253 unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1},
2254 unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1},
2255 unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1},
2256 unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1},
2257 unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1},
2258 unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
2259 unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1},
2260 unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
2261 unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1},
2262 unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1},
2263 unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1},
2264 unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
2265 unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
2266 unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1},
2267 unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1},
2268 unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1},
2269 unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1},
2270 unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
2271 unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1},
2272 unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1},
2273 unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
2274 unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1},
2275 unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1},
2276 unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1},
2277 unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1},
2278 unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1},
2279 unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1},
2280 unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1},
2281 unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1},
2282 unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1},
2283 unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1},
2284 unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1},
2285 unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1},
2286 unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1},
2287 unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1},
2288 unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1},
2289 unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1},
2290 unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1},
2291 unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1},
2292 unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1},
2293 unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1},
2294 unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1},
2295 unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1},
2296 unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1},
2297 unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1},
2298 unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1},
2299 unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1},
2300 unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1},
2301 unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1},
2302 unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1},
2303 unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1},
2304 unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1},
2305 unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1},
2306 unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1},
2307 unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1},
2308 unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1},
2309 unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1},
2310 unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1},
2311 unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1},
2312 unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1},
2313 unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1},
2314 unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1},
2315 unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1},
2316 unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1},
2317 unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1},
2318 unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1},
2319 unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1},
2320 unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1},
2321 unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1},
2322 unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1},
2323 unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1},
2324 unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1},
2325 unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1},
2326 unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1},
2327 unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1},
2328 unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1},
2329 unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1},
2330 unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1},
2331 unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1},
2332 unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1},
2333 unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1},
2334 unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1},
2335 unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1},
2336 unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1},
2337 unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1},
2338 unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1},
2339 unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1},
2340 unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1},
2341 unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1},
2342 unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1},
2343 unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1},
2344 unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1},
2345 unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1},
2346 unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1},
2347 unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1},
2348 unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1},
2349 unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1},
2350 unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1},
2351 unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1},
2352 unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1},
2353 unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1},
2354 unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1},
2355 unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1},
2356 unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1},
2357 unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1},
2358 unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1},
2359 unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1},
2360 unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1},
2361 unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1},
2362 unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1},
2363 unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1},
2364 unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1},
2365 unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1},
2366 unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1},
2367 unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1},
2368 unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1},
2369 unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1},
2370 unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1},
2371 unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1},
2372 unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1},
2373 unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1},
2374 unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1},
2375 unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1},
2376 unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1},
2377 unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1},
2378 unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1},
2379 unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1},
2380 unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1},
2381 unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1},
2382 unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1},
2383 unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1},
2384 unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1},
2385 unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1},
2386 unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1},
2387 unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1},
2388 unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1},
2389 unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1},
2390 unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1},
2391 unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1},
2392 unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1},
2393 unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1},
2394 unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1},
2395 unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1},
2396 unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1},
2397 unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1},
2398 unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1},
2399 unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1},
2400 unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1},
2401 unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1},
2402 unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1},
2403 unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1},
2404 unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1},
2405 unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1},
2406 unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1},
2407 unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1},
2408 unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1},
2409 unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1},
2410 unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1},
2411 unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1},
2412 unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1},
2413 unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1},
2414 unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1},
2415 unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1},
2416 unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1},
2417 unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1},
2418 unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1},
2419 unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1},
2420 unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1},
2421 unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1},
2422 unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1},
2423 unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1},
2424 unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1},
2425 unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1},
2426 unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1},
2427 unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1},
2428 unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1},
2429 unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1},
2430 unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1},
2431 unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1},
2432 unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1},
2433 unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1},
2434 unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1},
2435 unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1},
2436 unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1},
2437 unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1},
2438 unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1},
2439 unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1},
2440 unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1},
2441 unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1},
2442 unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1},
2443 unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1},
2444 unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1},
2445 unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1},
2446 unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1},
2447 unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1},
2448 unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1},
2449 unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1},
2450 unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1},
2451 unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1},
2452 unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1},
2453 unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1},
2454 unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1},
2455 unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1},
2456 unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1},
2457 unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1},
2458 unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1},
2459 unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1},
2460 unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1},
2461 unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1},
2462 unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1},
2463 unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1},
2464 unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1},
2465 unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1},
2466 unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1},
2467 unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1},
2468 unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1},
2469 unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1},
2470 unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1},
2471 unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1},
2472 unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1},
2473 unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1},
2474 unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1},
2475 unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1},
2476 unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1},
2477 unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1},
2478 unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1},
2479 },
2480 R32: []unicode.Range32{
2481 unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1},
2482 unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1},
2483 unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1},
2484 unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1},
2485 unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1},
2486 unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1},
2487 unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1},
2488 unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1},
2489 unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1},
2490 unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1},
2491 unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1},
2492 unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1},
2493 unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1},
2494 unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1},
2495 unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
2496 unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
2497 unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1},
2498 unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
2499 unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1},
2500 unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1},
2501 unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1},
2502 unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
2503 unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1},
2504 unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1},
2505 unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1},
2506 unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
2507 unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
2508 unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1},
2509 unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
2510 unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1},
2511 unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
2512 unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1},
2513 unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
2514 unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1},
2515 unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
2516 unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1},
2517 unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1},
2518 unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1},
2519 unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
2520 unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1},
2521 unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1},
2522 unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
2523 unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1},
2524 unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1},
2525 unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1},
2526 unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
2527 unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
2528 unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
2529 unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1},
2530 unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
2531 unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1},
2532 unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1},
2533 unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
2534 unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1},
2535 unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
2536 unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1},
2537 unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
2538 unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1},
2539 unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1},
2540 unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1},
2541 unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
2542 unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1},
2543 unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1},
2544 unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1},
2545 unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
2546 unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1},
2547 unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1},
2548 unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1},
2549 unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
2550 unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1},
2551 unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
2552 unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1},
2553 unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1},
2554 unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
2555 unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1},
2556 unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
2557 unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1},
2558 unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
2559 unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1},
2560 unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1},
2561 unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
2562 unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1},
2563 unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
2564 unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1},
2565 unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
2566 unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1},
2567 unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1},
2568 unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
2569 unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1},
2570 unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
2571 unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1},
2572 unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
2573 unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1},
2574 unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1},
2575 unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
2576 unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1},
2577 unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1},
2578 unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
2579 unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1},
2580 unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
2581 unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
2582 unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
2583 unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
2584 unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
2585 unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
2586 unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
2587 unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
2588 unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
2589 unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1},
2590 unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
2591 unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1},
2592 unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
2593 unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
2594 unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
2595 unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
2596 unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
2597 unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
2598 unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
2599 unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
2600 unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
2601 unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
2602 unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
2603 unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
2604 unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
2605 unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
2606 unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
2607 unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
2608 unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
2609 unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
2610 unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
2611 },
2612 LatinOffset: 0,
2613}
2614
2615var _WordExtendNumLet = &unicode.RangeTable{
2616 R16: []unicode.Range16{
2617 unicode.Range16{Lo: 0x5f, Hi: 0x5f, Stride: 0x1},
2618 unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1},
2619 unicode.Range16{Lo: 0x203f, Hi: 0x2040, Stride: 0x1},
2620 unicode.Range16{Lo: 0x2054, Hi: 0x2054, Stride: 0x1},
2621 unicode.Range16{Lo: 0xfe33, Hi: 0xfe34, Stride: 0x1},
2622 unicode.Range16{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 0x1},
2623 unicode.Range16{Lo: 0xff3f, Hi: 0xff3f, Stride: 0x1},
2624 },
2625 LatinOffset: 1,
2626}
2627
2628var _WordFormat = &unicode.RangeTable{
2629 R16: []unicode.Range16{
2630 unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
2631 unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
2632 unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
2633 unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
2634 unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
2635 unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
2636 unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
2637 unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
2638 unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
2639 unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
2640 unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
2641 unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
2642 unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
2643 },
2644 R32: []unicode.Range32{
2645 unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
2646 unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
2647 unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
2648 unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
2649 },
2650 LatinOffset: 1,
2651}
2652
2653var _WordGlue_After_Zwj = &unicode.RangeTable{
2654 R16: []unicode.Range16{
2655 unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1},
2656 },
2657 R32: []unicode.Range32{
2658 unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1},
2659 unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1},
2660 },
2661 LatinOffset: 0,
2662}
2663
2664var _WordHebrew_Letter = &unicode.RangeTable{
2665 R16: []unicode.Range16{
2666 unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1},
2667 unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1},
2668 unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1},
2669 unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1},
2670 unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1},
2671 unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1},
2672 unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1},
2673 unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1},
2674 unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1},
2675 unicode.Range16{Lo: 0xfb46, Hi: 0xfb4f, Stride: 0x1},
2676 },
2677 LatinOffset: 0,
2678}
2679
2680var _WordKatakana = &unicode.RangeTable{
2681 R16: []unicode.Range16{
2682 unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1},
2683 unicode.Range16{Lo: 0x309b, Hi: 0x309c, Stride: 0x1},
2684 unicode.Range16{Lo: 0x30a0, Hi: 0x30a0, Stride: 0x1},
2685 unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1},
2686 unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1},
2687 unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1},
2688 unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1},
2689 unicode.Range16{Lo: 0x32d0, Hi: 0x32fe, Stride: 0x1},
2690 unicode.Range16{Lo: 0x3300, Hi: 0x3357, Stride: 0x1},
2691 unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1},
2692 unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1},
2693 unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1},
2694 },
2695 R32: []unicode.Range32{
2696 unicode.Range32{Lo: 0x1b000, Hi: 0x1b000, Stride: 0x1},
2697 },
2698 LatinOffset: 0,
2699}
2700
2701var _WordLF = &unicode.RangeTable{
2702 R16: []unicode.Range16{
2703 unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
2704 },
2705 LatinOffset: 1,
2706}
2707
2708var _WordMidLetter = &unicode.RangeTable{
2709 R16: []unicode.Range16{
2710 unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1},
2711 unicode.Range16{Lo: 0xb7, Hi: 0xb7, Stride: 0x1},
2712 unicode.Range16{Lo: 0x2d7, Hi: 0x2d7, Stride: 0x1},
2713 unicode.Range16{Lo: 0x387, Hi: 0x387, Stride: 0x1},
2714 unicode.Range16{Lo: 0x5f4, Hi: 0x5f4, Stride: 0x1},
2715 unicode.Range16{Lo: 0x2027, Hi: 0x2027, Stride: 0x1},
2716 unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1},
2717 unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1},
2718 unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1},
2719 },
2720 LatinOffset: 2,
2721}
2722
2723var _WordMidNum = &unicode.RangeTable{
2724 R16: []unicode.Range16{
2725 unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1},
2726 unicode.Range16{Lo: 0x3b, Hi: 0x3b, Stride: 0x1},
2727 unicode.Range16{Lo: 0x37e, Hi: 0x37e, Stride: 0x1},
2728 unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1},
2729 unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1},
2730 unicode.Range16{Lo: 0x66c, Hi: 0x66c, Stride: 0x1},
2731 unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1},
2732 unicode.Range16{Lo: 0x2044, Hi: 0x2044, Stride: 0x1},
2733 unicode.Range16{Lo: 0xfe10, Hi: 0xfe10, Stride: 0x1},
2734 unicode.Range16{Lo: 0xfe14, Hi: 0xfe14, Stride: 0x1},
2735 unicode.Range16{Lo: 0xfe50, Hi: 0xfe50, Stride: 0x1},
2736 unicode.Range16{Lo: 0xfe54, Hi: 0xfe54, Stride: 0x1},
2737 unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1},
2738 unicode.Range16{Lo: 0xff1b, Hi: 0xff1b, Stride: 0x1},
2739 },
2740 LatinOffset: 2,
2741}
2742
2743var _WordMidNumLet = &unicode.RangeTable{
2744 R16: []unicode.Range16{
2745 unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1},
2746 unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
2747 unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
2748 unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1},
2749 unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1},
2750 unicode.Range16{Lo: 0xff07, Hi: 0xff07, Stride: 0x1},
2751 unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1},
2752 },
2753 LatinOffset: 1,
2754}
2755
2756var _WordNewline = &unicode.RangeTable{
2757 R16: []unicode.Range16{
2758 unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
2759 unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1},
2760 unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
2761 unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
2762 },
2763 LatinOffset: 2,
2764}
2765
2766var _WordNumeric = &unicode.RangeTable{
2767 R16: []unicode.Range16{
2768 unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
2769 unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
2770 unicode.Range16{Lo: 0x66b, Hi: 0x66b, Stride: 0x1},
2771 unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
2772 unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
2773 unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
2774 unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
2775 unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
2776 unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
2777 unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
2778 unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
2779 unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
2780 unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
2781 unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
2782 unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
2783 unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
2784 unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
2785 unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
2786 unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
2787 unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
2788 unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
2789 unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
2790 unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
2791 unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
2792 unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
2793 unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
2794 unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
2795 unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
2796 unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
2797 unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
2798 unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
2799 unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
2800 unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
2801 unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
2802 unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
2803 unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
2804 unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
2805 },
2806 R32: []unicode.Range32{
2807 unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
2808 unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
2809 unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
2810 unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
2811 unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
2812 unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
2813 unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
2814 unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
2815 unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
2816 unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
2817 unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
2818 unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
2819 unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
2820 unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
2821 unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
2822 unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
2823 unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
2824 },
2825 LatinOffset: 1,
2826}
2827
2828var _WordRegional_Indicator = &unicode.RangeTable{
2829 R32: []unicode.Range32{
2830 unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
2831 },
2832 LatinOffset: 0,
2833}
2834
2835var _WordSingle_Quote = &unicode.RangeTable{
2836 R16: []unicode.Range16{
2837 unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
2838 },
2839 LatinOffset: 1,
2840}
2841
2842var _WordZWJ = &unicode.RangeTable{
2843 R16: []unicode.Range16{
2844 unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
2845 },
2846 LatinOffset: 0,
2847}
2848
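// _WordRuneRange is a defined type over unicode.RangeTable, used so that the
// Word_Break property table matched for a rune can report its property name
// via its String method below.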
2849type _WordRuneRange unicode.RangeTable
2850
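// _WordRuneType returns the Word_Break property table that contains r, or nil
// if r matches none of the tables (the default "Other" class). For example,
// given the tables above, _WordRuneType(':') resolves to the MidLetter table
// and _WordRuneType('\n') to the LF table.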
2851func _WordRuneType(r rune) *_WordRuneRange {
2852 switch {
2853 case unicode.Is(_WordALetter, r):
2854 return (*_WordRuneRange)(_WordALetter)
2855 case unicode.Is(_WordCR, r):
2856 return (*_WordRuneRange)(_WordCR)
2857 case unicode.Is(_WordDouble_Quote, r):
2858 return (*_WordRuneRange)(_WordDouble_Quote)
2859 case unicode.Is(_WordE_Base, r):
2860 return (*_WordRuneRange)(_WordE_Base)
2861 case unicode.Is(_WordE_Base_GAZ, r):
2862 return (*_WordRuneRange)(_WordE_Base_GAZ)
2863 case unicode.Is(_WordE_Modifier, r):
2864 return (*_WordRuneRange)(_WordE_Modifier)
2865 case unicode.Is(_WordExtend, r):
2866 return (*_WordRuneRange)(_WordExtend)
2867 case unicode.Is(_WordExtendNumLet, r):
2868 return (*_WordRuneRange)(_WordExtendNumLet)
2869 case unicode.Is(_WordFormat, r):
2870 return (*_WordRuneRange)(_WordFormat)
2871 case unicode.Is(_WordGlue_After_Zwj, r):
2872 return (*_WordRuneRange)(_WordGlue_After_Zwj)
2873 case unicode.Is(_WordHebrew_Letter, r):
2874 return (*_WordRuneRange)(_WordHebrew_Letter)
2875 case unicode.Is(_WordKatakana, r):
2876 return (*_WordRuneRange)(_WordKatakana)
2877 case unicode.Is(_WordLF, r):
2878 return (*_WordRuneRange)(_WordLF)
2879 case unicode.Is(_WordMidLetter, r):
2880 return (*_WordRuneRange)(_WordMidLetter)
2881 case unicode.Is(_WordMidNum, r):
2882 return (*_WordRuneRange)(_WordMidNum)
2883 case unicode.Is(_WordMidNumLet, r):
2884 return (*_WordRuneRange)(_WordMidNumLet)
2885 case unicode.Is(_WordNewline, r):
2886 return (*_WordRuneRange)(_WordNewline)
2887 case unicode.Is(_WordNumeric, r):
2888 return (*_WordRuneRange)(_WordNumeric)
2889 case unicode.Is(_WordRegional_Indicator, r):
2890 return (*_WordRuneRange)(_WordRegional_Indicator)
2891 case unicode.Is(_WordSingle_Quote, r):
2892 return (*_WordRuneRange)(_WordSingle_Quote)
2893 case unicode.Is(_WordZWJ, r):
2894 return (*_WordRuneRange)(_WordZWJ)
2895 default:
2896 return nil
2897 }
2898}
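
// String returns the Unicode Word_Break property name for the table (for
// example "ALetter" or "Numeric"); tables not recognized here map to "Other".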
2899func (rng *_WordRuneRange) String() string {
2900 switch (*unicode.RangeTable)(rng) {
2901 case _WordALetter:
2902 return "ALetter"
2903 case _WordCR:
2904 return "CR"
2905 case _WordDouble_Quote:
2906 return "Double_Quote"
2907 case _WordE_Base:
2908 return "E_Base"
2909 case _WordE_Base_GAZ:
2910 return "E_Base_GAZ"
2911 case _WordE_Modifier:
2912 return "E_Modifier"
2913 case _WordExtend:
2914 return "Extend"
2915 case _WordExtendNumLet:
2916 return "ExtendNumLet"
2917 case _WordFormat:
2918 return "Format"
2919 case _WordGlue_After_Zwj:
2920 return "Glue_After_Zwj"
2921 case _WordHebrew_Letter:
2922 return "Hebrew_Letter"
2923 case _WordKatakana:
2924 return "Katakana"
2925 case _WordLF:
2926 return "LF"
2927 case _WordMidLetter:
2928 return "MidLetter"
2929 case _WordMidNum:
2930 return "MidNum"
2931 case _WordMidNumLet:
2932 return "MidNumLet"
2933 case _WordNewline:
2934 return "Newline"
2935 case _WordNumeric:
2936 return "Numeric"
2937 case _WordRegional_Indicator:
2938 return "Regional_Indicator"
2939 case _WordSingle_Quote:
2940 return "Single_Quote"
2941 case _WordZWJ:
2942 return "ZWJ"
2943 default:
2944 return "Other"
2945 }
2946}
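
// wordPropertyName is a small illustrative helper, not part of the generated
// tables: it shows how _WordRuneType and String combine to classify a rune,
// falling back to "Other" for runes with no entry above.
func wordPropertyName(r rune) string {
	if t := _WordRuneType(r); t != nil {
		return t.String()
	}
	return "Other"
}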
2947
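// The tables below are the analogous generated property tables for Unicode
// sentence segmentation (UAX #29 Sentence_Break), beginning with ATerm.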
2948var _SentenceATerm = &unicode.RangeTable{
2949 R16: []unicode.Range16{
2950 unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1},
2951 unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1},
2952 unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1},
2953 unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1},
2954 },
2955 LatinOffset: 1,
2956}
2957
2958var _SentenceCR = &unicode.RangeTable{
2959 R16: []unicode.Range16{
2960 unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
2961 },
2962 LatinOffset: 1,
2963}
2964
2965var _SentenceClose = &unicode.RangeTable{
2966 R16: []unicode.Range16{
2967 unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
2968 unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
2969 unicode.Range16{Lo: 0x28, Hi: 0x28, Stride: 0x1},
2970 unicode.Range16{Lo: 0x29, Hi: 0x29, Stride: 0x1},
2971 unicode.Range16{Lo: 0x5b, Hi: 0x5b, Stride: 0x1},
2972 unicode.Range16{Lo: 0x5d, Hi: 0x5d, Stride: 0x1},
2973 unicode.Range16{Lo: 0x7b, Hi: 0x7b, Stride: 0x1},
2974 unicode.Range16{Lo: 0x7d, Hi: 0x7d, Stride: 0x1},
2975 unicode.Range16{Lo: 0xab, Hi: 0xab, Stride: 0x1},
2976 unicode.Range16{Lo: 0xbb, Hi: 0xbb, Stride: 0x1},
2977 unicode.Range16{Lo: 0xf3a, Hi: 0xf3a, Stride: 0x1},
2978 unicode.Range16{Lo: 0xf3b, Hi: 0xf3b, Stride: 0x1},
2979 unicode.Range16{Lo: 0xf3c, Hi: 0xf3c, Stride: 0x1},
2980 unicode.Range16{Lo: 0xf3d, Hi: 0xf3d, Stride: 0x1},
2981 unicode.Range16{Lo: 0x169b, Hi: 0x169b, Stride: 0x1},
2982 unicode.Range16{Lo: 0x169c, Hi: 0x169c, Stride: 0x1},
2983 unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
2984 unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
2985 unicode.Range16{Lo: 0x201a, Hi: 0x201a, Stride: 0x1},
2986 unicode.Range16{Lo: 0x201b, Hi: 0x201c, Stride: 0x1},
2987 unicode.Range16{Lo: 0x201d, Hi: 0x201d, Stride: 0x1},
2988 unicode.Range16{Lo: 0x201e, Hi: 0x201e, Stride: 0x1},
2989 unicode.Range16{Lo: 0x201f, Hi: 0x201f, Stride: 0x1},
2990 unicode.Range16{Lo: 0x2039, Hi: 0x2039, Stride: 0x1},
2991 unicode.Range16{Lo: 0x203a, Hi: 0x203a, Stride: 0x1},
2992 unicode.Range16{Lo: 0x2045, Hi: 0x2045, Stride: 0x1},
2993 unicode.Range16{Lo: 0x2046, Hi: 0x2046, Stride: 0x1},
2994 unicode.Range16{Lo: 0x207d, Hi: 0x207d, Stride: 0x1},
2995 unicode.Range16{Lo: 0x207e, Hi: 0x207e, Stride: 0x1},
2996 unicode.Range16{Lo: 0x208d, Hi: 0x208d, Stride: 0x1},
2997 unicode.Range16{Lo: 0x208e, Hi: 0x208e, Stride: 0x1},
2998 unicode.Range16{Lo: 0x2308, Hi: 0x2308, Stride: 0x1},
2999 unicode.Range16{Lo: 0x2309, Hi: 0x2309, Stride: 0x1},
3000 unicode.Range16{Lo: 0x230a, Hi: 0x230a, Stride: 0x1},
3001 unicode.Range16{Lo: 0x230b, Hi: 0x230b, Stride: 0x1},
3002 unicode.Range16{Lo: 0x2329, Hi: 0x2329, Stride: 0x1},
3003 unicode.Range16{Lo: 0x232a, Hi: 0x232a, Stride: 0x1},
3004 unicode.Range16{Lo: 0x275b, Hi: 0x2760, Stride: 0x1},
3005 unicode.Range16{Lo: 0x2768, Hi: 0x2768, Stride: 0x1},
3006 unicode.Range16{Lo: 0x2769, Hi: 0x2769, Stride: 0x1},
3007 unicode.Range16{Lo: 0x276a, Hi: 0x276a, Stride: 0x1},
3008 unicode.Range16{Lo: 0x276b, Hi: 0x276b, Stride: 0x1},
3009 unicode.Range16{Lo: 0x276c, Hi: 0x276c, Stride: 0x1},
3010 unicode.Range16{Lo: 0x276d, Hi: 0x276d, Stride: 0x1},
3011 unicode.Range16{Lo: 0x276e, Hi: 0x276e, Stride: 0x1},
3012 unicode.Range16{Lo: 0x276f, Hi: 0x276f, Stride: 0x1},
3013 unicode.Range16{Lo: 0x2770, Hi: 0x2770, Stride: 0x1},
3014 unicode.Range16{Lo: 0x2771, Hi: 0x2771, Stride: 0x1},
3015 unicode.Range16{Lo: 0x2772, Hi: 0x2772, Stride: 0x1},
3016 unicode.Range16{Lo: 0x2773, Hi: 0x2773, Stride: 0x1},
3017 unicode.Range16{Lo: 0x2774, Hi: 0x2774, Stride: 0x1},
3018 unicode.Range16{Lo: 0x2775, Hi: 0x2775, Stride: 0x1},
3019 unicode.Range16{Lo: 0x27c5, Hi: 0x27c5, Stride: 0x1},
3020 unicode.Range16{Lo: 0x27c6, Hi: 0x27c6, Stride: 0x1},
3021 unicode.Range16{Lo: 0x27e6, Hi: 0x27e6, Stride: 0x1},
3022 unicode.Range16{Lo: 0x27e7, Hi: 0x27e7, Stride: 0x1},
3023 unicode.Range16{Lo: 0x27e8, Hi: 0x27e8, Stride: 0x1},
3024 unicode.Range16{Lo: 0x27e9, Hi: 0x27e9, Stride: 0x1},
3025 unicode.Range16{Lo: 0x27ea, Hi: 0x27ea, Stride: 0x1},
3026 unicode.Range16{Lo: 0x27eb, Hi: 0x27eb, Stride: 0x1},
3027 unicode.Range16{Lo: 0x27ec, Hi: 0x27ec, Stride: 0x1},
3028 unicode.Range16{Lo: 0x27ed, Hi: 0x27ed, Stride: 0x1},
3029 unicode.Range16{Lo: 0x27ee, Hi: 0x27ee, Stride: 0x1},
3030 unicode.Range16{Lo: 0x27ef, Hi: 0x27ef, Stride: 0x1},
3031 unicode.Range16{Lo: 0x2983, Hi: 0x2983, Stride: 0x1},
3032 unicode.Range16{Lo: 0x2984, Hi: 0x2984, Stride: 0x1},
3033 unicode.Range16{Lo: 0x2985, Hi: 0x2985, Stride: 0x1},
3034 unicode.Range16{Lo: 0x2986, Hi: 0x2986, Stride: 0x1},
3035 unicode.Range16{Lo: 0x2987, Hi: 0x2987, Stride: 0x1},
3036 unicode.Range16{Lo: 0x2988, Hi: 0x2988, Stride: 0x1},
3037 unicode.Range16{Lo: 0x2989, Hi: 0x2989, Stride: 0x1},
3038 unicode.Range16{Lo: 0x298a, Hi: 0x298a, Stride: 0x1},
3039 unicode.Range16{Lo: 0x298b, Hi: 0x298b, Stride: 0x1},
3040 unicode.Range16{Lo: 0x298c, Hi: 0x298c, Stride: 0x1},
3041 unicode.Range16{Lo: 0x298d, Hi: 0x298d, Stride: 0x1},
3042 unicode.Range16{Lo: 0x298e, Hi: 0x298e, Stride: 0x1},
3043 unicode.Range16{Lo: 0x298f, Hi: 0x298f, Stride: 0x1},
3044 unicode.Range16{Lo: 0x2990, Hi: 0x2990, Stride: 0x1},
3045 unicode.Range16{Lo: 0x2991, Hi: 0x2991, Stride: 0x1},
3046 unicode.Range16{Lo: 0x2992, Hi: 0x2992, Stride: 0x1},
3047 unicode.Range16{Lo: 0x2993, Hi: 0x2993, Stride: 0x1},
3048 unicode.Range16{Lo: 0x2994, Hi: 0x2994, Stride: 0x1},
3049 unicode.Range16{Lo: 0x2995, Hi: 0x2995, Stride: 0x1},
3050 unicode.Range16{Lo: 0x2996, Hi: 0x2996, Stride: 0x1},
3051 unicode.Range16{Lo: 0x2997, Hi: 0x2997, Stride: 0x1},
3052 unicode.Range16{Lo: 0x2998, Hi: 0x2998, Stride: 0x1},
3053 unicode.Range16{Lo: 0x29d8, Hi: 0x29d8, Stride: 0x1},
3054 unicode.Range16{Lo: 0x29d9, Hi: 0x29d9, Stride: 0x1},
3055 unicode.Range16{Lo: 0x29da, Hi: 0x29da, Stride: 0x1},
3056 unicode.Range16{Lo: 0x29db, Hi: 0x29db, Stride: 0x1},
3057 unicode.Range16{Lo: 0x29fc, Hi: 0x29fc, Stride: 0x1},
3058 unicode.Range16{Lo: 0x29fd, Hi: 0x29fd, Stride: 0x1},
3059 unicode.Range16{Lo: 0x2e00, Hi: 0x2e01, Stride: 0x1},
3060 unicode.Range16{Lo: 0x2e02, Hi: 0x2e02, Stride: 0x1},
3061 unicode.Range16{Lo: 0x2e03, Hi: 0x2e03, Stride: 0x1},
3062 unicode.Range16{Lo: 0x2e04, Hi: 0x2e04, Stride: 0x1},
3063 unicode.Range16{Lo: 0x2e05, Hi: 0x2e05, Stride: 0x1},
3064 unicode.Range16{Lo: 0x2e06, Hi: 0x2e08, Stride: 0x1},
3065 unicode.Range16{Lo: 0x2e09, Hi: 0x2e09, Stride: 0x1},
3066 unicode.Range16{Lo: 0x2e0a, Hi: 0x2e0a, Stride: 0x1},
3067 unicode.Range16{Lo: 0x2e0b, Hi: 0x2e0b, Stride: 0x1},
3068 unicode.Range16{Lo: 0x2e0c, Hi: 0x2e0c, Stride: 0x1},
3069 unicode.Range16{Lo: 0x2e0d, Hi: 0x2e0d, Stride: 0x1},
3070 unicode.Range16{Lo: 0x2e1c, Hi: 0x2e1c, Stride: 0x1},
3071 unicode.Range16{Lo: 0x2e1d, Hi: 0x2e1d, Stride: 0x1},
3072 unicode.Range16{Lo: 0x2e20, Hi: 0x2e20, Stride: 0x1},
3073 unicode.Range16{Lo: 0x2e21, Hi: 0x2e21, Stride: 0x1},
3074 unicode.Range16{Lo: 0x2e22, Hi: 0x2e22, Stride: 0x1},
3075 unicode.Range16{Lo: 0x2e23, Hi: 0x2e23, Stride: 0x1},
3076 unicode.Range16{Lo: 0x2e24, Hi: 0x2e24, Stride: 0x1},
3077 unicode.Range16{Lo: 0x2e25, Hi: 0x2e25, Stride: 0x1},
3078 unicode.Range16{Lo: 0x2e26, Hi: 0x2e26, Stride: 0x1},
3079 unicode.Range16{Lo: 0x2e27, Hi: 0x2e27, Stride: 0x1},
3080 unicode.Range16{Lo: 0x2e28, Hi: 0x2e28, Stride: 0x1},
3081 unicode.Range16{Lo: 0x2e29, Hi: 0x2e29, Stride: 0x1},
3082 unicode.Range16{Lo: 0x2e42, Hi: 0x2e42, Stride: 0x1},
3083 unicode.Range16{Lo: 0x3008, Hi: 0x3008, Stride: 0x1},
3084 unicode.Range16{Lo: 0x3009, Hi: 0x3009, Stride: 0x1},
3085 unicode.Range16{Lo: 0x300a, Hi: 0x300a, Stride: 0x1},
3086 unicode.Range16{Lo: 0x300b, Hi: 0x300b, Stride: 0x1},
3087 unicode.Range16{Lo: 0x300c, Hi: 0x300c, Stride: 0x1},
3088 unicode.Range16{Lo: 0x300d, Hi: 0x300d, Stride: 0x1},
3089 unicode.Range16{Lo: 0x300e, Hi: 0x300e, Stride: 0x1},
3090 unicode.Range16{Lo: 0x300f, Hi: 0x300f, Stride: 0x1},
3091 unicode.Range16{Lo: 0x3010, Hi: 0x3010, Stride: 0x1},
3092 unicode.Range16{Lo: 0x3011, Hi: 0x3011, Stride: 0x1},
3093 unicode.Range16{Lo: 0x3014, Hi: 0x3014, Stride: 0x1},
3094 unicode.Range16{Lo: 0x3015, Hi: 0x3015, Stride: 0x1},
3095 unicode.Range16{Lo: 0x3016, Hi: 0x3016, Stride: 0x1},
3096 unicode.Range16{Lo: 0x3017, Hi: 0x3017, Stride: 0x1},
3097 unicode.Range16{Lo: 0x3018, Hi: 0x3018, Stride: 0x1},
3098 unicode.Range16{Lo: 0x3019, Hi: 0x3019, Stride: 0x1},
3099 unicode.Range16{Lo: 0x301a, Hi: 0x301a, Stride: 0x1},
3100 unicode.Range16{Lo: 0x301b, Hi: 0x301b, Stride: 0x1},
3101 unicode.Range16{Lo: 0x301d, Hi: 0x301d, Stride: 0x1},
3102 unicode.Range16{Lo: 0x301e, Hi: 0x301f, Stride: 0x1},
3103 unicode.Range16{Lo: 0xfd3e, Hi: 0xfd3e, Stride: 0x1},
3104 unicode.Range16{Lo: 0xfd3f, Hi: 0xfd3f, Stride: 0x1},
3105 unicode.Range16{Lo: 0xfe17, Hi: 0xfe17, Stride: 0x1},
3106 unicode.Range16{Lo: 0xfe18, Hi: 0xfe18, Stride: 0x1},
3107 unicode.Range16{Lo: 0xfe35, Hi: 0xfe35, Stride: 0x1},
3108 unicode.Range16{Lo: 0xfe36, Hi: 0xfe36, Stride: 0x1},
3109 unicode.Range16{Lo: 0xfe37, Hi: 0xfe37, Stride: 0x1},
3110 unicode.Range16{Lo: 0xfe38, Hi: 0xfe38, Stride: 0x1},
3111 unicode.Range16{Lo: 0xfe39, Hi: 0xfe39, Stride: 0x1},
3112 unicode.Range16{Lo: 0xfe3a, Hi: 0xfe3a, Stride: 0x1},
3113 unicode.Range16{Lo: 0xfe3b, Hi: 0xfe3b, Stride: 0x1},
3114 unicode.Range16{Lo: 0xfe3c, Hi: 0xfe3c, Stride: 0x1},
3115 unicode.Range16{Lo: 0xfe3d, Hi: 0xfe3d, Stride: 0x1},
3116 unicode.Range16{Lo: 0xfe3e, Hi: 0xfe3e, Stride: 0x1},
3117 unicode.Range16{Lo: 0xfe3f, Hi: 0xfe3f, Stride: 0x1},
3118 unicode.Range16{Lo: 0xfe40, Hi: 0xfe40, Stride: 0x1},
3119 unicode.Range16{Lo: 0xfe41, Hi: 0xfe41, Stride: 0x1},
3120 unicode.Range16{Lo: 0xfe42, Hi: 0xfe42, Stride: 0x1},
3121 unicode.Range16{Lo: 0xfe43, Hi: 0xfe43, Stride: 0x1},
3122 unicode.Range16{Lo: 0xfe44, Hi: 0xfe44, Stride: 0x1},
3123 unicode.Range16{Lo: 0xfe47, Hi: 0xfe47, Stride: 0x1},
3124 unicode.Range16{Lo: 0xfe48, Hi: 0xfe48, Stride: 0x1},
3125 unicode.Range16{Lo: 0xfe59, Hi: 0xfe59, Stride: 0x1},
3126 unicode.Range16{Lo: 0xfe5a, Hi: 0xfe5a, Stride: 0x1},
3127 unicode.Range16{Lo: 0xfe5b, Hi: 0xfe5b, Stride: 0x1},
3128 unicode.Range16{Lo: 0xfe5c, Hi: 0xfe5c, Stride: 0x1},
3129 unicode.Range16{Lo: 0xfe5d, Hi: 0xfe5d, Stride: 0x1},
3130 unicode.Range16{Lo: 0xfe5e, Hi: 0xfe5e, Stride: 0x1},
3131 unicode.Range16{Lo: 0xff08, Hi: 0xff08, Stride: 0x1},
3132 unicode.Range16{Lo: 0xff09, Hi: 0xff09, Stride: 0x1},
3133 unicode.Range16{Lo: 0xff3b, Hi: 0xff3b, Stride: 0x1},
3134 unicode.Range16{Lo: 0xff3d, Hi: 0xff3d, Stride: 0x1},
3135 unicode.Range16{Lo: 0xff5b, Hi: 0xff5b, Stride: 0x1},
3136 unicode.Range16{Lo: 0xff5d, Hi: 0xff5d, Stride: 0x1},
3137 unicode.Range16{Lo: 0xff5f, Hi: 0xff5f, Stride: 0x1},
3138 unicode.Range16{Lo: 0xff60, Hi: 0xff60, Stride: 0x1},
3139 unicode.Range16{Lo: 0xff62, Hi: 0xff62, Stride: 0x1},
3140 unicode.Range16{Lo: 0xff63, Hi: 0xff63, Stride: 0x1},
3141 },
3142 R32: []unicode.Range32{
3143 unicode.Range32{Lo: 0x1f676, Hi: 0x1f678, Stride: 0x1},
3144 },
3145 LatinOffset: 10,
3146}
3147
3148var _SentenceExtend = &unicode.RangeTable{
3149 R16: []unicode.Range16{
3150 unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
3151 unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
3152 unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
3153 unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
3154 unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
3155 unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
3156 unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
3157 unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
3158 unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
3159 unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
3160 unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
3161 unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
3162 unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
3163 unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
3164 unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
3165 unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
3166 unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
3167 unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
3168 unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
3169 unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
3170 unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
3171 unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
3172 unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
3173 unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
3174 unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1},
3175 unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
3176 unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
3177 unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
3178 unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
3179 unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
3180 unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
3181 unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
3182 unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
3183 unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
3184 unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
3185 unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
3186 unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1},
3187 unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1},
3188 unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
3189 unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1},
3190 unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1},
3191 unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1},
3192 unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
3193 unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
3194 unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1},
3195 unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1},
3196 unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1},
3197 unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1},
3198 unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
3199 unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1},
3200 unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
3201 unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1},
3202 unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1},
3203 unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1},
3204 unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1},
3205 unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1},
3206 unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1},
3207 unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1},
3208 unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
3209 unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1},
3210 unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
3211 unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1},
3212 unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1},
3213 unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
3214 unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
3215 unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1},
3216 unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1},
3217 unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1},
3218 unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
3219 unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1},
3220 unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1},
3221 unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1},
3222 unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
3223 unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1},
3224 unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
3225 unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
3226 unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1},
3227 unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1},
3228 unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1},
3229 unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1},
3230 unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1},
3231 unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1},
3232 unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1},
3233 unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
3234 unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
3235 unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
3236 unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1},
3237 unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1},
3238 unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1},
3239 unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
3240 unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1},
3241 unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
3242 unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1},
3243 unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1},
3244 unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1},
3245 unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1},
3246 unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1},
3247 unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
3248 unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1},
3249 unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
3250 unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1},
3251 unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1},
3252 unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1},
3253 unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
3254 unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
3255 unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1},
3256 unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1},
3257 unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1},
3258 unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1},
3259 unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
3260 unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1},
3261 unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1},
3262 unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
3263 unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1},
3264 unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1},
3265 unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1},
3266 unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1},
3267 unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1},
3268 unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1},
3269 unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1},
3270 unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1},
3271 unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1},
3272 unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1},
3273 unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1},
3274 unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1},
3275 unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1},
3276 unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1},
3277 unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1},
3278 unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1},
3279 unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1},
3280 unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1},
3281 unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1},
3282 unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1},
3283 unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1},
3284 unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1},
3285 unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1},
3286 unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1},
3287 unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1},
3288 unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1},
3289 unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1},
3290 unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1},
3291 unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1},
3292 unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1},
3293 unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1},
3294 unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1},
3295 unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1},
3296 unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1},
3297 unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1},
3298 unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1},
3299 unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1},
3300 unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1},
3301 unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1},
3302 unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1},
3303 unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1},
3304 unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1},
3305 unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1},
3306 unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1},
3307 unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1},
3308 unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1},
3309 unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1},
3310 unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1},
3311 unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1},
3312 unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1},
3313 unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1},
3314 unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1},
3315 unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1},
3316 unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1},
3317 unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1},
3318 unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1},
3319 unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1},
3320 unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1},
3321 unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1},
3322 unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1},
3323 unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1},
3324 unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1},
3325 unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1},
3326 unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1},
3327 unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1},
3328 unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1},
3329 unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1},
3330 unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1},
3331 unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1},
3332 unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1},
3333 unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1},
3334 unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1},
3335 unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1},
3336 unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1},
3337 unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1},
3338 unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1},
3339 unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1},
3340 unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1},
3341 unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1},
3342 unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1},
3343 unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1},
3344 unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1},
3345 unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1},
3346 unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1},
3347 unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1},
3348 unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1},
3349 unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1},
3350 unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1},
3351 unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1},
3352 unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1},
3353 unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1},
3354 unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1},
3355 unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1},
3356 unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1},
3357 unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1},
3358 unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1},
3359 unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1},
3360 unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1},
3361 unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1},
3362 unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1},
3363 unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1},
3364 unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1},
3365 unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1},
3366 unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1},
3367 unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1},
3368 unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1},
3369 unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1},
3370 unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1},
3371 unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1},
3372 unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1},
3373 unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1},
3374 unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1},
3375 unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1},
3376 unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1},
3377 unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1},
3378 unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1},
3379 unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1},
3380 unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1},
3381 unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1},
3382 unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1},
3383 unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1},
3384 unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1},
3385 unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1},
3386 unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1},
3387 unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1},
3388 unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1},
3389 unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1},
3390 unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1},
3391 unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1},
3392 unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1},
3393 unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1},
3394 unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1},
3395 unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1},
3396 unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1},
3397 unicode.Range16{Lo: 0x200c, Hi: 0x200d, Stride: 0x1},
3398 unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1},
3399 unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1},
3400 unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1},
3401 unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1},
3402 unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1},
3403 unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1},
3404 unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1},
3405 unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1},
3406 unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1},
3407 unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1},
3408 unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1},
3409 unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1},
3410 unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1},
3411 unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1},
3412 unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1},
3413 unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1},
3414 unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1},
3415 unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1},
3416 unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1},
3417 unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1},
3418 unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1},
3419 unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1},
3420 unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1},
3421 unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1},
3422 unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1},
3423 unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1},
3424 unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1},
3425 unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1},
3426 unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1},
3427 unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1},
3428 unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1},
3429 unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1},
3430 unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1},
3431 unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1},
3432 unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1},
3433 unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1},
3434 unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1},
3435 unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1},
3436 unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1},
3437 unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1},
3438 unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1},
3439 unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1},
3440 unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1},
3441 unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1},
3442 unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1},
3443 unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1},
3444 unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1},
3445 unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1},
3446 unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1},
3447 unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1},
3448 unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1},
3449 unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1},
3450 unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1},
3451 unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1},
3452 unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1},
3453 unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1},
3454 unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1},
3455 unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1},
3456 unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1},
3457 unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1},
3458 unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1},
3459 unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1},
3460 unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1},
3461 unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1},
3462 unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1},
3463 unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1},
3464 unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1},
3465 unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1},
3466 unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1},
3467 unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1},
3468 },
3469 R32: []unicode.Range32{
3470 unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1},
3471 unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1},
3472 unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1},
3473 unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1},
3474 unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1},
3475 unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1},
3476 unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1},
3477 unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1},
3478 unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1},
3479 unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1},
3480 unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1},
3481 unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1},
3482 unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1},
3483 unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1},
3484 unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1},
3485 unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1},
3486 unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1},
3487 unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
3488 unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1},
3489 unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1},
3490 unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1},
3491 unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
3492 unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1},
3493 unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1},
3494 unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1},
3495 unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
3496 unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
3497 unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1},
3498 unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
3499 unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1},
3500 unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
3501 unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1},
3502 unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
3503 unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1},
3504 unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
3505 unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1},
3506 unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1},
3507 unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1},
3508 unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
3509 unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1},
3510 unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1},
3511 unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
3512 unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1},
3513 unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1},
3514 unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1},
3515 unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
3516 unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
3517 unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
3518 unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1},
3519 unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
3520 unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1},
3521 unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1},
3522 unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
3523 unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1},
3524 unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
3525 unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1},
3526 unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
3527 unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1},
3528 unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1},
3529 unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1},
3530 unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
3531 unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1},
3532 unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1},
3533 unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1},
3534 unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
3535 unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1},
3536 unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1},
3537 unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1},
3538 unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
3539 unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1},
3540 unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
3541 unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1},
3542 unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1},
3543 unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
3544 unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1},
3545 unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
3546 unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1},
3547 unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
3548 unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1},
3549 unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1},
3550 unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
3551 unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1},
3552 unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
3553 unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1},
3554 unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
3555 unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1},
3556 unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1},
3557 unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
3558 unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1},
3559 unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
3560 unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1},
3561 unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
3562 unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1},
3563 unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1},
3564 unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
3565 unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1},
3566 unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1},
3567 unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
3568 unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1},
3569 unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
3570 unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
3571 unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
3572 unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
3573 unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
3574 unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
3575 unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
3576 unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
3577 unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
3578 unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1},
3579 unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
3580 unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1},
3581 unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
3582 unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
3583 unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
3584 unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
3585 unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
3586 unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
3587 unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
3588 unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
3589 unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
3590 unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
3591 unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
3592 unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
3593 unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
3594 unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
3595 unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
3596 unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
3597 unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
3598 unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
3599 unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
3600 },
3601 LatinOffset: 0,
3602}
3603
3604var _SentenceFormat = &unicode.RangeTable{
3605 R16: []unicode.Range16{
3606 unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
3607 unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
3608 unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
3609 unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
3610 unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
3611 unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
3612 unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
3613 unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1},
3614 unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
3615 unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
3616 unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
3617 unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
3618 unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
3619 unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
3620 },
3621 R32: []unicode.Range32{
3622 unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
3623 unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
3624 unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
3625 unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
3626 },
3627 LatinOffset: 1,
3628}
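
// Illustrative sketch, not part of the generated tables: UAX #29 sentence
// segmentation ignores Extend and Format characters between the other
// classes, so a scanner built on these tables could skip them with a
// hypothetical helper like this.
func isSentenceIgnorable(r rune) bool {
	return unicode.Is(_SentenceExtend, r) || unicode.Is(_SentenceFormat, r)
}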
3629
3630var _SentenceLF = &unicode.RangeTable{
3631 R16: []unicode.Range16{
3632 unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
3633 },
3634 LatinOffset: 1,
3635}
3636
3637var _SentenceLower = &unicode.RangeTable{
3638 R16: []unicode.Range16{
3639 unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
3640 unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
3641 unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
3642 unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
3643 unicode.Range16{Lo: 0xdf, Hi: 0xf6, Stride: 0x1},
3644 unicode.Range16{Lo: 0xf8, Hi: 0xff, Stride: 0x1},
3645 unicode.Range16{Lo: 0x101, Hi: 0x101, Stride: 0x1},
3646 unicode.Range16{Lo: 0x103, Hi: 0x103, Stride: 0x1},
3647 unicode.Range16{Lo: 0x105, Hi: 0x105, Stride: 0x1},
3648 unicode.Range16{Lo: 0x107, Hi: 0x107, Stride: 0x1},
3649 unicode.Range16{Lo: 0x109, Hi: 0x109, Stride: 0x1},
3650 unicode.Range16{Lo: 0x10b, Hi: 0x10b, Stride: 0x1},
3651 unicode.Range16{Lo: 0x10d, Hi: 0x10d, Stride: 0x1},
3652 unicode.Range16{Lo: 0x10f, Hi: 0x10f, Stride: 0x1},
3653 unicode.Range16{Lo: 0x111, Hi: 0x111, Stride: 0x1},
3654 unicode.Range16{Lo: 0x113, Hi: 0x113, Stride: 0x1},
3655 unicode.Range16{Lo: 0x115, Hi: 0x115, Stride: 0x1},
3656 unicode.Range16{Lo: 0x117, Hi: 0x117, Stride: 0x1},
3657 unicode.Range16{Lo: 0x119, Hi: 0x119, Stride: 0x1},
3658 unicode.Range16{Lo: 0x11b, Hi: 0x11b, Stride: 0x1},
3659 unicode.Range16{Lo: 0x11d, Hi: 0x11d, Stride: 0x1},
3660 unicode.Range16{Lo: 0x11f, Hi: 0x11f, Stride: 0x1},
3661 unicode.Range16{Lo: 0x121, Hi: 0x121, Stride: 0x1},
3662 unicode.Range16{Lo: 0x123, Hi: 0x123, Stride: 0x1},
3663 unicode.Range16{Lo: 0x125, Hi: 0x125, Stride: 0x1},
3664 unicode.Range16{Lo: 0x127, Hi: 0x127, Stride: 0x1},
3665 unicode.Range16{Lo: 0x129, Hi: 0x129, Stride: 0x1},
3666 unicode.Range16{Lo: 0x12b, Hi: 0x12b, Stride: 0x1},
3667 unicode.Range16{Lo: 0x12d, Hi: 0x12d, Stride: 0x1},
3668 unicode.Range16{Lo: 0x12f, Hi: 0x12f, Stride: 0x1},
3669 unicode.Range16{Lo: 0x131, Hi: 0x131, Stride: 0x1},
3670 unicode.Range16{Lo: 0x133, Hi: 0x133, Stride: 0x1},
3671 unicode.Range16{Lo: 0x135, Hi: 0x135, Stride: 0x1},
3672 unicode.Range16{Lo: 0x137, Hi: 0x138, Stride: 0x1},
3673 unicode.Range16{Lo: 0x13a, Hi: 0x13a, Stride: 0x1},
3674 unicode.Range16{Lo: 0x13c, Hi: 0x13c, Stride: 0x1},
3675 unicode.Range16{Lo: 0x13e, Hi: 0x13e, Stride: 0x1},
3676 unicode.Range16{Lo: 0x140, Hi: 0x140, Stride: 0x1},
3677 unicode.Range16{Lo: 0x142, Hi: 0x142, Stride: 0x1},
3678 unicode.Range16{Lo: 0x144, Hi: 0x144, Stride: 0x1},
3679 unicode.Range16{Lo: 0x146, Hi: 0x146, Stride: 0x1},
3680 unicode.Range16{Lo: 0x148, Hi: 0x149, Stride: 0x1},
3681 unicode.Range16{Lo: 0x14b, Hi: 0x14b, Stride: 0x1},
3682 unicode.Range16{Lo: 0x14d, Hi: 0x14d, Stride: 0x1},
3683 unicode.Range16{Lo: 0x14f, Hi: 0x14f, Stride: 0x1},
3684 unicode.Range16{Lo: 0x151, Hi: 0x151, Stride: 0x1},
3685 unicode.Range16{Lo: 0x153, Hi: 0x153, Stride: 0x1},
3686 unicode.Range16{Lo: 0x155, Hi: 0x155, Stride: 0x1},
3687 unicode.Range16{Lo: 0x157, Hi: 0x157, Stride: 0x1},
3688 unicode.Range16{Lo: 0x159, Hi: 0x159, Stride: 0x1},
3689 unicode.Range16{Lo: 0x15b, Hi: 0x15b, Stride: 0x1},
3690 unicode.Range16{Lo: 0x15d, Hi: 0x15d, Stride: 0x1},
3691 unicode.Range16{Lo: 0x15f, Hi: 0x15f, Stride: 0x1},
3692 unicode.Range16{Lo: 0x161, Hi: 0x161, Stride: 0x1},
3693 unicode.Range16{Lo: 0x163, Hi: 0x163, Stride: 0x1},
3694 unicode.Range16{Lo: 0x165, Hi: 0x165, Stride: 0x1},
3695 unicode.Range16{Lo: 0x167, Hi: 0x167, Stride: 0x1},
3696 unicode.Range16{Lo: 0x169, Hi: 0x169, Stride: 0x1},
3697 unicode.Range16{Lo: 0x16b, Hi: 0x16b, Stride: 0x1},
3698 unicode.Range16{Lo: 0x16d, Hi: 0x16d, Stride: 0x1},
3699 unicode.Range16{Lo: 0x16f, Hi: 0x16f, Stride: 0x1},
3700 unicode.Range16{Lo: 0x171, Hi: 0x171, Stride: 0x1},
3701 unicode.Range16{Lo: 0x173, Hi: 0x173, Stride: 0x1},
3702 unicode.Range16{Lo: 0x175, Hi: 0x175, Stride: 0x1},
3703 unicode.Range16{Lo: 0x177, Hi: 0x177, Stride: 0x1},
3704 unicode.Range16{Lo: 0x17a, Hi: 0x17a, Stride: 0x1},
3705 unicode.Range16{Lo: 0x17c, Hi: 0x17c, Stride: 0x1},
3706 unicode.Range16{Lo: 0x17e, Hi: 0x180, Stride: 0x1},
3707 unicode.Range16{Lo: 0x183, Hi: 0x183, Stride: 0x1},
3708 unicode.Range16{Lo: 0x185, Hi: 0x185, Stride: 0x1},
3709 unicode.Range16{Lo: 0x188, Hi: 0x188, Stride: 0x1},
3710 unicode.Range16{Lo: 0x18c, Hi: 0x18d, Stride: 0x1},
3711 unicode.Range16{Lo: 0x192, Hi: 0x192, Stride: 0x1},
3712 unicode.Range16{Lo: 0x195, Hi: 0x195, Stride: 0x1},
3713 unicode.Range16{Lo: 0x199, Hi: 0x19b, Stride: 0x1},
3714 unicode.Range16{Lo: 0x19e, Hi: 0x19e, Stride: 0x1},
3715 unicode.Range16{Lo: 0x1a1, Hi: 0x1a1, Stride: 0x1},
3716 unicode.Range16{Lo: 0x1a3, Hi: 0x1a3, Stride: 0x1},
3717 unicode.Range16{Lo: 0x1a5, Hi: 0x1a5, Stride: 0x1},
3718 unicode.Range16{Lo: 0x1a8, Hi: 0x1a8, Stride: 0x1},
3719 unicode.Range16{Lo: 0x1aa, Hi: 0x1ab, Stride: 0x1},
3720 unicode.Range16{Lo: 0x1ad, Hi: 0x1ad, Stride: 0x1},
3721 unicode.Range16{Lo: 0x1b0, Hi: 0x1b0, Stride: 0x1},
3722 unicode.Range16{Lo: 0x1b4, Hi: 0x1b4, Stride: 0x1},
3723 unicode.Range16{Lo: 0x1b6, Hi: 0x1b6, Stride: 0x1},
3724 unicode.Range16{Lo: 0x1b9, Hi: 0x1ba, Stride: 0x1},
3725 unicode.Range16{Lo: 0x1bd, Hi: 0x1bf, Stride: 0x1},
3726 unicode.Range16{Lo: 0x1c6, Hi: 0x1c6, Stride: 0x1},
3727 unicode.Range16{Lo: 0x1c9, Hi: 0x1c9, Stride: 0x1},
3728 unicode.Range16{Lo: 0x1cc, Hi: 0x1cc, Stride: 0x1},
3729 unicode.Range16{Lo: 0x1ce, Hi: 0x1ce, Stride: 0x1},
3730 unicode.Range16{Lo: 0x1d0, Hi: 0x1d0, Stride: 0x1},
3731 unicode.Range16{Lo: 0x1d2, Hi: 0x1d2, Stride: 0x1},
3732 unicode.Range16{Lo: 0x1d4, Hi: 0x1d4, Stride: 0x1},
3733 unicode.Range16{Lo: 0x1d6, Hi: 0x1d6, Stride: 0x1},
3734 unicode.Range16{Lo: 0x1d8, Hi: 0x1d8, Stride: 0x1},
3735 unicode.Range16{Lo: 0x1da, Hi: 0x1da, Stride: 0x1},
3736 unicode.Range16{Lo: 0x1dc, Hi: 0x1dd, Stride: 0x1},
3737 unicode.Range16{Lo: 0x1df, Hi: 0x1df, Stride: 0x1},
3738 unicode.Range16{Lo: 0x1e1, Hi: 0x1e1, Stride: 0x1},
3739 unicode.Range16{Lo: 0x1e3, Hi: 0x1e3, Stride: 0x1},
3740 unicode.Range16{Lo: 0x1e5, Hi: 0x1e5, Stride: 0x1},
3741 unicode.Range16{Lo: 0x1e7, Hi: 0x1e7, Stride: 0x1},
3742 unicode.Range16{Lo: 0x1e9, Hi: 0x1e9, Stride: 0x1},
3743 unicode.Range16{Lo: 0x1eb, Hi: 0x1eb, Stride: 0x1},
3744 unicode.Range16{Lo: 0x1ed, Hi: 0x1ed, Stride: 0x1},
3745 unicode.Range16{Lo: 0x1ef, Hi: 0x1f0, Stride: 0x1},
3746 unicode.Range16{Lo: 0x1f3, Hi: 0x1f3, Stride: 0x1},
3747 unicode.Range16{Lo: 0x1f5, Hi: 0x1f5, Stride: 0x1},
3748 unicode.Range16{Lo: 0x1f9, Hi: 0x1f9, Stride: 0x1},
3749 unicode.Range16{Lo: 0x1fb, Hi: 0x1fb, Stride: 0x1},
3750 unicode.Range16{Lo: 0x1fd, Hi: 0x1fd, Stride: 0x1},
3751 unicode.Range16{Lo: 0x1ff, Hi: 0x1ff, Stride: 0x1},
3752 unicode.Range16{Lo: 0x201, Hi: 0x201, Stride: 0x1},
3753 unicode.Range16{Lo: 0x203, Hi: 0x203, Stride: 0x1},
3754 unicode.Range16{Lo: 0x205, Hi: 0x205, Stride: 0x1},
3755 unicode.Range16{Lo: 0x207, Hi: 0x207, Stride: 0x1},
3756 unicode.Range16{Lo: 0x209, Hi: 0x209, Stride: 0x1},
3757 unicode.Range16{Lo: 0x20b, Hi: 0x20b, Stride: 0x1},
3758 unicode.Range16{Lo: 0x20d, Hi: 0x20d, Stride: 0x1},
3759 unicode.Range16{Lo: 0x20f, Hi: 0x20f, Stride: 0x1},
3760 unicode.Range16{Lo: 0x211, Hi: 0x211, Stride: 0x1},
3761 unicode.Range16{Lo: 0x213, Hi: 0x213, Stride: 0x1},
3762 unicode.Range16{Lo: 0x215, Hi: 0x215, Stride: 0x1},
3763 unicode.Range16{Lo: 0x217, Hi: 0x217, Stride: 0x1},
3764 unicode.Range16{Lo: 0x219, Hi: 0x219, Stride: 0x1},
3765 unicode.Range16{Lo: 0x21b, Hi: 0x21b, Stride: 0x1},
3766 unicode.Range16{Lo: 0x21d, Hi: 0x21d, Stride: 0x1},
3767 unicode.Range16{Lo: 0x21f, Hi: 0x21f, Stride: 0x1},
3768 unicode.Range16{Lo: 0x221, Hi: 0x221, Stride: 0x1},
3769 unicode.Range16{Lo: 0x223, Hi: 0x223, Stride: 0x1},
3770 unicode.Range16{Lo: 0x225, Hi: 0x225, Stride: 0x1},
3771 unicode.Range16{Lo: 0x227, Hi: 0x227, Stride: 0x1},
3772 unicode.Range16{Lo: 0x229, Hi: 0x229, Stride: 0x1},
3773 unicode.Range16{Lo: 0x22b, Hi: 0x22b, Stride: 0x1},
3774 unicode.Range16{Lo: 0x22d, Hi: 0x22d, Stride: 0x1},
3775 unicode.Range16{Lo: 0x22f, Hi: 0x22f, Stride: 0x1},
3776 unicode.Range16{Lo: 0x231, Hi: 0x231, Stride: 0x1},
3777 unicode.Range16{Lo: 0x233, Hi: 0x239, Stride: 0x1},
3778 unicode.Range16{Lo: 0x23c, Hi: 0x23c, Stride: 0x1},
3779 unicode.Range16{Lo: 0x23f, Hi: 0x240, Stride: 0x1},
3780 unicode.Range16{Lo: 0x242, Hi: 0x242, Stride: 0x1},
3781 unicode.Range16{Lo: 0x247, Hi: 0x247, Stride: 0x1},
3782 unicode.Range16{Lo: 0x249, Hi: 0x249, Stride: 0x1},
3783 unicode.Range16{Lo: 0x24b, Hi: 0x24b, Stride: 0x1},
3784 unicode.Range16{Lo: 0x24d, Hi: 0x24d, Stride: 0x1},
3785 unicode.Range16{Lo: 0x24f, Hi: 0x293, Stride: 0x1},
3786 unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
3787 unicode.Range16{Lo: 0x2b0, Hi: 0x2b8, Stride: 0x1},
3788 unicode.Range16{Lo: 0x2c0, Hi: 0x2c1, Stride: 0x1},
3789 unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
3790 unicode.Range16{Lo: 0x371, Hi: 0x371, Stride: 0x1},
3791 unicode.Range16{Lo: 0x373, Hi: 0x373, Stride: 0x1},
3792 unicode.Range16{Lo: 0x377, Hi: 0x377, Stride: 0x1},
3793 unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
3794 unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
3795 unicode.Range16{Lo: 0x390, Hi: 0x390, Stride: 0x1},
3796 unicode.Range16{Lo: 0x3ac, Hi: 0x3ce, Stride: 0x1},
3797 unicode.Range16{Lo: 0x3d0, Hi: 0x3d1, Stride: 0x1},
3798 unicode.Range16{Lo: 0x3d5, Hi: 0x3d7, Stride: 0x1},
3799 unicode.Range16{Lo: 0x3d9, Hi: 0x3d9, Stride: 0x1},
3800 unicode.Range16{Lo: 0x3db, Hi: 0x3db, Stride: 0x1},
3801 unicode.Range16{Lo: 0x3dd, Hi: 0x3dd, Stride: 0x1},
3802 unicode.Range16{Lo: 0x3df, Hi: 0x3df, Stride: 0x1},
3803 unicode.Range16{Lo: 0x3e1, Hi: 0x3e1, Stride: 0x1},
3804 unicode.Range16{Lo: 0x3e3, Hi: 0x3e3, Stride: 0x1},
3805 unicode.Range16{Lo: 0x3e5, Hi: 0x3e5, Stride: 0x1},
3806 unicode.Range16{Lo: 0x3e7, Hi: 0x3e7, Stride: 0x1},
3807 unicode.Range16{Lo: 0x3e9, Hi: 0x3e9, Stride: 0x1},
3808 unicode.Range16{Lo: 0x3eb, Hi: 0x3eb, Stride: 0x1},
3809 unicode.Range16{Lo: 0x3ed, Hi: 0x3ed, Stride: 0x1},
3810 unicode.Range16{Lo: 0x3ef, Hi: 0x3f3, Stride: 0x1},
3811 unicode.Range16{Lo: 0x3f5, Hi: 0x3f5, Stride: 0x1},
3812 unicode.Range16{Lo: 0x3f8, Hi: 0x3f8, Stride: 0x1},
3813 unicode.Range16{Lo: 0x3fb, Hi: 0x3fc, Stride: 0x1},
3814 unicode.Range16{Lo: 0x430, Hi: 0x45f, Stride: 0x1},
3815 unicode.Range16{Lo: 0x461, Hi: 0x461, Stride: 0x1},
3816 unicode.Range16{Lo: 0x463, Hi: 0x463, Stride: 0x1},
3817 unicode.Range16{Lo: 0x465, Hi: 0x465, Stride: 0x1},
3818 unicode.Range16{Lo: 0x467, Hi: 0x467, Stride: 0x1},
3819 unicode.Range16{Lo: 0x469, Hi: 0x469, Stride: 0x1},
3820 unicode.Range16{Lo: 0x46b, Hi: 0x46b, Stride: 0x1},
3821 unicode.Range16{Lo: 0x46d, Hi: 0x46d, Stride: 0x1},
3822 unicode.Range16{Lo: 0x46f, Hi: 0x46f, Stride: 0x1},
3823 unicode.Range16{Lo: 0x471, Hi: 0x471, Stride: 0x1},
3824 unicode.Range16{Lo: 0x473, Hi: 0x473, Stride: 0x1},
3825 unicode.Range16{Lo: 0x475, Hi: 0x475, Stride: 0x1},
3826 unicode.Range16{Lo: 0x477, Hi: 0x477, Stride: 0x1},
3827 unicode.Range16{Lo: 0x479, Hi: 0x479, Stride: 0x1},
3828 unicode.Range16{Lo: 0x47b, Hi: 0x47b, Stride: 0x1},
3829 unicode.Range16{Lo: 0x47d, Hi: 0x47d, Stride: 0x1},
3830 unicode.Range16{Lo: 0x47f, Hi: 0x47f, Stride: 0x1},
3831 unicode.Range16{Lo: 0x481, Hi: 0x481, Stride: 0x1},
3832 unicode.Range16{Lo: 0x48b, Hi: 0x48b, Stride: 0x1},
3833 unicode.Range16{Lo: 0x48d, Hi: 0x48d, Stride: 0x1},
3834 unicode.Range16{Lo: 0x48f, Hi: 0x48f, Stride: 0x1},
3835 unicode.Range16{Lo: 0x491, Hi: 0x491, Stride: 0x1},
3836 unicode.Range16{Lo: 0x493, Hi: 0x493, Stride: 0x1},
3837 unicode.Range16{Lo: 0x495, Hi: 0x495, Stride: 0x1},
3838 unicode.Range16{Lo: 0x497, Hi: 0x497, Stride: 0x1},
3839 unicode.Range16{Lo: 0x499, Hi: 0x499, Stride: 0x1},
3840 unicode.Range16{Lo: 0x49b, Hi: 0x49b, Stride: 0x1},
3841 unicode.Range16{Lo: 0x49d, Hi: 0x49d, Stride: 0x1},
3842 unicode.Range16{Lo: 0x49f, Hi: 0x49f, Stride: 0x1},
3843 unicode.Range16{Lo: 0x4a1, Hi: 0x4a1, Stride: 0x1},
3844 unicode.Range16{Lo: 0x4a3, Hi: 0x4a3, Stride: 0x1},
3845 unicode.Range16{Lo: 0x4a5, Hi: 0x4a5, Stride: 0x1},
3846 unicode.Range16{Lo: 0x4a7, Hi: 0x4a7, Stride: 0x1},
3847 unicode.Range16{Lo: 0x4a9, Hi: 0x4a9, Stride: 0x1},
3848 unicode.Range16{Lo: 0x4ab, Hi: 0x4ab, Stride: 0x1},
3849 unicode.Range16{Lo: 0x4ad, Hi: 0x4ad, Stride: 0x1},
3850 unicode.Range16{Lo: 0x4af, Hi: 0x4af, Stride: 0x1},
3851 unicode.Range16{Lo: 0x4b1, Hi: 0x4b1, Stride: 0x1},
3852 unicode.Range16{Lo: 0x4b3, Hi: 0x4b3, Stride: 0x1},
3853 unicode.Range16{Lo: 0x4b5, Hi: 0x4b5, Stride: 0x1},
3854 unicode.Range16{Lo: 0x4b7, Hi: 0x4b7, Stride: 0x1},
3855 unicode.Range16{Lo: 0x4b9, Hi: 0x4b9, Stride: 0x1},
3856 unicode.Range16{Lo: 0x4bb, Hi: 0x4bb, Stride: 0x1},
3857 unicode.Range16{Lo: 0x4bd, Hi: 0x4bd, Stride: 0x1},
3858 unicode.Range16{Lo: 0x4bf, Hi: 0x4bf, Stride: 0x1},
3859 unicode.Range16{Lo: 0x4c2, Hi: 0x4c2, Stride: 0x1},
3860 unicode.Range16{Lo: 0x4c4, Hi: 0x4c4, Stride: 0x1},
3861 unicode.Range16{Lo: 0x4c6, Hi: 0x4c6, Stride: 0x1},
3862 unicode.Range16{Lo: 0x4c8, Hi: 0x4c8, Stride: 0x1},
3863 unicode.Range16{Lo: 0x4ca, Hi: 0x4ca, Stride: 0x1},
3864 unicode.Range16{Lo: 0x4cc, Hi: 0x4cc, Stride: 0x1},
3865 unicode.Range16{Lo: 0x4ce, Hi: 0x4cf, Stride: 0x1},
3866 unicode.Range16{Lo: 0x4d1, Hi: 0x4d1, Stride: 0x1},
3867 unicode.Range16{Lo: 0x4d3, Hi: 0x4d3, Stride: 0x1},
3868 unicode.Range16{Lo: 0x4d5, Hi: 0x4d5, Stride: 0x1},
3869 unicode.Range16{Lo: 0x4d7, Hi: 0x4d7, Stride: 0x1},
3870 unicode.Range16{Lo: 0x4d9, Hi: 0x4d9, Stride: 0x1},
3871 unicode.Range16{Lo: 0x4db, Hi: 0x4db, Stride: 0x1},
3872 unicode.Range16{Lo: 0x4dd, Hi: 0x4dd, Stride: 0x1},
3873 unicode.Range16{Lo: 0x4df, Hi: 0x4df, Stride: 0x1},
3874 unicode.Range16{Lo: 0x4e1, Hi: 0x4e1, Stride: 0x1},
3875 unicode.Range16{Lo: 0x4e3, Hi: 0x4e3, Stride: 0x1},
3876 unicode.Range16{Lo: 0x4e5, Hi: 0x4e5, Stride: 0x1},
3877 unicode.Range16{Lo: 0x4e7, Hi: 0x4e7, Stride: 0x1},
3878 unicode.Range16{Lo: 0x4e9, Hi: 0x4e9, Stride: 0x1},
3879 unicode.Range16{Lo: 0x4eb, Hi: 0x4eb, Stride: 0x1},
3880 unicode.Range16{Lo: 0x4ed, Hi: 0x4ed, Stride: 0x1},
3881 unicode.Range16{Lo: 0x4ef, Hi: 0x4ef, Stride: 0x1},
3882 unicode.Range16{Lo: 0x4f1, Hi: 0x4f1, Stride: 0x1},
3883 unicode.Range16{Lo: 0x4f3, Hi: 0x4f3, Stride: 0x1},
3884 unicode.Range16{Lo: 0x4f5, Hi: 0x4f5, Stride: 0x1},
3885 unicode.Range16{Lo: 0x4f7, Hi: 0x4f7, Stride: 0x1},
3886 unicode.Range16{Lo: 0x4f9, Hi: 0x4f9, Stride: 0x1},
3887 unicode.Range16{Lo: 0x4fb, Hi: 0x4fb, Stride: 0x1},
3888 unicode.Range16{Lo: 0x4fd, Hi: 0x4fd, Stride: 0x1},
3889 unicode.Range16{Lo: 0x4ff, Hi: 0x4ff, Stride: 0x1},
3890 unicode.Range16{Lo: 0x501, Hi: 0x501, Stride: 0x1},
3891 unicode.Range16{Lo: 0x503, Hi: 0x503, Stride: 0x1},
3892 unicode.Range16{Lo: 0x505, Hi: 0x505, Stride: 0x1},
3893 unicode.Range16{Lo: 0x507, Hi: 0x507, Stride: 0x1},
3894 unicode.Range16{Lo: 0x509, Hi: 0x509, Stride: 0x1},
3895 unicode.Range16{Lo: 0x50b, Hi: 0x50b, Stride: 0x1},
3896 unicode.Range16{Lo: 0x50d, Hi: 0x50d, Stride: 0x1},
3897 unicode.Range16{Lo: 0x50f, Hi: 0x50f, Stride: 0x1},
3898 unicode.Range16{Lo: 0x511, Hi: 0x511, Stride: 0x1},
3899 unicode.Range16{Lo: 0x513, Hi: 0x513, Stride: 0x1},
3900 unicode.Range16{Lo: 0x515, Hi: 0x515, Stride: 0x1},
3901 unicode.Range16{Lo: 0x517, Hi: 0x517, Stride: 0x1},
3902 unicode.Range16{Lo: 0x519, Hi: 0x519, Stride: 0x1},
3903 unicode.Range16{Lo: 0x51b, Hi: 0x51b, Stride: 0x1},
3904 unicode.Range16{Lo: 0x51d, Hi: 0x51d, Stride: 0x1},
3905 unicode.Range16{Lo: 0x51f, Hi: 0x51f, Stride: 0x1},
3906 unicode.Range16{Lo: 0x521, Hi: 0x521, Stride: 0x1},
3907 unicode.Range16{Lo: 0x523, Hi: 0x523, Stride: 0x1},
3908 unicode.Range16{Lo: 0x525, Hi: 0x525, Stride: 0x1},
3909 unicode.Range16{Lo: 0x527, Hi: 0x527, Stride: 0x1},
3910 unicode.Range16{Lo: 0x529, Hi: 0x529, Stride: 0x1},
3911 unicode.Range16{Lo: 0x52b, Hi: 0x52b, Stride: 0x1},
3912 unicode.Range16{Lo: 0x52d, Hi: 0x52d, Stride: 0x1},
3913 unicode.Range16{Lo: 0x52f, Hi: 0x52f, Stride: 0x1},
3914 unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1},
3915 unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1},
3916 unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1},
3917 unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1},
3918 unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1},
3919 unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1},
3920 unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1},
3921 unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1},
3922 unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1},
3923 unicode.Range16{Lo: 0x1e01, Hi: 0x1e01, Stride: 0x1},
3924 unicode.Range16{Lo: 0x1e03, Hi: 0x1e03, Stride: 0x1},
3925 unicode.Range16{Lo: 0x1e05, Hi: 0x1e05, Stride: 0x1},
3926 unicode.Range16{Lo: 0x1e07, Hi: 0x1e07, Stride: 0x1},
3927 unicode.Range16{Lo: 0x1e09, Hi: 0x1e09, Stride: 0x1},
3928 unicode.Range16{Lo: 0x1e0b, Hi: 0x1e0b, Stride: 0x1},
3929 unicode.Range16{Lo: 0x1e0d, Hi: 0x1e0d, Stride: 0x1},
3930 unicode.Range16{Lo: 0x1e0f, Hi: 0x1e0f, Stride: 0x1},
3931 unicode.Range16{Lo: 0x1e11, Hi: 0x1e11, Stride: 0x1},
3932 unicode.Range16{Lo: 0x1e13, Hi: 0x1e13, Stride: 0x1},
3933 unicode.Range16{Lo: 0x1e15, Hi: 0x1e15, Stride: 0x1},
3934 unicode.Range16{Lo: 0x1e17, Hi: 0x1e17, Stride: 0x1},
3935 unicode.Range16{Lo: 0x1e19, Hi: 0x1e19, Stride: 0x1},
3936 unicode.Range16{Lo: 0x1e1b, Hi: 0x1e1b, Stride: 0x1},
3937 unicode.Range16{Lo: 0x1e1d, Hi: 0x1e1d, Stride: 0x1},
3938 unicode.Range16{Lo: 0x1e1f, Hi: 0x1e1f, Stride: 0x1},
3939 unicode.Range16{Lo: 0x1e21, Hi: 0x1e21, Stride: 0x1},
3940 unicode.Range16{Lo: 0x1e23, Hi: 0x1e23, Stride: 0x1},
3941 unicode.Range16{Lo: 0x1e25, Hi: 0x1e25, Stride: 0x1},
3942 unicode.Range16{Lo: 0x1e27, Hi: 0x1e27, Stride: 0x1},
3943 unicode.Range16{Lo: 0x1e29, Hi: 0x1e29, Stride: 0x1},
3944 unicode.Range16{Lo: 0x1e2b, Hi: 0x1e2b, Stride: 0x1},
3945 unicode.Range16{Lo: 0x1e2d, Hi: 0x1e2d, Stride: 0x1},
3946 unicode.Range16{Lo: 0x1e2f, Hi: 0x1e2f, Stride: 0x1},
3947 unicode.Range16{Lo: 0x1e31, Hi: 0x1e31, Stride: 0x1},
3948 unicode.Range16{Lo: 0x1e33, Hi: 0x1e33, Stride: 0x1},
3949 unicode.Range16{Lo: 0x1e35, Hi: 0x1e35, Stride: 0x1},
3950 unicode.Range16{Lo: 0x1e37, Hi: 0x1e37, Stride: 0x1},
3951 unicode.Range16{Lo: 0x1e39, Hi: 0x1e39, Stride: 0x1},
3952 unicode.Range16{Lo: 0x1e3b, Hi: 0x1e3b, Stride: 0x1},
3953 unicode.Range16{Lo: 0x1e3d, Hi: 0x1e3d, Stride: 0x1},
3954 unicode.Range16{Lo: 0x1e3f, Hi: 0x1e3f, Stride: 0x1},
3955 unicode.Range16{Lo: 0x1e41, Hi: 0x1e41, Stride: 0x1},
3956 unicode.Range16{Lo: 0x1e43, Hi: 0x1e43, Stride: 0x1},
3957 unicode.Range16{Lo: 0x1e45, Hi: 0x1e45, Stride: 0x1},
3958 unicode.Range16{Lo: 0x1e47, Hi: 0x1e47, Stride: 0x1},
3959 unicode.Range16{Lo: 0x1e49, Hi: 0x1e49, Stride: 0x1},
3960 unicode.Range16{Lo: 0x1e4b, Hi: 0x1e4b, Stride: 0x1},
3961 unicode.Range16{Lo: 0x1e4d, Hi: 0x1e4d, Stride: 0x1},
3962 unicode.Range16{Lo: 0x1e4f, Hi: 0x1e4f, Stride: 0x1},
3963 unicode.Range16{Lo: 0x1e51, Hi: 0x1e51, Stride: 0x1},
3964 unicode.Range16{Lo: 0x1e53, Hi: 0x1e53, Stride: 0x1},
3965 unicode.Range16{Lo: 0x1e55, Hi: 0x1e55, Stride: 0x1},
3966 unicode.Range16{Lo: 0x1e57, Hi: 0x1e57, Stride: 0x1},
3967 unicode.Range16{Lo: 0x1e59, Hi: 0x1e59, Stride: 0x1},
3968 unicode.Range16{Lo: 0x1e5b, Hi: 0x1e5b, Stride: 0x1},
3969 unicode.Range16{Lo: 0x1e5d, Hi: 0x1e5d, Stride: 0x1},
3970 unicode.Range16{Lo: 0x1e5f, Hi: 0x1e5f, Stride: 0x1},
3971 unicode.Range16{Lo: 0x1e61, Hi: 0x1e61, Stride: 0x1},
3972 unicode.Range16{Lo: 0x1e63, Hi: 0x1e63, Stride: 0x1},
3973 unicode.Range16{Lo: 0x1e65, Hi: 0x1e65, Stride: 0x1},
3974 unicode.Range16{Lo: 0x1e67, Hi: 0x1e67, Stride: 0x1},
3975 unicode.Range16{Lo: 0x1e69, Hi: 0x1e69, Stride: 0x1},
3976 unicode.Range16{Lo: 0x1e6b, Hi: 0x1e6b, Stride: 0x1},
3977 unicode.Range16{Lo: 0x1e6d, Hi: 0x1e6d, Stride: 0x1},
3978 unicode.Range16{Lo: 0x1e6f, Hi: 0x1e6f, Stride: 0x1},
3979 unicode.Range16{Lo: 0x1e71, Hi: 0x1e71, Stride: 0x1},
3980 unicode.Range16{Lo: 0x1e73, Hi: 0x1e73, Stride: 0x1},
3981 unicode.Range16{Lo: 0x1e75, Hi: 0x1e75, Stride: 0x1},
3982 unicode.Range16{Lo: 0x1e77, Hi: 0x1e77, Stride: 0x1},
3983 unicode.Range16{Lo: 0x1e79, Hi: 0x1e79, Stride: 0x1},
3984 unicode.Range16{Lo: 0x1e7b, Hi: 0x1e7b, Stride: 0x1},
3985 unicode.Range16{Lo: 0x1e7d, Hi: 0x1e7d, Stride: 0x1},
3986 unicode.Range16{Lo: 0x1e7f, Hi: 0x1e7f, Stride: 0x1},
3987 unicode.Range16{Lo: 0x1e81, Hi: 0x1e81, Stride: 0x1},
3988 unicode.Range16{Lo: 0x1e83, Hi: 0x1e83, Stride: 0x1},
3989 unicode.Range16{Lo: 0x1e85, Hi: 0x1e85, Stride: 0x1},
3990 unicode.Range16{Lo: 0x1e87, Hi: 0x1e87, Stride: 0x1},
3991 unicode.Range16{Lo: 0x1e89, Hi: 0x1e89, Stride: 0x1},
3992 unicode.Range16{Lo: 0x1e8b, Hi: 0x1e8b, Stride: 0x1},
3993 unicode.Range16{Lo: 0x1e8d, Hi: 0x1e8d, Stride: 0x1},
3994 unicode.Range16{Lo: 0x1e8f, Hi: 0x1e8f, Stride: 0x1},
3995 unicode.Range16{Lo: 0x1e91, Hi: 0x1e91, Stride: 0x1},
3996 unicode.Range16{Lo: 0x1e93, Hi: 0x1e93, Stride: 0x1},
3997 unicode.Range16{Lo: 0x1e95, Hi: 0x1e9d, Stride: 0x1},
3998 unicode.Range16{Lo: 0x1e9f, Hi: 0x1e9f, Stride: 0x1},
3999 unicode.Range16{Lo: 0x1ea1, Hi: 0x1ea1, Stride: 0x1},
4000 unicode.Range16{Lo: 0x1ea3, Hi: 0x1ea3, Stride: 0x1},
4001 unicode.Range16{Lo: 0x1ea5, Hi: 0x1ea5, Stride: 0x1},
4002 unicode.Range16{Lo: 0x1ea7, Hi: 0x1ea7, Stride: 0x1},
4003 unicode.Range16{Lo: 0x1ea9, Hi: 0x1ea9, Stride: 0x1},
4004 unicode.Range16{Lo: 0x1eab, Hi: 0x1eab, Stride: 0x1},
4005 unicode.Range16{Lo: 0x1ead, Hi: 0x1ead, Stride: 0x1},
4006 unicode.Range16{Lo: 0x1eaf, Hi: 0x1eaf, Stride: 0x1},
4007 unicode.Range16{Lo: 0x1eb1, Hi: 0x1eb1, Stride: 0x1},
4008 unicode.Range16{Lo: 0x1eb3, Hi: 0x1eb3, Stride: 0x1},
4009 unicode.Range16{Lo: 0x1eb5, Hi: 0x1eb5, Stride: 0x1},
4010 unicode.Range16{Lo: 0x1eb7, Hi: 0x1eb7, Stride: 0x1},
4011 unicode.Range16{Lo: 0x1eb9, Hi: 0x1eb9, Stride: 0x1},
4012 unicode.Range16{Lo: 0x1ebb, Hi: 0x1ebb, Stride: 0x1},
4013 unicode.Range16{Lo: 0x1ebd, Hi: 0x1ebd, Stride: 0x1},
4014 unicode.Range16{Lo: 0x1ebf, Hi: 0x1ebf, Stride: 0x1},
4015 unicode.Range16{Lo: 0x1ec1, Hi: 0x1ec1, Stride: 0x1},
4016 unicode.Range16{Lo: 0x1ec3, Hi: 0x1ec3, Stride: 0x1},
4017 unicode.Range16{Lo: 0x1ec5, Hi: 0x1ec5, Stride: 0x1},
4018 unicode.Range16{Lo: 0x1ec7, Hi: 0x1ec7, Stride: 0x1},
4019 unicode.Range16{Lo: 0x1ec9, Hi: 0x1ec9, Stride: 0x1},
4020 unicode.Range16{Lo: 0x1ecb, Hi: 0x1ecb, Stride: 0x1},
4021 unicode.Range16{Lo: 0x1ecd, Hi: 0x1ecd, Stride: 0x1},
4022 unicode.Range16{Lo: 0x1ecf, Hi: 0x1ecf, Stride: 0x1},
4023 unicode.Range16{Lo: 0x1ed1, Hi: 0x1ed1, Stride: 0x1},
4024 unicode.Range16{Lo: 0x1ed3, Hi: 0x1ed3, Stride: 0x1},
4025 unicode.Range16{Lo: 0x1ed5, Hi: 0x1ed5, Stride: 0x1},
4026 unicode.Range16{Lo: 0x1ed7, Hi: 0x1ed7, Stride: 0x1},
4027 unicode.Range16{Lo: 0x1ed9, Hi: 0x1ed9, Stride: 0x1},
4028 unicode.Range16{Lo: 0x1edb, Hi: 0x1edb, Stride: 0x1},
4029 unicode.Range16{Lo: 0x1edd, Hi: 0x1edd, Stride: 0x1},
4030 unicode.Range16{Lo: 0x1edf, Hi: 0x1edf, Stride: 0x1},
4031 unicode.Range16{Lo: 0x1ee1, Hi: 0x1ee1, Stride: 0x1},
4032 unicode.Range16{Lo: 0x1ee3, Hi: 0x1ee3, Stride: 0x1},
4033 unicode.Range16{Lo: 0x1ee5, Hi: 0x1ee5, Stride: 0x1},
4034 unicode.Range16{Lo: 0x1ee7, Hi: 0x1ee7, Stride: 0x1},
4035 unicode.Range16{Lo: 0x1ee9, Hi: 0x1ee9, Stride: 0x1},
4036 unicode.Range16{Lo: 0x1eeb, Hi: 0x1eeb, Stride: 0x1},
4037 unicode.Range16{Lo: 0x1eed, Hi: 0x1eed, Stride: 0x1},
4038 unicode.Range16{Lo: 0x1eef, Hi: 0x1eef, Stride: 0x1},
4039 unicode.Range16{Lo: 0x1ef1, Hi: 0x1ef1, Stride: 0x1},
4040 unicode.Range16{Lo: 0x1ef3, Hi: 0x1ef3, Stride: 0x1},
4041 unicode.Range16{Lo: 0x1ef5, Hi: 0x1ef5, Stride: 0x1},
4042 unicode.Range16{Lo: 0x1ef7, Hi: 0x1ef7, Stride: 0x1},
4043 unicode.Range16{Lo: 0x1ef9, Hi: 0x1ef9, Stride: 0x1},
4044 unicode.Range16{Lo: 0x1efb, Hi: 0x1efb, Stride: 0x1},
4045 unicode.Range16{Lo: 0x1efd, Hi: 0x1efd, Stride: 0x1},
4046 unicode.Range16{Lo: 0x1eff, Hi: 0x1f07, Stride: 0x1},
4047 unicode.Range16{Lo: 0x1f10, Hi: 0x1f15, Stride: 0x1},
4048 unicode.Range16{Lo: 0x1f20, Hi: 0x1f27, Stride: 0x1},
4049 unicode.Range16{Lo: 0x1f30, Hi: 0x1f37, Stride: 0x1},
4050 unicode.Range16{Lo: 0x1f40, Hi: 0x1f45, Stride: 0x1},
4051 unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1},
4052 unicode.Range16{Lo: 0x1f60, Hi: 0x1f67, Stride: 0x1},
4053 unicode.Range16{Lo: 0x1f70, Hi: 0x1f7d, Stride: 0x1},
4054 unicode.Range16{Lo: 0x1f80, Hi: 0x1f87, Stride: 0x1},
4055 unicode.Range16{Lo: 0x1f90, Hi: 0x1f97, Stride: 0x1},
4056 unicode.Range16{Lo: 0x1fa0, Hi: 0x1fa7, Stride: 0x1},
4057 unicode.Range16{Lo: 0x1fb0, Hi: 0x1fb4, Stride: 0x1},
4058 unicode.Range16{Lo: 0x1fb6, Hi: 0x1fb7, Stride: 0x1},
4059 unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1},
4060 unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1},
4061 unicode.Range16{Lo: 0x1fc6, Hi: 0x1fc7, Stride: 0x1},
4062 unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1},
4063 unicode.Range16{Lo: 0x1fd6, Hi: 0x1fd7, Stride: 0x1},
4064 unicode.Range16{Lo: 0x1fe0, Hi: 0x1fe7, Stride: 0x1},
4065 unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1},
4066 unicode.Range16{Lo: 0x1ff6, Hi: 0x1ff7, Stride: 0x1},
4067 unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1},
4068 unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1},
4069 unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1},
4070 unicode.Range16{Lo: 0x210a, Hi: 0x210a, Stride: 0x1},
4071 unicode.Range16{Lo: 0x210e, Hi: 0x210f, Stride: 0x1},
4072 unicode.Range16{Lo: 0x2113, Hi: 0x2113, Stride: 0x1},
4073 unicode.Range16{Lo: 0x212f, Hi: 0x212f, Stride: 0x1},
4074 unicode.Range16{Lo: 0x2134, Hi: 0x2134, Stride: 0x1},
4075 unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1},
4076 unicode.Range16{Lo: 0x213c, Hi: 0x213d, Stride: 0x1},
4077 unicode.Range16{Lo: 0x2146, Hi: 0x2149, Stride: 0x1},
4078 unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1},
4079 unicode.Range16{Lo: 0x2170, Hi: 0x217f, Stride: 0x1},
4080 unicode.Range16{Lo: 0x2184, Hi: 0x2184, Stride: 0x1},
4081 unicode.Range16{Lo: 0x24d0, Hi: 0x24e9, Stride: 0x1},
4082 unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1},
4083 unicode.Range16{Lo: 0x2c61, Hi: 0x2c61, Stride: 0x1},
4084 unicode.Range16{Lo: 0x2c65, Hi: 0x2c66, Stride: 0x1},
4085 unicode.Range16{Lo: 0x2c68, Hi: 0x2c68, Stride: 0x1},
4086 unicode.Range16{Lo: 0x2c6a, Hi: 0x2c6a, Stride: 0x1},
4087 unicode.Range16{Lo: 0x2c6c, Hi: 0x2c6c, Stride: 0x1},
4088 unicode.Range16{Lo: 0x2c71, Hi: 0x2c71, Stride: 0x1},
4089 unicode.Range16{Lo: 0x2c73, Hi: 0x2c74, Stride: 0x1},
4090 unicode.Range16{Lo: 0x2c76, Hi: 0x2c7b, Stride: 0x1},
4091 unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1},
4092 unicode.Range16{Lo: 0x2c81, Hi: 0x2c81, Stride: 0x1},
4093 unicode.Range16{Lo: 0x2c83, Hi: 0x2c83, Stride: 0x1},
4094 unicode.Range16{Lo: 0x2c85, Hi: 0x2c85, Stride: 0x1},
4095 unicode.Range16{Lo: 0x2c87, Hi: 0x2c87, Stride: 0x1},
4096 unicode.Range16{Lo: 0x2c89, Hi: 0x2c89, Stride: 0x1},
4097 unicode.Range16{Lo: 0x2c8b, Hi: 0x2c8b, Stride: 0x1},
4098 unicode.Range16{Lo: 0x2c8d, Hi: 0x2c8d, Stride: 0x1},
4099 unicode.Range16{Lo: 0x2c8f, Hi: 0x2c8f, Stride: 0x1},
4100 unicode.Range16{Lo: 0x2c91, Hi: 0x2c91, Stride: 0x1},
4101 unicode.Range16{Lo: 0x2c93, Hi: 0x2c93, Stride: 0x1},
4102 unicode.Range16{Lo: 0x2c95, Hi: 0x2c95, Stride: 0x1},
4103 unicode.Range16{Lo: 0x2c97, Hi: 0x2c97, Stride: 0x1},
4104 unicode.Range16{Lo: 0x2c99, Hi: 0x2c99, Stride: 0x1},
4105 unicode.Range16{Lo: 0x2c9b, Hi: 0x2c9b, Stride: 0x1},
4106 unicode.Range16{Lo: 0x2c9d, Hi: 0x2c9d, Stride: 0x1},
4107 unicode.Range16{Lo: 0x2c9f, Hi: 0x2c9f, Stride: 0x1},
4108 unicode.Range16{Lo: 0x2ca1, Hi: 0x2ca1, Stride: 0x1},
4109 unicode.Range16{Lo: 0x2ca3, Hi: 0x2ca3, Stride: 0x1},
4110 unicode.Range16{Lo: 0x2ca5, Hi: 0x2ca5, Stride: 0x1},
4111 unicode.Range16{Lo: 0x2ca7, Hi: 0x2ca7, Stride: 0x1},
4112 unicode.Range16{Lo: 0x2ca9, Hi: 0x2ca9, Stride: 0x1},
4113 unicode.Range16{Lo: 0x2cab, Hi: 0x2cab, Stride: 0x1},
4114 unicode.Range16{Lo: 0x2cad, Hi: 0x2cad, Stride: 0x1},
4115 unicode.Range16{Lo: 0x2caf, Hi: 0x2caf, Stride: 0x1},
4116 unicode.Range16{Lo: 0x2cb1, Hi: 0x2cb1, Stride: 0x1},
4117 unicode.Range16{Lo: 0x2cb3, Hi: 0x2cb3, Stride: 0x1},
4118 unicode.Range16{Lo: 0x2cb5, Hi: 0x2cb5, Stride: 0x1},
4119 unicode.Range16{Lo: 0x2cb7, Hi: 0x2cb7, Stride: 0x1},
4120 unicode.Range16{Lo: 0x2cb9, Hi: 0x2cb9, Stride: 0x1},
4121 unicode.Range16{Lo: 0x2cbb, Hi: 0x2cbb, Stride: 0x1},
4122 unicode.Range16{Lo: 0x2cbd, Hi: 0x2cbd, Stride: 0x1},
4123 unicode.Range16{Lo: 0x2cbf, Hi: 0x2cbf, Stride: 0x1},
4124 unicode.Range16{Lo: 0x2cc1, Hi: 0x2cc1, Stride: 0x1},
4125 unicode.Range16{Lo: 0x2cc3, Hi: 0x2cc3, Stride: 0x1},
4126 unicode.Range16{Lo: 0x2cc5, Hi: 0x2cc5, Stride: 0x1},
4127 unicode.Range16{Lo: 0x2cc7, Hi: 0x2cc7, Stride: 0x1},
4128 unicode.Range16{Lo: 0x2cc9, Hi: 0x2cc9, Stride: 0x1},
4129 unicode.Range16{Lo: 0x2ccb, Hi: 0x2ccb, Stride: 0x1},
4130 unicode.Range16{Lo: 0x2ccd, Hi: 0x2ccd, Stride: 0x1},
4131 unicode.Range16{Lo: 0x2ccf, Hi: 0x2ccf, Stride: 0x1},
4132 unicode.Range16{Lo: 0x2cd1, Hi: 0x2cd1, Stride: 0x1},
4133 unicode.Range16{Lo: 0x2cd3, Hi: 0x2cd3, Stride: 0x1},
4134 unicode.Range16{Lo: 0x2cd5, Hi: 0x2cd5, Stride: 0x1},
4135 unicode.Range16{Lo: 0x2cd7, Hi: 0x2cd7, Stride: 0x1},
4136 unicode.Range16{Lo: 0x2cd9, Hi: 0x2cd9, Stride: 0x1},
4137 unicode.Range16{Lo: 0x2cdb, Hi: 0x2cdb, Stride: 0x1},
4138 unicode.Range16{Lo: 0x2cdd, Hi: 0x2cdd, Stride: 0x1},
4139 unicode.Range16{Lo: 0x2cdf, Hi: 0x2cdf, Stride: 0x1},
4140 unicode.Range16{Lo: 0x2ce1, Hi: 0x2ce1, Stride: 0x1},
4141 unicode.Range16{Lo: 0x2ce3, Hi: 0x2ce4, Stride: 0x1},
4142 unicode.Range16{Lo: 0x2cec, Hi: 0x2cec, Stride: 0x1},
4143 unicode.Range16{Lo: 0x2cee, Hi: 0x2cee, Stride: 0x1},
4144 unicode.Range16{Lo: 0x2cf3, Hi: 0x2cf3, Stride: 0x1},
4145 unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1},
4146 unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1},
4147 unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1},
4148 unicode.Range16{Lo: 0xa641, Hi: 0xa641, Stride: 0x1},
4149 unicode.Range16{Lo: 0xa643, Hi: 0xa643, Stride: 0x1},
4150 unicode.Range16{Lo: 0xa645, Hi: 0xa645, Stride: 0x1},
4151 unicode.Range16{Lo: 0xa647, Hi: 0xa647, Stride: 0x1},
4152 unicode.Range16{Lo: 0xa649, Hi: 0xa649, Stride: 0x1},
4153 unicode.Range16{Lo: 0xa64b, Hi: 0xa64b, Stride: 0x1},
4154 unicode.Range16{Lo: 0xa64d, Hi: 0xa64d, Stride: 0x1},
4155 unicode.Range16{Lo: 0xa64f, Hi: 0xa64f, Stride: 0x1},
4156 unicode.Range16{Lo: 0xa651, Hi: 0xa651, Stride: 0x1},
4157 unicode.Range16{Lo: 0xa653, Hi: 0xa653, Stride: 0x1},
4158 unicode.Range16{Lo: 0xa655, Hi: 0xa655, Stride: 0x1},
4159 unicode.Range16{Lo: 0xa657, Hi: 0xa657, Stride: 0x1},
4160 unicode.Range16{Lo: 0xa659, Hi: 0xa659, Stride: 0x1},
4161 unicode.Range16{Lo: 0xa65b, Hi: 0xa65b, Stride: 0x1},
4162 unicode.Range16{Lo: 0xa65d, Hi: 0xa65d, Stride: 0x1},
4163 unicode.Range16{Lo: 0xa65f, Hi: 0xa65f, Stride: 0x1},
4164 unicode.Range16{Lo: 0xa661, Hi: 0xa661, Stride: 0x1},
4165 unicode.Range16{Lo: 0xa663, Hi: 0xa663, Stride: 0x1},
4166 unicode.Range16{Lo: 0xa665, Hi: 0xa665, Stride: 0x1},
4167 unicode.Range16{Lo: 0xa667, Hi: 0xa667, Stride: 0x1},
4168 unicode.Range16{Lo: 0xa669, Hi: 0xa669, Stride: 0x1},
4169 unicode.Range16{Lo: 0xa66b, Hi: 0xa66b, Stride: 0x1},
4170 unicode.Range16{Lo: 0xa66d, Hi: 0xa66d, Stride: 0x1},
4171 unicode.Range16{Lo: 0xa681, Hi: 0xa681, Stride: 0x1},
4172 unicode.Range16{Lo: 0xa683, Hi: 0xa683, Stride: 0x1},
4173 unicode.Range16{Lo: 0xa685, Hi: 0xa685, Stride: 0x1},
4174 unicode.Range16{Lo: 0xa687, Hi: 0xa687, Stride: 0x1},
4175 unicode.Range16{Lo: 0xa689, Hi: 0xa689, Stride: 0x1},
4176 unicode.Range16{Lo: 0xa68b, Hi: 0xa68b, Stride: 0x1},
4177 unicode.Range16{Lo: 0xa68d, Hi: 0xa68d, Stride: 0x1},
4178 unicode.Range16{Lo: 0xa68f, Hi: 0xa68f, Stride: 0x1},
4179 unicode.Range16{Lo: 0xa691, Hi: 0xa691, Stride: 0x1},
4180 unicode.Range16{Lo: 0xa693, Hi: 0xa693, Stride: 0x1},
4181 unicode.Range16{Lo: 0xa695, Hi: 0xa695, Stride: 0x1},
4182 unicode.Range16{Lo: 0xa697, Hi: 0xa697, Stride: 0x1},
4183 unicode.Range16{Lo: 0xa699, Hi: 0xa699, Stride: 0x1},
4184 unicode.Range16{Lo: 0xa69b, Hi: 0xa69b, Stride: 0x1},
4185 unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1},
4186 unicode.Range16{Lo: 0xa723, Hi: 0xa723, Stride: 0x1},
4187 unicode.Range16{Lo: 0xa725, Hi: 0xa725, Stride: 0x1},
4188 unicode.Range16{Lo: 0xa727, Hi: 0xa727, Stride: 0x1},
4189 unicode.Range16{Lo: 0xa729, Hi: 0xa729, Stride: 0x1},
4190 unicode.Range16{Lo: 0xa72b, Hi: 0xa72b, Stride: 0x1},
4191 unicode.Range16{Lo: 0xa72d, Hi: 0xa72d, Stride: 0x1},
4192 unicode.Range16{Lo: 0xa72f, Hi: 0xa731, Stride: 0x1},
4193 unicode.Range16{Lo: 0xa733, Hi: 0xa733, Stride: 0x1},
4194 unicode.Range16{Lo: 0xa735, Hi: 0xa735, Stride: 0x1},
4195 unicode.Range16{Lo: 0xa737, Hi: 0xa737, Stride: 0x1},
4196 unicode.Range16{Lo: 0xa739, Hi: 0xa739, Stride: 0x1},
4197 unicode.Range16{Lo: 0xa73b, Hi: 0xa73b, Stride: 0x1},
4198 unicode.Range16{Lo: 0xa73d, Hi: 0xa73d, Stride: 0x1},
4199 unicode.Range16{Lo: 0xa73f, Hi: 0xa73f, Stride: 0x1},
4200 unicode.Range16{Lo: 0xa741, Hi: 0xa741, Stride: 0x1},
4201 unicode.Range16{Lo: 0xa743, Hi: 0xa743, Stride: 0x1},
4202 unicode.Range16{Lo: 0xa745, Hi: 0xa745, Stride: 0x1},
4203 unicode.Range16{Lo: 0xa747, Hi: 0xa747, Stride: 0x1},
4204 unicode.Range16{Lo: 0xa749, Hi: 0xa749, Stride: 0x1},
4205 unicode.Range16{Lo: 0xa74b, Hi: 0xa74b, Stride: 0x1},
4206 unicode.Range16{Lo: 0xa74d, Hi: 0xa74d, Stride: 0x1},
4207 unicode.Range16{Lo: 0xa74f, Hi: 0xa74f, Stride: 0x1},
4208 unicode.Range16{Lo: 0xa751, Hi: 0xa751, Stride: 0x1},
4209 unicode.Range16{Lo: 0xa753, Hi: 0xa753, Stride: 0x1},
4210 unicode.Range16{Lo: 0xa755, Hi: 0xa755, Stride: 0x1},
4211 unicode.Range16{Lo: 0xa757, Hi: 0xa757, Stride: 0x1},
4212 unicode.Range16{Lo: 0xa759, Hi: 0xa759, Stride: 0x1},
4213 unicode.Range16{Lo: 0xa75b, Hi: 0xa75b, Stride: 0x1},
4214 unicode.Range16{Lo: 0xa75d, Hi: 0xa75d, Stride: 0x1},
4215 unicode.Range16{Lo: 0xa75f, Hi: 0xa75f, Stride: 0x1},
4216 unicode.Range16{Lo: 0xa761, Hi: 0xa761, Stride: 0x1},
4217 unicode.Range16{Lo: 0xa763, Hi: 0xa763, Stride: 0x1},
4218 unicode.Range16{Lo: 0xa765, Hi: 0xa765, Stride: 0x1},
4219 unicode.Range16{Lo: 0xa767, Hi: 0xa767, Stride: 0x1},
4220 unicode.Range16{Lo: 0xa769, Hi: 0xa769, Stride: 0x1},
4221 unicode.Range16{Lo: 0xa76b, Hi: 0xa76b, Stride: 0x1},
4222 unicode.Range16{Lo: 0xa76d, Hi: 0xa76d, Stride: 0x1},
4223 unicode.Range16{Lo: 0xa76f, Hi: 0xa76f, Stride: 0x1},
4224 unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1},
4225 unicode.Range16{Lo: 0xa771, Hi: 0xa778, Stride: 0x1},
4226 unicode.Range16{Lo: 0xa77a, Hi: 0xa77a, Stride: 0x1},
4227 unicode.Range16{Lo: 0xa77c, Hi: 0xa77c, Stride: 0x1},
4228 unicode.Range16{Lo: 0xa77f, Hi: 0xa77f, Stride: 0x1},
4229 unicode.Range16{Lo: 0xa781, Hi: 0xa781, Stride: 0x1},
4230 unicode.Range16{Lo: 0xa783, Hi: 0xa783, Stride: 0x1},
4231 unicode.Range16{Lo: 0xa785, Hi: 0xa785, Stride: 0x1},
4232 unicode.Range16{Lo: 0xa787, Hi: 0xa787, Stride: 0x1},
4233 unicode.Range16{Lo: 0xa78c, Hi: 0xa78c, Stride: 0x1},
4234 unicode.Range16{Lo: 0xa78e, Hi: 0xa78e, Stride: 0x1},
4235 unicode.Range16{Lo: 0xa791, Hi: 0xa791, Stride: 0x1},
4236 unicode.Range16{Lo: 0xa793, Hi: 0xa795, Stride: 0x1},
4237 unicode.Range16{Lo: 0xa797, Hi: 0xa797, Stride: 0x1},
4238 unicode.Range16{Lo: 0xa799, Hi: 0xa799, Stride: 0x1},
4239 unicode.Range16{Lo: 0xa79b, Hi: 0xa79b, Stride: 0x1},
4240 unicode.Range16{Lo: 0xa79d, Hi: 0xa79d, Stride: 0x1},
4241 unicode.Range16{Lo: 0xa79f, Hi: 0xa79f, Stride: 0x1},
4242 unicode.Range16{Lo: 0xa7a1, Hi: 0xa7a1, Stride: 0x1},
4243 unicode.Range16{Lo: 0xa7a3, Hi: 0xa7a3, Stride: 0x1},
4244 unicode.Range16{Lo: 0xa7a5, Hi: 0xa7a5, Stride: 0x1},
4245 unicode.Range16{Lo: 0xa7a7, Hi: 0xa7a7, Stride: 0x1},
4246 unicode.Range16{Lo: 0xa7a9, Hi: 0xa7a9, Stride: 0x1},
4247 unicode.Range16{Lo: 0xa7b5, Hi: 0xa7b5, Stride: 0x1},
4248 unicode.Range16{Lo: 0xa7b7, Hi: 0xa7b7, Stride: 0x1},
4249 unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1},
4250 unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1},
4251 unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1},
4252 unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1},
4253 unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1},
4254 unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1},
4255 unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1},
4256 unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1},
4257 unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1},
4258 },
4259 R32: []unicode.Range32{
4260 unicode.Range32{Lo: 0x10428, Hi: 0x1044f, Stride: 0x1},
4261 unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1},
4262 unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1},
4263 unicode.Range32{Lo: 0x118c0, Hi: 0x118df, Stride: 0x1},
4264 unicode.Range32{Lo: 0x1d41a, Hi: 0x1d433, Stride: 0x1},
4265 unicode.Range32{Lo: 0x1d44e, Hi: 0x1d454, Stride: 0x1},
4266 unicode.Range32{Lo: 0x1d456, Hi: 0x1d467, Stride: 0x1},
4267 unicode.Range32{Lo: 0x1d482, Hi: 0x1d49b, Stride: 0x1},
4268 unicode.Range32{Lo: 0x1d4b6, Hi: 0x1d4b9, Stride: 0x1},
4269 unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1},
4270 unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1},
4271 unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d4cf, Stride: 0x1},
4272 unicode.Range32{Lo: 0x1d4ea, Hi: 0x1d503, Stride: 0x1},
4273 unicode.Range32{Lo: 0x1d51e, Hi: 0x1d537, Stride: 0x1},
4274 unicode.Range32{Lo: 0x1d552, Hi: 0x1d56b, Stride: 0x1},
4275 unicode.Range32{Lo: 0x1d586, Hi: 0x1d59f, Stride: 0x1},
4276 unicode.Range32{Lo: 0x1d5ba, Hi: 0x1d5d3, Stride: 0x1},
4277 unicode.Range32{Lo: 0x1d5ee, Hi: 0x1d607, Stride: 0x1},
4278 unicode.Range32{Lo: 0x1d622, Hi: 0x1d63b, Stride: 0x1},
4279 unicode.Range32{Lo: 0x1d656, Hi: 0x1d66f, Stride: 0x1},
4280 unicode.Range32{Lo: 0x1d68a, Hi: 0x1d6a5, Stride: 0x1},
4281 unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1},
4282 unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6e1, Stride: 0x1},
4283 unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1},
4284 unicode.Range32{Lo: 0x1d716, Hi: 0x1d71b, Stride: 0x1},
4285 unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1},
4286 unicode.Range32{Lo: 0x1d750, Hi: 0x1d755, Stride: 0x1},
4287 unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1},
4288 unicode.Range32{Lo: 0x1d78a, Hi: 0x1d78f, Stride: 0x1},
4289 unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1},
4290 unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7c9, Stride: 0x1},
4291 unicode.Range32{Lo: 0x1d7cb, Hi: 0x1d7cb, Stride: 0x1},
4292 unicode.Range32{Lo: 0x1e922, Hi: 0x1e943, Stride: 0x1},
4293 },
4294 LatinOffset: 6,
4295}
4296
4297var _SentenceNumeric = &unicode.RangeTable{
4298 R16: []unicode.Range16{
4299 unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
4300 unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
4301 unicode.Range16{Lo: 0x66b, Hi: 0x66c, Stride: 0x1},
4302 unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
4303 unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
4304 unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
4305 unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
4306 unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
4307 unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
4308 unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
4309 unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
4310 unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
4311 unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
4312 unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
4313 unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
4314 unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
4315 unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
4316 unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
4317 unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
4318 unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
4319 unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
4320 unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
4321 unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
4322 unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
4323 unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
4324 unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
4325 unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
4326 unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
4327 unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
4328 unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
4329 unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
4330 unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
4331 unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
4332 unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
4333 unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
4334 unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
4335 unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
4336 },
4337 R32: []unicode.Range32{
4338 unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
4339 unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
4340 unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
4341 unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
4342 unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
4343 unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
4344 unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
4345 unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
4346 unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
4347 unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
4348 unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
4349 unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
4350 unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
4351 unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
4352 unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
4353 unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
4354 unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
4355 },
4356 LatinOffset: 1,
4357}
4358
4359var _SentenceOLetter = &unicode.RangeTable{
4360 R16: []unicode.Range16{
4361 unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
4362 unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
4363 unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
4364 unicode.Range16{Lo: 0x2b9, Hi: 0x2bf, Stride: 0x1},
4365 unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
4366 unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
4367 unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
4368 unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
4369 unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1},
4370 unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1},
4371 unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1},
4372 unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1},
4373 unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1},
4374 unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1},
4375 unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1},
4376 unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1},
4377 unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1},
4378 unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1},
4379 unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1},
4380 unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1},
4381 unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1},
4382 unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1},
4383 unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1},
4384 unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1},
4385 unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1},
4386 unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1},
4387 unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1},
4388 unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1},
4389 unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1},
4390 unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1},
4391 unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1},
4392 unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1},
4393 unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1},
4394 unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1},
4395 unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1},
4396 unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1},
4397 unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1},
4398 unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1},
4399 unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1},
4400 unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1},
4401 unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1},
4402 unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1},
4403 unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1},
4404 unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1},
4405 unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1},
4406 unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1},
4407 unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1},
4408 unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1},
4409 unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1},
4410 unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1},
4411 unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1},
4412 unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1},
4413 unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1},
4414 unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1},
4415 unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1},
4416 unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1},
4417 unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1},
4418 unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1},
4419 unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1},
4420 unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1},
4421 unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1},
4422 unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1},
4423 unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1},
4424 unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1},
4425 unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1},
4426 unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1},
4427 unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1},
4428 unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1},
4429 unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1},
4430 unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1},
4431 unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1},
4432 unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1},
4433 unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1},
4434 unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1},
4435 unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1},
4436 unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1},
4437 unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1},
4438 unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1},
4439 unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1},
4440 unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1},
4441 unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1},
4442 unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1},
4443 unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1},
4444 unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1},
4445 unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1},
4446 unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1},
4447 unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1},
4448 unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1},
4449 unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1},
4450 unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1},
4451 unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1},
4452 unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1},
4453 unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1},
4454 unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1},
4455 unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1},
4456 unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1},
4457 unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1},
4458 unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1},
4459 unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1},
4460 unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1},
4461 unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1},
4462 unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1},
4463 unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1},
4464 unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1},
4465 unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1},
4466 unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1},
4467 unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1},
4468 unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1},
4469 unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1},
4470 unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1},
4471 unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1},
4472 unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1},
4473 unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1},
4474 unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1},
4475 unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1},
4476 unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
4477 unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1},
4478 unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1},
4479 unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1},
4480 unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1},
4481 unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1},
4482 unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1},
4483 unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1},
4484 unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1},
4485 unicode.Range16{Lo: 0xe01, Hi: 0xe30, Stride: 0x1},
4486 unicode.Range16{Lo: 0xe32, Hi: 0xe33, Stride: 0x1},
4487 unicode.Range16{Lo: 0xe40, Hi: 0xe45, Stride: 0x1},
4488 unicode.Range16{Lo: 0xe46, Hi: 0xe46, Stride: 0x1},
4489 unicode.Range16{Lo: 0xe81, Hi: 0xe82, Stride: 0x1},
4490 unicode.Range16{Lo: 0xe84, Hi: 0xe84, Stride: 0x1},
4491 unicode.Range16{Lo: 0xe87, Hi: 0xe88, Stride: 0x1},
4492 unicode.Range16{Lo: 0xe8a, Hi: 0xe8a, Stride: 0x1},
4493 unicode.Range16{Lo: 0xe8d, Hi: 0xe8d, Stride: 0x1},
4494 unicode.Range16{Lo: 0xe94, Hi: 0xe97, Stride: 0x1},
4495 unicode.Range16{Lo: 0xe99, Hi: 0xe9f, Stride: 0x1},
4496 unicode.Range16{Lo: 0xea1, Hi: 0xea3, Stride: 0x1},
4497 unicode.Range16{Lo: 0xea5, Hi: 0xea5, Stride: 0x1},
4498 unicode.Range16{Lo: 0xea7, Hi: 0xea7, Stride: 0x1},
4499 unicode.Range16{Lo: 0xeaa, Hi: 0xeab, Stride: 0x1},
4500 unicode.Range16{Lo: 0xead, Hi: 0xeb0, Stride: 0x1},
4501 unicode.Range16{Lo: 0xeb2, Hi: 0xeb3, Stride: 0x1},
4502 unicode.Range16{Lo: 0xebd, Hi: 0xebd, Stride: 0x1},
4503 unicode.Range16{Lo: 0xec0, Hi: 0xec4, Stride: 0x1},
4504 unicode.Range16{Lo: 0xec6, Hi: 0xec6, Stride: 0x1},
4505 unicode.Range16{Lo: 0xedc, Hi: 0xedf, Stride: 0x1},
4506 unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1},
4507 unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1},
4508 unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1},
4509 unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1},
4510 unicode.Range16{Lo: 0x1000, Hi: 0x102a, Stride: 0x1},
4511 unicode.Range16{Lo: 0x103f, Hi: 0x103f, Stride: 0x1},
4512 unicode.Range16{Lo: 0x1050, Hi: 0x1055, Stride: 0x1},
4513 unicode.Range16{Lo: 0x105a, Hi: 0x105d, Stride: 0x1},
4514 unicode.Range16{Lo: 0x1061, Hi: 0x1061, Stride: 0x1},
4515 unicode.Range16{Lo: 0x1065, Hi: 0x1066, Stride: 0x1},
4516 unicode.Range16{Lo: 0x106e, Hi: 0x1070, Stride: 0x1},
4517 unicode.Range16{Lo: 0x1075, Hi: 0x1081, Stride: 0x1},
4518 unicode.Range16{Lo: 0x108e, Hi: 0x108e, Stride: 0x1},
4519 unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1},
4520 unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1},
4521 unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1},
4522 unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1},
4523 unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1},
4524 unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1},
4525 unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1},
4526 unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1},
4527 unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1},
4528 unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1},
4529 unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1},
4530 unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1},
4531 unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1},
4532 unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1},
4533 unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1},
4534 unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1},
4535 unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1},
4536 unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1},
4537 unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1},
4538 unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1},
4539 unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1},
4540 unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1},
4541 unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1},
4542 unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1},
4543 unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1},
4544 unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1},
4545 unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1},
4546 unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1},
4547 unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1},
4548 unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1},
4549 unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1},
4550 unicode.Range16{Lo: 0x1780, Hi: 0x17b3, Stride: 0x1},
4551 unicode.Range16{Lo: 0x17d7, Hi: 0x17d7, Stride: 0x1},
4552 unicode.Range16{Lo: 0x17dc, Hi: 0x17dc, Stride: 0x1},
4553 unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1},
4554 unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1},
4555 unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1},
4556 unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1},
4557 unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1},
4558 unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1},
4559 unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1},
4560 unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1},
4561 unicode.Range16{Lo: 0x1950, Hi: 0x196d, Stride: 0x1},
4562 unicode.Range16{Lo: 0x1970, Hi: 0x1974, Stride: 0x1},
4563 unicode.Range16{Lo: 0x1980, Hi: 0x19ab, Stride: 0x1},
4564 unicode.Range16{Lo: 0x19b0, Hi: 0x19c9, Stride: 0x1},
4565 unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1},
4566 unicode.Range16{Lo: 0x1a20, Hi: 0x1a54, Stride: 0x1},
4567 unicode.Range16{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 0x1},
4568 unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1},
4569 unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1},
4570 unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1},
4571 unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1},
4572 unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1},
4573 unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1},
4574 unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1},
4575 unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1},
4576 unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1},
4577 unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1},
4578 unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1},
4579 unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1},
4580 unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1},
4581 unicode.Range16{Lo: 0x2180, Hi: 0x2182, Stride: 0x1},
4582 unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1},
4583 unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1},
4584 unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1},
4585 unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1},
4586 unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1},
4587 unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1},
4588 unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1},
4589 unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1},
4590 unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1},
4591 unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1},
4592 unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1},
4593 unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1},
4594 unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1},
4595 unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1},
4596 unicode.Range16{Lo: 0x3006, Hi: 0x3006, Stride: 0x1},
4597 unicode.Range16{Lo: 0x3007, Hi: 0x3007, Stride: 0x1},
4598 unicode.Range16{Lo: 0x3021, Hi: 0x3029, Stride: 0x1},
4599 unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1},
4600 unicode.Range16{Lo: 0x3038, Hi: 0x303a, Stride: 0x1},
4601 unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1},
4602 unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1},
4603 unicode.Range16{Lo: 0x3041, Hi: 0x3096, Stride: 0x1},
4604 unicode.Range16{Lo: 0x309d, Hi: 0x309e, Stride: 0x1},
4605 unicode.Range16{Lo: 0x309f, Hi: 0x309f, Stride: 0x1},
4606 unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1},
4607 unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1},
4608 unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1},
4609 unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1},
4610 unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1},
4611 unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1},
4612 unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1},
4613 unicode.Range16{Lo: 0x3400, Hi: 0x4db5, Stride: 0x1},
4614 unicode.Range16{Lo: 0x4e00, Hi: 0x9fd5, Stride: 0x1},
4615 unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1},
4616 unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1},
4617 unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1},
4618 unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1},
4619 unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1},
4620 unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1},
4621 unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1},
4622 unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1},
4623 unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1},
4624 unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1},
4625 unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1},
4626 unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1},
4627 unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1},
4628 unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1},
4629 unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1},
4630 unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1},
4631 unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1},
4632 unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1},
4633 unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1},
4634 unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1},
4635 unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1},
4636 unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1},
4637 unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1},
4638 unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1},
4639 unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1},
4640 unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1},
4641 unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1},
4642 unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1},
4643 unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1},
4644 unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1},
4645 unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1},
4646 unicode.Range16{Lo: 0xa9e0, Hi: 0xa9e4, Stride: 0x1},
4647 unicode.Range16{Lo: 0xa9e6, Hi: 0xa9e6, Stride: 0x1},
4648 unicode.Range16{Lo: 0xa9e7, Hi: 0xa9ef, Stride: 0x1},
4649 unicode.Range16{Lo: 0xa9fa, Hi: 0xa9fe, Stride: 0x1},
4650 unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1},
4651 unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1},
4652 unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1},
4653 unicode.Range16{Lo: 0xaa60, Hi: 0xaa6f, Stride: 0x1},
4654 unicode.Range16{Lo: 0xaa70, Hi: 0xaa70, Stride: 0x1},
4655 unicode.Range16{Lo: 0xaa71, Hi: 0xaa76, Stride: 0x1},
4656 unicode.Range16{Lo: 0xaa7a, Hi: 0xaa7a, Stride: 0x1},
4657 unicode.Range16{Lo: 0xaa7e, Hi: 0xaaaf, Stride: 0x1},
4658 unicode.Range16{Lo: 0xaab1, Hi: 0xaab1, Stride: 0x1},
4659 unicode.Range16{Lo: 0xaab5, Hi: 0xaab6, Stride: 0x1},
4660 unicode.Range16{Lo: 0xaab9, Hi: 0xaabd, Stride: 0x1},
4661 unicode.Range16{Lo: 0xaac0, Hi: 0xaac0, Stride: 0x1},
4662 unicode.Range16{Lo: 0xaac2, Hi: 0xaac2, Stride: 0x1},
4663 unicode.Range16{Lo: 0xaadb, Hi: 0xaadc, Stride: 0x1},
4664 unicode.Range16{Lo: 0xaadd, Hi: 0xaadd, Stride: 0x1},
4665 unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1},
4666 unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1},
4667 unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1},
4668 unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1},
4669 unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1},
4670 unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1},
4671 unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1},
4672 unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1},
4673 unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1},
4674 unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1},
4675 unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
4676 unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
4677 unicode.Range16{Lo: 0xf900, Hi: 0xfa6d, Stride: 0x1},
4678 unicode.Range16{Lo: 0xfa70, Hi: 0xfad9, Stride: 0x1},
4679 unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1},
4680 unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1},
4681 unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1},
4682 unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1},
4683 unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1},
4684 unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1},
4685 unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1},
4686 unicode.Range16{Lo: 0xfb46, Hi: 0xfbb1, Stride: 0x1},
4687 unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1},
4688 unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1},
4689 unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1},
4690 unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1},
4691 unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1},
4692 unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1},
4693 unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1},
4694 unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1},
4695 unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1},
4696 unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1},
4697 unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1},
4698 unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1},
4699 unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1},
4700 unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1},
4701 },
4702 R32: []unicode.Range32{
4703 unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1},
4704 unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1},
4705 unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1},
4706 unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1},
4707 unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1},
4708 unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1},
4709 unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1},
4710 unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1},
4711 unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1},
4712 unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1},
4713 unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1},
4714 unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1},
4715 unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1},
4716 unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1},
4717 unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1},
4718 unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1},
4719 unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1},
4720 unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1},
4721 unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1},
4722 unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1},
4723 unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1},
4724 unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1},
4725 unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1},
4726 unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1},
4727 unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1},
4728 unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1},
4729 unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1},
4730 unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1},
4731 unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1},
4732 unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1},
4733 unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1},
4734 unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1},
4735 unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1},
4736 unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1},
4737 unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1},
4738 unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1},
4739 unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1},
4740 unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1},
4741 unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1},
4742 unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1},
4743 unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1},
4744 unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1},
4745 unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1},
4746 unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1},
4747 unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1},
4748 unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 0x1},
4749 unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1},
4750 unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1},
4751 unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1},
4752 unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1},
4753 unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1},
4754 unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1},
4755 unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1},
4756 unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1},
4757 unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1},
4758 unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1},
4759 unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1},
4760 unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1},
4761 unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1},
4762 unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1},
4763 unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1},
4764 unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1},
4765 unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1},
4766 unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1},
4767 unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1},
4768 unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1},
4769 unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1},
4770 unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1},
4771 unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1},
4772 unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1},
4773 unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1},
4774 unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1},
4775 unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1},
4776 unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1},
4777 unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1},
4778 unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1},
4779 unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1},
4780 unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1},
4781 unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1},
4782 unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1},
4783 unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1},
4784 unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1},
4785 unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1},
4786 unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1},
4787 unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1},
4788 unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1},
4789 unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1},
4790 unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1},
4791 unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1},
4792 unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1},
4793 unicode.Range32{Lo: 0x11700, Hi: 0x11719, Stride: 0x1},
4794 unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1},
4795 unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1},
4796 unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1},
4797 unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1},
4798 unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1},
4799 unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1},
4800 unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1},
4801 unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1},
4802 unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1},
4803 unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1},
4804 unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1},
4805 unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1},
4806 unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1},
4807 unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1},
4808 unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1},
4809 unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1},
4810 unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1},
4811 unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1},
4812 unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1},
4813 unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1},
4814 unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1},
4815 unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1},
4816 unicode.Range32{Lo: 0x17000, Hi: 0x187ec, Stride: 0x1},
4817 unicode.Range32{Lo: 0x18800, Hi: 0x18af2, Stride: 0x1},
4818 unicode.Range32{Lo: 0x1b000, Hi: 0x1b001, Stride: 0x1},
4819 unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1},
4820 unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1},
4821 unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1},
4822 unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1},
4823 unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1},
4824 unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1},
4825 unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1},
4826 unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1},
4827 unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1},
4828 unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1},
4829 unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1},
4830 unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1},
4831 unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1},
4832 unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1},
4833 unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1},
4834 unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1},
4835 unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1},
4836 unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1},
4837 unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1},
4838 unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1},
4839 unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1},
4840 unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1},
4841 unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
4842 unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
4843 unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
4844 unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
4845 unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
4846 unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
4847 unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
4848 unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
4849 unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
4850 unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
4851 unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
4852 unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
4853 unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
4854 unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
4855 unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
4856 unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
4857 unicode.Range32{Lo: 0x20000, Hi: 0x2a6d6, Stride: 0x1},
4858 unicode.Range32{Lo: 0x2a700, Hi: 0x2b734, Stride: 0x1},
4859 unicode.Range32{Lo: 0x2b740, Hi: 0x2b81d, Stride: 0x1},
4860 unicode.Range32{Lo: 0x2b820, Hi: 0x2cea1, Stride: 0x1},
4861 unicode.Range32{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 0x1},
4862 },
4863 LatinOffset: 0,
4864}
4865
4866var _SentenceSContinue = &unicode.RangeTable{
4867 R16: []unicode.Range16{
4868 unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1},
4869 unicode.Range16{Lo: 0x2d, Hi: 0x2d, Stride: 0x1},
4870 unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1},
4871 unicode.Range16{Lo: 0x55d, Hi: 0x55d, Stride: 0x1},
4872 unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1},
4873 unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1},
4874 unicode.Range16{Lo: 0x1802, Hi: 0x1802, Stride: 0x1},
4875 unicode.Range16{Lo: 0x1808, Hi: 0x1808, Stride: 0x1},
4876 unicode.Range16{Lo: 0x2013, Hi: 0x2014, Stride: 0x1},
4877 unicode.Range16{Lo: 0x3001, Hi: 0x3001, Stride: 0x1},
4878 unicode.Range16{Lo: 0xfe10, Hi: 0xfe11, Stride: 0x1},
4879 unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1},
4880 unicode.Range16{Lo: 0xfe31, Hi: 0xfe32, Stride: 0x1},
4881 unicode.Range16{Lo: 0xfe50, Hi: 0xfe51, Stride: 0x1},
4882 unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1},
4883 unicode.Range16{Lo: 0xfe58, Hi: 0xfe58, Stride: 0x1},
4884 unicode.Range16{Lo: 0xfe63, Hi: 0xfe63, Stride: 0x1},
4885 unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1},
4886 unicode.Range16{Lo: 0xff0d, Hi: 0xff0d, Stride: 0x1},
4887 unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1},
4888 unicode.Range16{Lo: 0xff64, Hi: 0xff64, Stride: 0x1},
4889 },
4890 LatinOffset: 3,
4891}
4892
4893var _SentenceSTerm = &unicode.RangeTable{
4894 R16: []unicode.Range16{
4895 unicode.Range16{Lo: 0x21, Hi: 0x21, Stride: 0x1},
4896 unicode.Range16{Lo: 0x3f, Hi: 0x3f, Stride: 0x1},
4897 unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1},
4898 unicode.Range16{Lo: 0x61f, Hi: 0x61f, Stride: 0x1},
4899 unicode.Range16{Lo: 0x6d4, Hi: 0x6d4, Stride: 0x1},
4900 unicode.Range16{Lo: 0x700, Hi: 0x702, Stride: 0x1},
4901 unicode.Range16{Lo: 0x7f9, Hi: 0x7f9, Stride: 0x1},
4902 unicode.Range16{Lo: 0x964, Hi: 0x965, Stride: 0x1},
4903 unicode.Range16{Lo: 0x104a, Hi: 0x104b, Stride: 0x1},
4904 unicode.Range16{Lo: 0x1362, Hi: 0x1362, Stride: 0x1},
4905 unicode.Range16{Lo: 0x1367, Hi: 0x1368, Stride: 0x1},
4906 unicode.Range16{Lo: 0x166e, Hi: 0x166e, Stride: 0x1},
4907 unicode.Range16{Lo: 0x1735, Hi: 0x1736, Stride: 0x1},
4908 unicode.Range16{Lo: 0x1803, Hi: 0x1803, Stride: 0x1},
4909 unicode.Range16{Lo: 0x1809, Hi: 0x1809, Stride: 0x1},
4910 unicode.Range16{Lo: 0x1944, Hi: 0x1945, Stride: 0x1},
4911 unicode.Range16{Lo: 0x1aa8, Hi: 0x1aab, Stride: 0x1},
4912 unicode.Range16{Lo: 0x1b5a, Hi: 0x1b5b, Stride: 0x1},
4913 unicode.Range16{Lo: 0x1b5e, Hi: 0x1b5f, Stride: 0x1},
4914 unicode.Range16{Lo: 0x1c3b, Hi: 0x1c3c, Stride: 0x1},
4915 unicode.Range16{Lo: 0x1c7e, Hi: 0x1c7f, Stride: 0x1},
4916 unicode.Range16{Lo: 0x203c, Hi: 0x203d, Stride: 0x1},
4917 unicode.Range16{Lo: 0x2047, Hi: 0x2049, Stride: 0x1},
4918 unicode.Range16{Lo: 0x2e2e, Hi: 0x2e2e, Stride: 0x1},
4919 unicode.Range16{Lo: 0x2e3c, Hi: 0x2e3c, Stride: 0x1},
4920 unicode.Range16{Lo: 0x3002, Hi: 0x3002, Stride: 0x1},
4921 unicode.Range16{Lo: 0xa4ff, Hi: 0xa4ff, Stride: 0x1},
4922 unicode.Range16{Lo: 0xa60e, Hi: 0xa60f, Stride: 0x1},
4923 unicode.Range16{Lo: 0xa6f3, Hi: 0xa6f3, Stride: 0x1},
4924 unicode.Range16{Lo: 0xa6f7, Hi: 0xa6f7, Stride: 0x1},
4925 unicode.Range16{Lo: 0xa876, Hi: 0xa877, Stride: 0x1},
4926 unicode.Range16{Lo: 0xa8ce, Hi: 0xa8cf, Stride: 0x1},
4927 unicode.Range16{Lo: 0xa92f, Hi: 0xa92f, Stride: 0x1},
4928 unicode.Range16{Lo: 0xa9c8, Hi: 0xa9c9, Stride: 0x1},
4929 unicode.Range16{Lo: 0xaa5d, Hi: 0xaa5f, Stride: 0x1},
4930 unicode.Range16{Lo: 0xaaf0, Hi: 0xaaf1, Stride: 0x1},
4931 unicode.Range16{Lo: 0xabeb, Hi: 0xabeb, Stride: 0x1},
4932 unicode.Range16{Lo: 0xfe56, Hi: 0xfe57, Stride: 0x1},
4933 unicode.Range16{Lo: 0xff01, Hi: 0xff01, Stride: 0x1},
4934 unicode.Range16{Lo: 0xff1f, Hi: 0xff1f, Stride: 0x1},
4935 unicode.Range16{Lo: 0xff61, Hi: 0xff61, Stride: 0x1},
4936 },
4937 R32: []unicode.Range32{
4938 unicode.Range32{Lo: 0x10a56, Hi: 0x10a57, Stride: 0x1},
4939 unicode.Range32{Lo: 0x11047, Hi: 0x11048, Stride: 0x1},
4940 unicode.Range32{Lo: 0x110be, Hi: 0x110c1, Stride: 0x1},
4941 unicode.Range32{Lo: 0x11141, Hi: 0x11143, Stride: 0x1},
4942 unicode.Range32{Lo: 0x111c5, Hi: 0x111c6, Stride: 0x1},
4943 unicode.Range32{Lo: 0x111cd, Hi: 0x111cd, Stride: 0x1},
4944 unicode.Range32{Lo: 0x111de, Hi: 0x111df, Stride: 0x1},
4945 unicode.Range32{Lo: 0x11238, Hi: 0x11239, Stride: 0x1},
4946 unicode.Range32{Lo: 0x1123b, Hi: 0x1123c, Stride: 0x1},
4947 unicode.Range32{Lo: 0x112a9, Hi: 0x112a9, Stride: 0x1},
4948 unicode.Range32{Lo: 0x1144b, Hi: 0x1144c, Stride: 0x1},
4949 unicode.Range32{Lo: 0x115c2, Hi: 0x115c3, Stride: 0x1},
4950 unicode.Range32{Lo: 0x115c9, Hi: 0x115d7, Stride: 0x1},
4951 unicode.Range32{Lo: 0x11641, Hi: 0x11642, Stride: 0x1},
4952 unicode.Range32{Lo: 0x1173c, Hi: 0x1173e, Stride: 0x1},
4953 unicode.Range32{Lo: 0x11c41, Hi: 0x11c42, Stride: 0x1},
4954 unicode.Range32{Lo: 0x16a6e, Hi: 0x16a6f, Stride: 0x1},
4955 unicode.Range32{Lo: 0x16af5, Hi: 0x16af5, Stride: 0x1},
4956 unicode.Range32{Lo: 0x16b37, Hi: 0x16b38, Stride: 0x1},
4957 unicode.Range32{Lo: 0x16b44, Hi: 0x16b44, Stride: 0x1},
4958 unicode.Range32{Lo: 0x1bc9f, Hi: 0x1bc9f, Stride: 0x1},
4959 unicode.Range32{Lo: 0x1da88, Hi: 0x1da88, Stride: 0x1},
4960 },
4961 LatinOffset: 2,
4962}
4963
4964var _SentenceSep = &unicode.RangeTable{
4965 R16: []unicode.Range16{
4966 unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1},
4967 unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
4968 unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
4969 },
4970 LatinOffset: 1,
4971}
4972
4973var _SentenceSp = &unicode.RangeTable{
4974 R16: []unicode.Range16{
4975 unicode.Range16{Lo: 0x9, Hi: 0x9, Stride: 0x1},
4976 unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
4977 unicode.Range16{Lo: 0x20, Hi: 0x20, Stride: 0x1},
4978 unicode.Range16{Lo: 0xa0, Hi: 0xa0, Stride: 0x1},
4979 unicode.Range16{Lo: 0x1680, Hi: 0x1680, Stride: 0x1},
4980 unicode.Range16{Lo: 0x2000, Hi: 0x200a, Stride: 0x1},
4981 unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1},
4982 unicode.Range16{Lo: 0x205f, Hi: 0x205f, Stride: 0x1},
4983 unicode.Range16{Lo: 0x3000, Hi: 0x3000, Stride: 0x1},
4984 },
4985 LatinOffset: 4,
4986}
4987
4988var _SentenceUpper = &unicode.RangeTable{
4989 R16: []unicode.Range16{
4990 unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
4991 unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
4992 unicode.Range16{Lo: 0xd8, Hi: 0xde, Stride: 0x1},
4993 unicode.Range16{Lo: 0x100, Hi: 0x100, Stride: 0x1},
4994 unicode.Range16{Lo: 0x102, Hi: 0x102, Stride: 0x1},
4995 unicode.Range16{Lo: 0x104, Hi: 0x104, Stride: 0x1},
4996 unicode.Range16{Lo: 0x106, Hi: 0x106, Stride: 0x1},
4997 unicode.Range16{Lo: 0x108, Hi: 0x108, Stride: 0x1},
4998 unicode.Range16{Lo: 0x10a, Hi: 0x10a, Stride: 0x1},
4999 unicode.Range16{Lo: 0x10c, Hi: 0x10c, Stride: 0x1},
5000 unicode.Range16{Lo: 0x10e, Hi: 0x10e, Stride: 0x1},
5001 unicode.Range16{Lo: 0x110, Hi: 0x110, Stride: 0x1},
5002 unicode.Range16{Lo: 0x112, Hi: 0x112, Stride: 0x1},
5003 unicode.Range16{Lo: 0x114, Hi: 0x114, Stride: 0x1},
5004 unicode.Range16{Lo: 0x116, Hi: 0x116, Stride: 0x1},
5005 unicode.Range16{Lo: 0x118, Hi: 0x118, Stride: 0x1},
5006 unicode.Range16{Lo: 0x11a, Hi: 0x11a, Stride: 0x1},
5007 unicode.Range16{Lo: 0x11c, Hi: 0x11c, Stride: 0x1},
5008 unicode.Range16{Lo: 0x11e, Hi: 0x11e, Stride: 0x1},
5009 unicode.Range16{Lo: 0x120, Hi: 0x120, Stride: 0x1},
5010 unicode.Range16{Lo: 0x122, Hi: 0x122, Stride: 0x1},
5011 unicode.Range16{Lo: 0x124, Hi: 0x124, Stride: 0x1},
5012 unicode.Range16{Lo: 0x126, Hi: 0x126, Stride: 0x1},
5013 unicode.Range16{Lo: 0x128, Hi: 0x128, Stride: 0x1},
5014 unicode.Range16{Lo: 0x12a, Hi: 0x12a, Stride: 0x1},
5015 unicode.Range16{Lo: 0x12c, Hi: 0x12c, Stride: 0x1},
5016 unicode.Range16{Lo: 0x12e, Hi: 0x12e, Stride: 0x1},
5017 unicode.Range16{Lo: 0x130, Hi: 0x130, Stride: 0x1},
5018 unicode.Range16{Lo: 0x132, Hi: 0x132, Stride: 0x1},
5019 unicode.Range16{Lo: 0x134, Hi: 0x134, Stride: 0x1},
5020 unicode.Range16{Lo: 0x136, Hi: 0x136, Stride: 0x1},
5021 unicode.Range16{Lo: 0x139, Hi: 0x139, Stride: 0x1},
5022 unicode.Range16{Lo: 0x13b, Hi: 0x13b, Stride: 0x1},
5023 unicode.Range16{Lo: 0x13d, Hi: 0x13d, Stride: 0x1},
5024 unicode.Range16{Lo: 0x13f, Hi: 0x13f, Stride: 0x1},
5025 unicode.Range16{Lo: 0x141, Hi: 0x141, Stride: 0x1},
5026 unicode.Range16{Lo: 0x143, Hi: 0x143, Stride: 0x1},
5027 unicode.Range16{Lo: 0x145, Hi: 0x145, Stride: 0x1},
5028 unicode.Range16{Lo: 0x147, Hi: 0x147, Stride: 0x1},
5029 unicode.Range16{Lo: 0x14a, Hi: 0x14a, Stride: 0x1},
5030 unicode.Range16{Lo: 0x14c, Hi: 0x14c, Stride: 0x1},
5031 unicode.Range16{Lo: 0x14e, Hi: 0x14e, Stride: 0x1},
5032 unicode.Range16{Lo: 0x150, Hi: 0x150, Stride: 0x1},
5033 unicode.Range16{Lo: 0x152, Hi: 0x152, Stride: 0x1},
5034 unicode.Range16{Lo: 0x154, Hi: 0x154, Stride: 0x1},
5035 unicode.Range16{Lo: 0x156, Hi: 0x156, Stride: 0x1},
5036 unicode.Range16{Lo: 0x158, Hi: 0x158, Stride: 0x1},
5037 unicode.Range16{Lo: 0x15a, Hi: 0x15a, Stride: 0x1},
5038 unicode.Range16{Lo: 0x15c, Hi: 0x15c, Stride: 0x1},
5039 unicode.Range16{Lo: 0x15e, Hi: 0x15e, Stride: 0x1},
5040 unicode.Range16{Lo: 0x160, Hi: 0x160, Stride: 0x1},
5041 unicode.Range16{Lo: 0x162, Hi: 0x162, Stride: 0x1},
5042 unicode.Range16{Lo: 0x164, Hi: 0x164, Stride: 0x1},
5043 unicode.Range16{Lo: 0x166, Hi: 0x166, Stride: 0x1},
5044 unicode.Range16{Lo: 0x168, Hi: 0x168, Stride: 0x1},
5045 unicode.Range16{Lo: 0x16a, Hi: 0x16a, Stride: 0x1},
5046 unicode.Range16{Lo: 0x16c, Hi: 0x16c, Stride: 0x1},
5047 unicode.Range16{Lo: 0x16e, Hi: 0x16e, Stride: 0x1},
5048 unicode.Range16{Lo: 0x170, Hi: 0x170, Stride: 0x1},
5049 unicode.Range16{Lo: 0x172, Hi: 0x172, Stride: 0x1},
5050 unicode.Range16{Lo: 0x174, Hi: 0x174, Stride: 0x1},
5051 unicode.Range16{Lo: 0x176, Hi: 0x176, Stride: 0x1},
5052 unicode.Range16{Lo: 0x178, Hi: 0x179, Stride: 0x1},
5053 unicode.Range16{Lo: 0x17b, Hi: 0x17b, Stride: 0x1},
5054 unicode.Range16{Lo: 0x17d, Hi: 0x17d, Stride: 0x1},
5055 unicode.Range16{Lo: 0x181, Hi: 0x182, Stride: 0x1},
5056 unicode.Range16{Lo: 0x184, Hi: 0x184, Stride: 0x1},
5057 unicode.Range16{Lo: 0x186, Hi: 0x187, Stride: 0x1},
5058 unicode.Range16{Lo: 0x189, Hi: 0x18b, Stride: 0x1},
5059 unicode.Range16{Lo: 0x18e, Hi: 0x191, Stride: 0x1},
5060 unicode.Range16{Lo: 0x193, Hi: 0x194, Stride: 0x1},
5061 unicode.Range16{Lo: 0x196, Hi: 0x198, Stride: 0x1},
5062 unicode.Range16{Lo: 0x19c, Hi: 0x19d, Stride: 0x1},
5063 unicode.Range16{Lo: 0x19f, Hi: 0x1a0, Stride: 0x1},
5064 unicode.Range16{Lo: 0x1a2, Hi: 0x1a2, Stride: 0x1},
5065 unicode.Range16{Lo: 0x1a4, Hi: 0x1a4, Stride: 0x1},
5066 unicode.Range16{Lo: 0x1a6, Hi: 0x1a7, Stride: 0x1},
5067 unicode.Range16{Lo: 0x1a9, Hi: 0x1a9, Stride: 0x1},
5068 unicode.Range16{Lo: 0x1ac, Hi: 0x1ac, Stride: 0x1},
5069 unicode.Range16{Lo: 0x1ae, Hi: 0x1af, Stride: 0x1},
5070 unicode.Range16{Lo: 0x1b1, Hi: 0x1b3, Stride: 0x1},
5071 unicode.Range16{Lo: 0x1b5, Hi: 0x1b5, Stride: 0x1},
5072 unicode.Range16{Lo: 0x1b7, Hi: 0x1b8, Stride: 0x1},
5073 unicode.Range16{Lo: 0x1bc, Hi: 0x1bc, Stride: 0x1},
5074 unicode.Range16{Lo: 0x1c4, Hi: 0x1c5, Stride: 0x1},
5075 unicode.Range16{Lo: 0x1c7, Hi: 0x1c8, Stride: 0x1},
5076 unicode.Range16{Lo: 0x1ca, Hi: 0x1cb, Stride: 0x1},
5077 unicode.Range16{Lo: 0x1cd, Hi: 0x1cd, Stride: 0x1},
5078 unicode.Range16{Lo: 0x1cf, Hi: 0x1cf, Stride: 0x1},
5079 unicode.Range16{Lo: 0x1d1, Hi: 0x1d1, Stride: 0x1},
5080 unicode.Range16{Lo: 0x1d3, Hi: 0x1d3, Stride: 0x1},
5081 unicode.Range16{Lo: 0x1d5, Hi: 0x1d5, Stride: 0x1},
5082 unicode.Range16{Lo: 0x1d7, Hi: 0x1d7, Stride: 0x1},
5083 unicode.Range16{Lo: 0x1d9, Hi: 0x1d9, Stride: 0x1},
5084 unicode.Range16{Lo: 0x1db, Hi: 0x1db, Stride: 0x1},
5085 unicode.Range16{Lo: 0x1de, Hi: 0x1de, Stride: 0x1},
5086 unicode.Range16{Lo: 0x1e0, Hi: 0x1e0, Stride: 0x1},
5087 unicode.Range16{Lo: 0x1e2, Hi: 0x1e2, Stride: 0x1},
5088 unicode.Range16{Lo: 0x1e4, Hi: 0x1e4, Stride: 0x1},
5089 unicode.Range16{Lo: 0x1e6, Hi: 0x1e6, Stride: 0x1},
5090 unicode.Range16{Lo: 0x1e8, Hi: 0x1e8, Stride: 0x1},
5091 unicode.Range16{Lo: 0x1ea, Hi: 0x1ea, Stride: 0x1},
5092 unicode.Range16{Lo: 0x1ec, Hi: 0x1ec, Stride: 0x1},
5093 unicode.Range16{Lo: 0x1ee, Hi: 0x1ee, Stride: 0x1},
5094 unicode.Range16{Lo: 0x1f1, Hi: 0x1f2, Stride: 0x1},
5095 unicode.Range16{Lo: 0x1f4, Hi: 0x1f4, Stride: 0x1},
5096 unicode.Range16{Lo: 0x1f6, Hi: 0x1f8, Stride: 0x1},
5097 unicode.Range16{Lo: 0x1fa, Hi: 0x1fa, Stride: 0x1},
5098 unicode.Range16{Lo: 0x1fc, Hi: 0x1fc, Stride: 0x1},
5099 unicode.Range16{Lo: 0x1fe, Hi: 0x1fe, Stride: 0x1},
5100 unicode.Range16{Lo: 0x200, Hi: 0x200, Stride: 0x1},
5101 unicode.Range16{Lo: 0x202, Hi: 0x202, Stride: 0x1},
5102 unicode.Range16{Lo: 0x204, Hi: 0x204, Stride: 0x1},
5103 unicode.Range16{Lo: 0x206, Hi: 0x206, Stride: 0x1},
5104 unicode.Range16{Lo: 0x208, Hi: 0x208, Stride: 0x1},
5105 unicode.Range16{Lo: 0x20a, Hi: 0x20a, Stride: 0x1},
5106 unicode.Range16{Lo: 0x20c, Hi: 0x20c, Stride: 0x1},
5107 unicode.Range16{Lo: 0x20e, Hi: 0x20e, Stride: 0x1},
5108 unicode.Range16{Lo: 0x210, Hi: 0x210, Stride: 0x1},
5109 unicode.Range16{Lo: 0x212, Hi: 0x212, Stride: 0x1},
5110 unicode.Range16{Lo: 0x214, Hi: 0x214, Stride: 0x1},
5111 unicode.Range16{Lo: 0x216, Hi: 0x216, Stride: 0x1},
5112 unicode.Range16{Lo: 0x218, Hi: 0x218, Stride: 0x1},
5113 unicode.Range16{Lo: 0x21a, Hi: 0x21a, Stride: 0x1},
5114 unicode.Range16{Lo: 0x21c, Hi: 0x21c, Stride: 0x1},
5115 unicode.Range16{Lo: 0x21e, Hi: 0x21e, Stride: 0x1},
5116 unicode.Range16{Lo: 0x220, Hi: 0x220, Stride: 0x1},
5117 unicode.Range16{Lo: 0x222, Hi: 0x222, Stride: 0x1},
5118 unicode.Range16{Lo: 0x224, Hi: 0x224, Stride: 0x1},
5119 unicode.Range16{Lo: 0x226, Hi: 0x226, Stride: 0x1},
5120 unicode.Range16{Lo: 0x228, Hi: 0x228, Stride: 0x1},
5121 unicode.Range16{Lo: 0x22a, Hi: 0x22a, Stride: 0x1},
5122 unicode.Range16{Lo: 0x22c, Hi: 0x22c, Stride: 0x1},
5123 unicode.Range16{Lo: 0x22e, Hi: 0x22e, Stride: 0x1},
5124 unicode.Range16{Lo: 0x230, Hi: 0x230, Stride: 0x1},
5125 unicode.Range16{Lo: 0x232, Hi: 0x232, Stride: 0x1},
5126 unicode.Range16{Lo: 0x23a, Hi: 0x23b, Stride: 0x1},
5127 unicode.Range16{Lo: 0x23d, Hi: 0x23e, Stride: 0x1},
5128 unicode.Range16{Lo: 0x241, Hi: 0x241, Stride: 0x1},
5129 unicode.Range16{Lo: 0x243, Hi: 0x246, Stride: 0x1},
5130 unicode.Range16{Lo: 0x248, Hi: 0x248, Stride: 0x1},
5131 unicode.Range16{Lo: 0x24a, Hi: 0x24a, Stride: 0x1},
5132 unicode.Range16{Lo: 0x24c, Hi: 0x24c, Stride: 0x1},
5133 unicode.Range16{Lo: 0x24e, Hi: 0x24e, Stride: 0x1},
5134 unicode.Range16{Lo: 0x370, Hi: 0x370, Stride: 0x1},
5135 unicode.Range16{Lo: 0x372, Hi: 0x372, Stride: 0x1},
5136 unicode.Range16{Lo: 0x376, Hi: 0x376, Stride: 0x1},
5137 unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
5138 unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
5139 unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1},
5140 unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1},
5141 unicode.Range16{Lo: 0x38e, Hi: 0x38f, Stride: 0x1},
5142 unicode.Range16{Lo: 0x391, Hi: 0x3a1, Stride: 0x1},
5143 unicode.Range16{Lo: 0x3a3, Hi: 0x3ab, Stride: 0x1},
5144 unicode.Range16{Lo: 0x3cf, Hi: 0x3cf, Stride: 0x1},
5145 unicode.Range16{Lo: 0x3d2, Hi: 0x3d4, Stride: 0x1},
5146 unicode.Range16{Lo: 0x3d8, Hi: 0x3d8, Stride: 0x1},
5147 unicode.Range16{Lo: 0x3da, Hi: 0x3da, Stride: 0x1},
5148 unicode.Range16{Lo: 0x3dc, Hi: 0x3dc, Stride: 0x1},
5149 unicode.Range16{Lo: 0x3de, Hi: 0x3de, Stride: 0x1},
5150 unicode.Range16{Lo: 0x3e0, Hi: 0x3e0, Stride: 0x1},
5151 unicode.Range16{Lo: 0x3e2, Hi: 0x3e2, Stride: 0x1},
5152 unicode.Range16{Lo: 0x3e4, Hi: 0x3e4, Stride: 0x1},
5153 unicode.Range16{Lo: 0x3e6, Hi: 0x3e6, Stride: 0x1},
5154 unicode.Range16{Lo: 0x3e8, Hi: 0x3e8, Stride: 0x1},
5155 unicode.Range16{Lo: 0x3ea, Hi: 0x3ea, Stride: 0x1},
5156 unicode.Range16{Lo: 0x3ec, Hi: 0x3ec, Stride: 0x1},
5157 unicode.Range16{Lo: 0x3ee, Hi: 0x3ee, Stride: 0x1},
5158 unicode.Range16{Lo: 0x3f4, Hi: 0x3f4, Stride: 0x1},
5159 unicode.Range16{Lo: 0x3f7, Hi: 0x3f7, Stride: 0x1},
5160 unicode.Range16{Lo: 0x3f9, Hi: 0x3fa, Stride: 0x1},
5161 unicode.Range16{Lo: 0x3fd, Hi: 0x42f, Stride: 0x1},
5162 unicode.Range16{Lo: 0x460, Hi: 0x460, Stride: 0x1},
5163 unicode.Range16{Lo: 0x462, Hi: 0x462, Stride: 0x1},
5164 unicode.Range16{Lo: 0x464, Hi: 0x464, Stride: 0x1},
5165 unicode.Range16{Lo: 0x466, Hi: 0x466, Stride: 0x1},
5166 unicode.Range16{Lo: 0x468, Hi: 0x468, Stride: 0x1},
5167 unicode.Range16{Lo: 0x46a, Hi: 0x46a, Stride: 0x1},
5168 unicode.Range16{Lo: 0x46c, Hi: 0x46c, Stride: 0x1},
5169 unicode.Range16{Lo: 0x46e, Hi: 0x46e, Stride: 0x1},
5170 unicode.Range16{Lo: 0x470, Hi: 0x470, Stride: 0x1},
5171 unicode.Range16{Lo: 0x472, Hi: 0x472, Stride: 0x1},
5172 unicode.Range16{Lo: 0x474, Hi: 0x474, Stride: 0x1},
5173 unicode.Range16{Lo: 0x476, Hi: 0x476, Stride: 0x1},
5174 unicode.Range16{Lo: 0x478, Hi: 0x478, Stride: 0x1},
5175 unicode.Range16{Lo: 0x47a, Hi: 0x47a, Stride: 0x1},
5176 unicode.Range16{Lo: 0x47c, Hi: 0x47c, Stride: 0x1},
5177 unicode.Range16{Lo: 0x47e, Hi: 0x47e, Stride: 0x1},
5178 unicode.Range16{Lo: 0x480, Hi: 0x480, Stride: 0x1},
5179 unicode.Range16{Lo: 0x48a, Hi: 0x48a, Stride: 0x1},
5180 unicode.Range16{Lo: 0x48c, Hi: 0x48c, Stride: 0x1},
5181 unicode.Range16{Lo: 0x48e, Hi: 0x48e, Stride: 0x1},
5182 unicode.Range16{Lo: 0x490, Hi: 0x490, Stride: 0x1},
5183 unicode.Range16{Lo: 0x492, Hi: 0x492, Stride: 0x1},
5184 unicode.Range16{Lo: 0x494, Hi: 0x494, Stride: 0x1},
5185 unicode.Range16{Lo: 0x496, Hi: 0x496, Stride: 0x1},
5186 unicode.Range16{Lo: 0x498, Hi: 0x498, Stride: 0x1},
5187 unicode.Range16{Lo: 0x49a, Hi: 0x49a, Stride: 0x1},
5188 unicode.Range16{Lo: 0x49c, Hi: 0x49c, Stride: 0x1},
5189 unicode.Range16{Lo: 0x49e, Hi: 0x49e, Stride: 0x1},
5190 unicode.Range16{Lo: 0x4a0, Hi: 0x4a0, Stride: 0x1},
5191 unicode.Range16{Lo: 0x4a2, Hi: 0x4a2, Stride: 0x1},
5192 unicode.Range16{Lo: 0x4a4, Hi: 0x4a4, Stride: 0x1},
5193 unicode.Range16{Lo: 0x4a6, Hi: 0x4a6, Stride: 0x1},
5194 unicode.Range16{Lo: 0x4a8, Hi: 0x4a8, Stride: 0x1},
5195 unicode.Range16{Lo: 0x4aa, Hi: 0x4aa, Stride: 0x1},
5196 unicode.Range16{Lo: 0x4ac, Hi: 0x4ac, Stride: 0x1},
5197 unicode.Range16{Lo: 0x4ae, Hi: 0x4ae, Stride: 0x1},
5198 unicode.Range16{Lo: 0x4b0, Hi: 0x4b0, Stride: 0x1},
5199 unicode.Range16{Lo: 0x4b2, Hi: 0x4b2, Stride: 0x1},
5200 unicode.Range16{Lo: 0x4b4, Hi: 0x4b4, Stride: 0x1},
5201 unicode.Range16{Lo: 0x4b6, Hi: 0x4b6, Stride: 0x1},
5202 unicode.Range16{Lo: 0x4b8, Hi: 0x4b8, Stride: 0x1},
5203 unicode.Range16{Lo: 0x4ba, Hi: 0x4ba, Stride: 0x1},
5204 unicode.Range16{Lo: 0x4bc, Hi: 0x4bc, Stride: 0x1},
5205 unicode.Range16{Lo: 0x4be, Hi: 0x4be, Stride: 0x1},
5206 unicode.Range16{Lo: 0x4c0, Hi: 0x4c1, Stride: 0x1},
5207 unicode.Range16{Lo: 0x4c3, Hi: 0x4c3, Stride: 0x1},
5208 unicode.Range16{Lo: 0x4c5, Hi: 0x4c5, Stride: 0x1},
5209 unicode.Range16{Lo: 0x4c7, Hi: 0x4c7, Stride: 0x1},
5210 unicode.Range16{Lo: 0x4c9, Hi: 0x4c9, Stride: 0x1},
5211 unicode.Range16{Lo: 0x4cb, Hi: 0x4cb, Stride: 0x1},
5212 unicode.Range16{Lo: 0x4cd, Hi: 0x4cd, Stride: 0x1},
5213 unicode.Range16{Lo: 0x4d0, Hi: 0x4d0, Stride: 0x1},
5214 unicode.Range16{Lo: 0x4d2, Hi: 0x4d2, Stride: 0x1},
5215 unicode.Range16{Lo: 0x4d4, Hi: 0x4d4, Stride: 0x1},
5216 unicode.Range16{Lo: 0x4d6, Hi: 0x4d6, Stride: 0x1},
5217 unicode.Range16{Lo: 0x4d8, Hi: 0x4d8, Stride: 0x1},
5218 unicode.Range16{Lo: 0x4da, Hi: 0x4da, Stride: 0x1},
5219 unicode.Range16{Lo: 0x4dc, Hi: 0x4dc, Stride: 0x1},
5220 unicode.Range16{Lo: 0x4de, Hi: 0x4de, Stride: 0x1},
5221 unicode.Range16{Lo: 0x4e0, Hi: 0x4e0, Stride: 0x1},
5222 unicode.Range16{Lo: 0x4e2, Hi: 0x4e2, Stride: 0x1},
5223 unicode.Range16{Lo: 0x4e4, Hi: 0x4e4, Stride: 0x1},
5224 unicode.Range16{Lo: 0x4e6, Hi: 0x4e6, Stride: 0x1},
5225 unicode.Range16{Lo: 0x4e8, Hi: 0x4e8, Stride: 0x1},
5226 unicode.Range16{Lo: 0x4ea, Hi: 0x4ea, Stride: 0x1},
5227 unicode.Range16{Lo: 0x4ec, Hi: 0x4ec, Stride: 0x1},
5228 unicode.Range16{Lo: 0x4ee, Hi: 0x4ee, Stride: 0x1},
5229 unicode.Range16{Lo: 0x4f0, Hi: 0x4f0, Stride: 0x1},
5230 unicode.Range16{Lo: 0x4f2, Hi: 0x4f2, Stride: 0x1},
5231 unicode.Range16{Lo: 0x4f4, Hi: 0x4f4, Stride: 0x1},
5232 unicode.Range16{Lo: 0x4f6, Hi: 0x4f6, Stride: 0x1},
5233 unicode.Range16{Lo: 0x4f8, Hi: 0x4f8, Stride: 0x1},
5234 unicode.Range16{Lo: 0x4fa, Hi: 0x4fa, Stride: 0x1},
5235 unicode.Range16{Lo: 0x4fc, Hi: 0x4fc, Stride: 0x1},
5236 unicode.Range16{Lo: 0x4fe, Hi: 0x4fe, Stride: 0x1},
5237 unicode.Range16{Lo: 0x500, Hi: 0x500, Stride: 0x1},
5238 unicode.Range16{Lo: 0x502, Hi: 0x502, Stride: 0x1},
5239 unicode.Range16{Lo: 0x504, Hi: 0x504, Stride: 0x1},
5240 unicode.Range16{Lo: 0x506, Hi: 0x506, Stride: 0x1},
5241 unicode.Range16{Lo: 0x508, Hi: 0x508, Stride: 0x1},
5242 unicode.Range16{Lo: 0x50a, Hi: 0x50a, Stride: 0x1},
5243 unicode.Range16{Lo: 0x50c, Hi: 0x50c, Stride: 0x1},
5244 unicode.Range16{Lo: 0x50e, Hi: 0x50e, Stride: 0x1},
5245 unicode.Range16{Lo: 0x510, Hi: 0x510, Stride: 0x1},
5246 unicode.Range16{Lo: 0x512, Hi: 0x512, Stride: 0x1},
5247 unicode.Range16{Lo: 0x514, Hi: 0x514, Stride: 0x1},
5248 unicode.Range16{Lo: 0x516, Hi: 0x516, Stride: 0x1},
5249 unicode.Range16{Lo: 0x518, Hi: 0x518, Stride: 0x1},
5250 unicode.Range16{Lo: 0x51a, Hi: 0x51a, Stride: 0x1},
5251 unicode.Range16{Lo: 0x51c, Hi: 0x51c, Stride: 0x1},
5252 unicode.Range16{Lo: 0x51e, Hi: 0x51e, Stride: 0x1},
5253 unicode.Range16{Lo: 0x520, Hi: 0x520, Stride: 0x1},
5254 unicode.Range16{Lo: 0x522, Hi: 0x522, Stride: 0x1},
5255 unicode.Range16{Lo: 0x524, Hi: 0x524, Stride: 0x1},
5256 unicode.Range16{Lo: 0x526, Hi: 0x526, Stride: 0x1},
5257 unicode.Range16{Lo: 0x528, Hi: 0x528, Stride: 0x1},
5258 unicode.Range16{Lo: 0x52a, Hi: 0x52a, Stride: 0x1},
5259 unicode.Range16{Lo: 0x52c, Hi: 0x52c, Stride: 0x1},
5260 unicode.Range16{Lo: 0x52e, Hi: 0x52e, Stride: 0x1},
5261 unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1},
5262 unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1},
5263 unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1},
5264 unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1},
5265 unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1},
5266 unicode.Range16{Lo: 0x1e00, Hi: 0x1e00, Stride: 0x1},
5267 unicode.Range16{Lo: 0x1e02, Hi: 0x1e02, Stride: 0x1},
5268 unicode.Range16{Lo: 0x1e04, Hi: 0x1e04, Stride: 0x1},
5269 unicode.Range16{Lo: 0x1e06, Hi: 0x1e06, Stride: 0x1},
5270 unicode.Range16{Lo: 0x1e08, Hi: 0x1e08, Stride: 0x1},
5271 unicode.Range16{Lo: 0x1e0a, Hi: 0x1e0a, Stride: 0x1},
5272 unicode.Range16{Lo: 0x1e0c, Hi: 0x1e0c, Stride: 0x1},
5273 unicode.Range16{Lo: 0x1e0e, Hi: 0x1e0e, Stride: 0x1},
5274 unicode.Range16{Lo: 0x1e10, Hi: 0x1e10, Stride: 0x1},
5275 unicode.Range16{Lo: 0x1e12, Hi: 0x1e12, Stride: 0x1},
5276 unicode.Range16{Lo: 0x1e14, Hi: 0x1e14, Stride: 0x1},
5277 unicode.Range16{Lo: 0x1e16, Hi: 0x1e16, Stride: 0x1},
5278 unicode.Range16{Lo: 0x1e18, Hi: 0x1e18, Stride: 0x1},
5279 unicode.Range16{Lo: 0x1e1a, Hi: 0x1e1a, Stride: 0x1},
5280 unicode.Range16{Lo: 0x1e1c, Hi: 0x1e1c, Stride: 0x1},
5281 unicode.Range16{Lo: 0x1e1e, Hi: 0x1e1e, Stride: 0x1},
5282 unicode.Range16{Lo: 0x1e20, Hi: 0x1e20, Stride: 0x1},
5283 unicode.Range16{Lo: 0x1e22, Hi: 0x1e22, Stride: 0x1},
5284 unicode.Range16{Lo: 0x1e24, Hi: 0x1e24, Stride: 0x1},
5285 unicode.Range16{Lo: 0x1e26, Hi: 0x1e26, Stride: 0x1},
5286 unicode.Range16{Lo: 0x1e28, Hi: 0x1e28, Stride: 0x1},
5287 unicode.Range16{Lo: 0x1e2a, Hi: 0x1e2a, Stride: 0x1},
5288 unicode.Range16{Lo: 0x1e2c, Hi: 0x1e2c, Stride: 0x1},
5289 unicode.Range16{Lo: 0x1e2e, Hi: 0x1e2e, Stride: 0x1},
5290 unicode.Range16{Lo: 0x1e30, Hi: 0x1e30, Stride: 0x1},
5291 unicode.Range16{Lo: 0x1e32, Hi: 0x1e32, Stride: 0x1},
5292 unicode.Range16{Lo: 0x1e34, Hi: 0x1e34, Stride: 0x1},
5293 unicode.Range16{Lo: 0x1e36, Hi: 0x1e36, Stride: 0x1},
5294 unicode.Range16{Lo: 0x1e38, Hi: 0x1e38, Stride: 0x1},
5295 unicode.Range16{Lo: 0x1e3a, Hi: 0x1e3a, Stride: 0x1},
5296 unicode.Range16{Lo: 0x1e3c, Hi: 0x1e3c, Stride: 0x1},
5297 unicode.Range16{Lo: 0x1e3e, Hi: 0x1e3e, Stride: 0x1},
5298 unicode.Range16{Lo: 0x1e40, Hi: 0x1e40, Stride: 0x1},
5299 unicode.Range16{Lo: 0x1e42, Hi: 0x1e42, Stride: 0x1},
5300 unicode.Range16{Lo: 0x1e44, Hi: 0x1e44, Stride: 0x1},
5301 unicode.Range16{Lo: 0x1e46, Hi: 0x1e46, Stride: 0x1},
5302 unicode.Range16{Lo: 0x1e48, Hi: 0x1e48, Stride: 0x1},
5303 unicode.Range16{Lo: 0x1e4a, Hi: 0x1e4a, Stride: 0x1},
5304 unicode.Range16{Lo: 0x1e4c, Hi: 0x1e4c, Stride: 0x1},
5305 unicode.Range16{Lo: 0x1e4e, Hi: 0x1e4e, Stride: 0x1},
5306 unicode.Range16{Lo: 0x1e50, Hi: 0x1e50, Stride: 0x1},
5307 unicode.Range16{Lo: 0x1e52, Hi: 0x1e52, Stride: 0x1},
5308 unicode.Range16{Lo: 0x1e54, Hi: 0x1e54, Stride: 0x1},
5309 unicode.Range16{Lo: 0x1e56, Hi: 0x1e56, Stride: 0x1},
5310 unicode.Range16{Lo: 0x1e58, Hi: 0x1e58, Stride: 0x1},
5311 unicode.Range16{Lo: 0x1e5a, Hi: 0x1e5a, Stride: 0x1},
5312 unicode.Range16{Lo: 0x1e5c, Hi: 0x1e5c, Stride: 0x1},
5313 unicode.Range16{Lo: 0x1e5e, Hi: 0x1e5e, Stride: 0x1},
5314 unicode.Range16{Lo: 0x1e60, Hi: 0x1e60, Stride: 0x1},
5315 unicode.Range16{Lo: 0x1e62, Hi: 0x1e62, Stride: 0x1},
5316 unicode.Range16{Lo: 0x1e64, Hi: 0x1e64, Stride: 0x1},
5317 unicode.Range16{Lo: 0x1e66, Hi: 0x1e66, Stride: 0x1},
5318 unicode.Range16{Lo: 0x1e68, Hi: 0x1e68, Stride: 0x1},
5319 unicode.Range16{Lo: 0x1e6a, Hi: 0x1e6a, Stride: 0x1},
5320 unicode.Range16{Lo: 0x1e6c, Hi: 0x1e6c, Stride: 0x1},
5321 unicode.Range16{Lo: 0x1e6e, Hi: 0x1e6e, Stride: 0x1},
5322 unicode.Range16{Lo: 0x1e70, Hi: 0x1e70, Stride: 0x1},
5323 unicode.Range16{Lo: 0x1e72, Hi: 0x1e72, Stride: 0x1},
5324 unicode.Range16{Lo: 0x1e74, Hi: 0x1e74, Stride: 0x1},
5325 unicode.Range16{Lo: 0x1e76, Hi: 0x1e76, Stride: 0x1},
5326 unicode.Range16{Lo: 0x1e78, Hi: 0x1e78, Stride: 0x1},
5327 unicode.Range16{Lo: 0x1e7a, Hi: 0x1e7a, Stride: 0x1},
5328 unicode.Range16{Lo: 0x1e7c, Hi: 0x1e7c, Stride: 0x1},
5329 unicode.Range16{Lo: 0x1e7e, Hi: 0x1e7e, Stride: 0x1},
5330 unicode.Range16{Lo: 0x1e80, Hi: 0x1e80, Stride: 0x1},
5331 unicode.Range16{Lo: 0x1e82, Hi: 0x1e82, Stride: 0x1},
5332 unicode.Range16{Lo: 0x1e84, Hi: 0x1e84, Stride: 0x1},
5333 unicode.Range16{Lo: 0x1e86, Hi: 0x1e86, Stride: 0x1},
5334 unicode.Range16{Lo: 0x1e88, Hi: 0x1e88, Stride: 0x1},
5335 unicode.Range16{Lo: 0x1e8a, Hi: 0x1e8a, Stride: 0x1},
5336 unicode.Range16{Lo: 0x1e8c, Hi: 0x1e8c, Stride: 0x1},
5337 unicode.Range16{Lo: 0x1e8e, Hi: 0x1e8e, Stride: 0x1},
5338 unicode.Range16{Lo: 0x1e90, Hi: 0x1e90, Stride: 0x1},
5339 unicode.Range16{Lo: 0x1e92, Hi: 0x1e92, Stride: 0x1},
5340 unicode.Range16{Lo: 0x1e94, Hi: 0x1e94, Stride: 0x1},
5341 unicode.Range16{Lo: 0x1e9e, Hi: 0x1e9e, Stride: 0x1},
5342 unicode.Range16{Lo: 0x1ea0, Hi: 0x1ea0, Stride: 0x1},
5343 unicode.Range16{Lo: 0x1ea2, Hi: 0x1ea2, Stride: 0x1},
5344 unicode.Range16{Lo: 0x1ea4, Hi: 0x1ea4, Stride: 0x1},
5345 unicode.Range16{Lo: 0x1ea6, Hi: 0x1ea6, Stride: 0x1},
5346 unicode.Range16{Lo: 0x1ea8, Hi: 0x1ea8, Stride: 0x1},
5347 unicode.Range16{Lo: 0x1eaa, Hi: 0x1eaa, Stride: 0x1},
5348 unicode.Range16{Lo: 0x1eac, Hi: 0x1eac, Stride: 0x1},
5349 unicode.Range16{Lo: 0x1eae, Hi: 0x1eae, Stride: 0x1},
5350 unicode.Range16{Lo: 0x1eb0, Hi: 0x1eb0, Stride: 0x1},
5351 unicode.Range16{Lo: 0x1eb2, Hi: 0x1eb2, Stride: 0x1},
5352 unicode.Range16{Lo: 0x1eb4, Hi: 0x1eb4, Stride: 0x1},
5353 unicode.Range16{Lo: 0x1eb6, Hi: 0x1eb6, Stride: 0x1},
5354 unicode.Range16{Lo: 0x1eb8, Hi: 0x1eb8, Stride: 0x1},
5355 unicode.Range16{Lo: 0x1eba, Hi: 0x1eba, Stride: 0x1},
5356 unicode.Range16{Lo: 0x1ebc, Hi: 0x1ebc, Stride: 0x1},
5357 unicode.Range16{Lo: 0x1ebe, Hi: 0x1ebe, Stride: 0x1},
5358 unicode.Range16{Lo: 0x1ec0, Hi: 0x1ec0, Stride: 0x1},
5359 unicode.Range16{Lo: 0x1ec2, Hi: 0x1ec2, Stride: 0x1},
5360 unicode.Range16{Lo: 0x1ec4, Hi: 0x1ec4, Stride: 0x1},
5361 unicode.Range16{Lo: 0x1ec6, Hi: 0x1ec6, Stride: 0x1},
5362 unicode.Range16{Lo: 0x1ec8, Hi: 0x1ec8, Stride: 0x1},
5363 unicode.Range16{Lo: 0x1eca, Hi: 0x1eca, Stride: 0x1},
5364 unicode.Range16{Lo: 0x1ecc, Hi: 0x1ecc, Stride: 0x1},
5365 unicode.Range16{Lo: 0x1ece, Hi: 0x1ece, Stride: 0x1},
5366 unicode.Range16{Lo: 0x1ed0, Hi: 0x1ed0, Stride: 0x1},
5367 unicode.Range16{Lo: 0x1ed2, Hi: 0x1ed2, Stride: 0x1},
5368 unicode.Range16{Lo: 0x1ed4, Hi: 0x1ed4, Stride: 0x1},
5369 unicode.Range16{Lo: 0x1ed6, Hi: 0x1ed6, Stride: 0x1},
5370 unicode.Range16{Lo: 0x1ed8, Hi: 0x1ed8, Stride: 0x1},
5371 unicode.Range16{Lo: 0x1eda, Hi: 0x1eda, Stride: 0x1},
5372 unicode.Range16{Lo: 0x1edc, Hi: 0x1edc, Stride: 0x1},
5373 unicode.Range16{Lo: 0x1ede, Hi: 0x1ede, Stride: 0x1},
5374 unicode.Range16{Lo: 0x1ee0, Hi: 0x1ee0, Stride: 0x1},
5375 unicode.Range16{Lo: 0x1ee2, Hi: 0x1ee2, Stride: 0x1},
5376 unicode.Range16{Lo: 0x1ee4, Hi: 0x1ee4, Stride: 0x1},
5377 unicode.Range16{Lo: 0x1ee6, Hi: 0x1ee6, Stride: 0x1},
5378 unicode.Range16{Lo: 0x1ee8, Hi: 0x1ee8, Stride: 0x1},
5379 unicode.Range16{Lo: 0x1eea, Hi: 0x1eea, Stride: 0x1},
5380 unicode.Range16{Lo: 0x1eec, Hi: 0x1eec, Stride: 0x1},
5381 unicode.Range16{Lo: 0x1eee, Hi: 0x1eee, Stride: 0x1},
5382 unicode.Range16{Lo: 0x1ef0, Hi: 0x1ef0, Stride: 0x1},
5383 unicode.Range16{Lo: 0x1ef2, Hi: 0x1ef2, Stride: 0x1},
5384 unicode.Range16{Lo: 0x1ef4, Hi: 0x1ef4, Stride: 0x1},
5385 unicode.Range16{Lo: 0x1ef6, Hi: 0x1ef6, Stride: 0x1},
5386 unicode.Range16{Lo: 0x1ef8, Hi: 0x1ef8, Stride: 0x1},
5387 unicode.Range16{Lo: 0x1efa, Hi: 0x1efa, Stride: 0x1},
5388 unicode.Range16{Lo: 0x1efc, Hi: 0x1efc, Stride: 0x1},
5389 unicode.Range16{Lo: 0x1efe, Hi: 0x1efe, Stride: 0x1},
5390 unicode.Range16{Lo: 0x1f08, Hi: 0x1f0f, Stride: 0x1},
5391 unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1},
5392 unicode.Range16{Lo: 0x1f28, Hi: 0x1f2f, Stride: 0x1},
5393 unicode.Range16{Lo: 0x1f38, Hi: 0x1f3f, Stride: 0x1},
5394 unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1},
5395 unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1},
5396 unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1},
5397 unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1},
5398 unicode.Range16{Lo: 0x1f5f, Hi: 0x1f5f, Stride: 0x1},
5399 unicode.Range16{Lo: 0x1f68, Hi: 0x1f6f, Stride: 0x1},
5400 unicode.Range16{Lo: 0x1f88, Hi: 0x1f8f, Stride: 0x1},
5401 unicode.Range16{Lo: 0x1f98, Hi: 0x1f9f, Stride: 0x1},
5402 unicode.Range16{Lo: 0x1fa8, Hi: 0x1faf, Stride: 0x1},
5403 unicode.Range16{Lo: 0x1fb8, Hi: 0x1fbc, Stride: 0x1},
5404 unicode.Range16{Lo: 0x1fc8, Hi: 0x1fcc, Stride: 0x1},
5405 unicode.Range16{Lo: 0x1fd8, Hi: 0x1fdb, Stride: 0x1},
5406 unicode.Range16{Lo: 0x1fe8, Hi: 0x1fec, Stride: 0x1},
5407 unicode.Range16{Lo: 0x1ff8, Hi: 0x1ffc, Stride: 0x1},
5408 unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1},
5409 unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1},
5410 unicode.Range16{Lo: 0x210b, Hi: 0x210d, Stride: 0x1},
5411 unicode.Range16{Lo: 0x2110, Hi: 0x2112, Stride: 0x1},
5412 unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1},
5413 unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1},
5414 unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1},
5415 unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1},
5416 unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1},
5417 unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1},
5418 unicode.Range16{Lo: 0x2130, Hi: 0x2133, Stride: 0x1},
5419 unicode.Range16{Lo: 0x213e, Hi: 0x213f, Stride: 0x1},
5420 unicode.Range16{Lo: 0x2145, Hi: 0x2145, Stride: 0x1},
5421 unicode.Range16{Lo: 0x2160, Hi: 0x216f, Stride: 0x1},
5422 unicode.Range16{Lo: 0x2183, Hi: 0x2183, Stride: 0x1},
5423 unicode.Range16{Lo: 0x24b6, Hi: 0x24cf, Stride: 0x1},
5424 unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1},
5425 unicode.Range16{Lo: 0x2c60, Hi: 0x2c60, Stride: 0x1},
5426 unicode.Range16{Lo: 0x2c62, Hi: 0x2c64, Stride: 0x1},
5427 unicode.Range16{Lo: 0x2c67, Hi: 0x2c67, Stride: 0x1},
5428 unicode.Range16{Lo: 0x2c69, Hi: 0x2c69, Stride: 0x1},
5429 unicode.Range16{Lo: 0x2c6b, Hi: 0x2c6b, Stride: 0x1},
5430 unicode.Range16{Lo: 0x2c6d, Hi: 0x2c70, Stride: 0x1},
5431 unicode.Range16{Lo: 0x2c72, Hi: 0x2c72, Stride: 0x1},
5432 unicode.Range16{Lo: 0x2c75, Hi: 0x2c75, Stride: 0x1},
5433 unicode.Range16{Lo: 0x2c7e, Hi: 0x2c80, Stride: 0x1},
5434 unicode.Range16{Lo: 0x2c82, Hi: 0x2c82, Stride: 0x1},
5435 unicode.Range16{Lo: 0x2c84, Hi: 0x2c84, Stride: 0x1},
5436 unicode.Range16{Lo: 0x2c86, Hi: 0x2c86, Stride: 0x1},
5437 unicode.Range16{Lo: 0x2c88, Hi: 0x2c88, Stride: 0x1},
5438 unicode.Range16{Lo: 0x2c8a, Hi: 0x2c8a, Stride: 0x1},
5439 unicode.Range16{Lo: 0x2c8c, Hi: 0x2c8c, Stride: 0x1},
5440 unicode.Range16{Lo: 0x2c8e, Hi: 0x2c8e, Stride: 0x1},
5441 unicode.Range16{Lo: 0x2c90, Hi: 0x2c90, Stride: 0x1},
5442 unicode.Range16{Lo: 0x2c92, Hi: 0x2c92, Stride: 0x1},
5443 unicode.Range16{Lo: 0x2c94, Hi: 0x2c94, Stride: 0x1},
5444 unicode.Range16{Lo: 0x2c96, Hi: 0x2c96, Stride: 0x1},
5445 unicode.Range16{Lo: 0x2c98, Hi: 0x2c98, Stride: 0x1},
5446 unicode.Range16{Lo: 0x2c9a, Hi: 0x2c9a, Stride: 0x1},
5447 unicode.Range16{Lo: 0x2c9c, Hi: 0x2c9c, Stride: 0x1},
5448 unicode.Range16{Lo: 0x2c9e, Hi: 0x2c9e, Stride: 0x1},
5449 unicode.Range16{Lo: 0x2ca0, Hi: 0x2ca0, Stride: 0x1},
5450 unicode.Range16{Lo: 0x2ca2, Hi: 0x2ca2, Stride: 0x1},
5451 unicode.Range16{Lo: 0x2ca4, Hi: 0x2ca4, Stride: 0x1},
5452 unicode.Range16{Lo: 0x2ca6, Hi: 0x2ca6, Stride: 0x1},
5453 unicode.Range16{Lo: 0x2ca8, Hi: 0x2ca8, Stride: 0x1},
5454 unicode.Range16{Lo: 0x2caa, Hi: 0x2caa, Stride: 0x1},
5455 unicode.Range16{Lo: 0x2cac, Hi: 0x2cac, Stride: 0x1},
5456 unicode.Range16{Lo: 0x2cae, Hi: 0x2cae, Stride: 0x1},
5457 unicode.Range16{Lo: 0x2cb0, Hi: 0x2cb0, Stride: 0x1},
5458 unicode.Range16{Lo: 0x2cb2, Hi: 0x2cb2, Stride: 0x1},
5459 unicode.Range16{Lo: 0x2cb4, Hi: 0x2cb4, Stride: 0x1},
5460 unicode.Range16{Lo: 0x2cb6, Hi: 0x2cb6, Stride: 0x1},
5461 unicode.Range16{Lo: 0x2cb8, Hi: 0x2cb8, Stride: 0x1},
5462 unicode.Range16{Lo: 0x2cba, Hi: 0x2cba, Stride: 0x1},
5463 unicode.Range16{Lo: 0x2cbc, Hi: 0x2cbc, Stride: 0x1},
5464 unicode.Range16{Lo: 0x2cbe, Hi: 0x2cbe, Stride: 0x1},
5465 unicode.Range16{Lo: 0x2cc0, Hi: 0x2cc0, Stride: 0x1},
5466 unicode.Range16{Lo: 0x2cc2, Hi: 0x2cc2, Stride: 0x1},
5467 unicode.Range16{Lo: 0x2cc4, Hi: 0x2cc4, Stride: 0x1},
5468 unicode.Range16{Lo: 0x2cc6, Hi: 0x2cc6, Stride: 0x1},
5469 unicode.Range16{Lo: 0x2cc8, Hi: 0x2cc8, Stride: 0x1},
5470 unicode.Range16{Lo: 0x2cca, Hi: 0x2cca, Stride: 0x1},
5471 unicode.Range16{Lo: 0x2ccc, Hi: 0x2ccc, Stride: 0x1},
5472 unicode.Range16{Lo: 0x2cce, Hi: 0x2cce, Stride: 0x1},
5473 unicode.Range16{Lo: 0x2cd0, Hi: 0x2cd0, Stride: 0x1},
5474 unicode.Range16{Lo: 0x2cd2, Hi: 0x2cd2, Stride: 0x1},
5475 unicode.Range16{Lo: 0x2cd4, Hi: 0x2cd4, Stride: 0x1},
5476 unicode.Range16{Lo: 0x2cd6, Hi: 0x2cd6, Stride: 0x1},
5477 unicode.Range16{Lo: 0x2cd8, Hi: 0x2cd8, Stride: 0x1},
5478 unicode.Range16{Lo: 0x2cda, Hi: 0x2cda, Stride: 0x1},
5479 unicode.Range16{Lo: 0x2cdc, Hi: 0x2cdc, Stride: 0x1},
5480 unicode.Range16{Lo: 0x2cde, Hi: 0x2cde, Stride: 0x1},
5481 unicode.Range16{Lo: 0x2ce0, Hi: 0x2ce0, Stride: 0x1},
5482 unicode.Range16{Lo: 0x2ce2, Hi: 0x2ce2, Stride: 0x1},
5483 unicode.Range16{Lo: 0x2ceb, Hi: 0x2ceb, Stride: 0x1},
5484 unicode.Range16{Lo: 0x2ced, Hi: 0x2ced, Stride: 0x1},
5485 unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf2, Stride: 0x1},
5486 unicode.Range16{Lo: 0xa640, Hi: 0xa640, Stride: 0x1},
5487 unicode.Range16{Lo: 0xa642, Hi: 0xa642, Stride: 0x1},
5488 unicode.Range16{Lo: 0xa644, Hi: 0xa644, Stride: 0x1},
5489 unicode.Range16{Lo: 0xa646, Hi: 0xa646, Stride: 0x1},
5490 unicode.Range16{Lo: 0xa648, Hi: 0xa648, Stride: 0x1},
5491 unicode.Range16{Lo: 0xa64a, Hi: 0xa64a, Stride: 0x1},
5492 unicode.Range16{Lo: 0xa64c, Hi: 0xa64c, Stride: 0x1},
5493 unicode.Range16{Lo: 0xa64e, Hi: 0xa64e, Stride: 0x1},
5494 unicode.Range16{Lo: 0xa650, Hi: 0xa650, Stride: 0x1},
5495 unicode.Range16{Lo: 0xa652, Hi: 0xa652, Stride: 0x1},
5496 unicode.Range16{Lo: 0xa654, Hi: 0xa654, Stride: 0x1},
5497 unicode.Range16{Lo: 0xa656, Hi: 0xa656, Stride: 0x1},
5498 unicode.Range16{Lo: 0xa658, Hi: 0xa658, Stride: 0x1},
5499 unicode.Range16{Lo: 0xa65a, Hi: 0xa65a, Stride: 0x1},
5500 unicode.Range16{Lo: 0xa65c, Hi: 0xa65c, Stride: 0x1},
5501 unicode.Range16{Lo: 0xa65e, Hi: 0xa65e, Stride: 0x1},
5502 unicode.Range16{Lo: 0xa660, Hi: 0xa660, Stride: 0x1},
5503 unicode.Range16{Lo: 0xa662, Hi: 0xa662, Stride: 0x1},
5504 unicode.Range16{Lo: 0xa664, Hi: 0xa664, Stride: 0x1},
5505 unicode.Range16{Lo: 0xa666, Hi: 0xa666, Stride: 0x1},
5506 unicode.Range16{Lo: 0xa668, Hi: 0xa668, Stride: 0x1},
5507 unicode.Range16{Lo: 0xa66a, Hi: 0xa66a, Stride: 0x1},
5508 unicode.Range16{Lo: 0xa66c, Hi: 0xa66c, Stride: 0x1},
5509 unicode.Range16{Lo: 0xa680, Hi: 0xa680, Stride: 0x1},
5510 unicode.Range16{Lo: 0xa682, Hi: 0xa682, Stride: 0x1},
5511 unicode.Range16{Lo: 0xa684, Hi: 0xa684, Stride: 0x1},
5512 unicode.Range16{Lo: 0xa686, Hi: 0xa686, Stride: 0x1},
5513 unicode.Range16{Lo: 0xa688, Hi: 0xa688, Stride: 0x1},
5514 unicode.Range16{Lo: 0xa68a, Hi: 0xa68a, Stride: 0x1},
5515 unicode.Range16{Lo: 0xa68c, Hi: 0xa68c, Stride: 0x1},
5516 unicode.Range16{Lo: 0xa68e, Hi: 0xa68e, Stride: 0x1},
5517 unicode.Range16{Lo: 0xa690, Hi: 0xa690, Stride: 0x1},
5518 unicode.Range16{Lo: 0xa692, Hi: 0xa692, Stride: 0x1},
5519 unicode.Range16{Lo: 0xa694, Hi: 0xa694, Stride: 0x1},
5520 unicode.Range16{Lo: 0xa696, Hi: 0xa696, Stride: 0x1},
5521 unicode.Range16{Lo: 0xa698, Hi: 0xa698, Stride: 0x1},
5522 unicode.Range16{Lo: 0xa69a, Hi: 0xa69a, Stride: 0x1},
5523 unicode.Range16{Lo: 0xa722, Hi: 0xa722, Stride: 0x1},
5524 unicode.Range16{Lo: 0xa724, Hi: 0xa724, Stride: 0x1},
5525 unicode.Range16{Lo: 0xa726, Hi: 0xa726, Stride: 0x1},
5526 unicode.Range16{Lo: 0xa728, Hi: 0xa728, Stride: 0x1},
5527 unicode.Range16{Lo: 0xa72a, Hi: 0xa72a, Stride: 0x1},
5528 unicode.Range16{Lo: 0xa72c, Hi: 0xa72c, Stride: 0x1},
5529 unicode.Range16{Lo: 0xa72e, Hi: 0xa72e, Stride: 0x1},
5530 unicode.Range16{Lo: 0xa732, Hi: 0xa732, Stride: 0x1},
5531 unicode.Range16{Lo: 0xa734, Hi: 0xa734, Stride: 0x1},
5532 unicode.Range16{Lo: 0xa736, Hi: 0xa736, Stride: 0x1},
5533 unicode.Range16{Lo: 0xa738, Hi: 0xa738, Stride: 0x1},
5534 unicode.Range16{Lo: 0xa73a, Hi: 0xa73a, Stride: 0x1},
5535 unicode.Range16{Lo: 0xa73c, Hi: 0xa73c, Stride: 0x1},
5536 unicode.Range16{Lo: 0xa73e, Hi: 0xa73e, Stride: 0x1},
5537 unicode.Range16{Lo: 0xa740, Hi: 0xa740, Stride: 0x1},
5538 unicode.Range16{Lo: 0xa742, Hi: 0xa742, Stride: 0x1},
5539 unicode.Range16{Lo: 0xa744, Hi: 0xa744, Stride: 0x1},
5540 unicode.Range16{Lo: 0xa746, Hi: 0xa746, Stride: 0x1},
5541 unicode.Range16{Lo: 0xa748, Hi: 0xa748, Stride: 0x1},
5542 unicode.Range16{Lo: 0xa74a, Hi: 0xa74a, Stride: 0x1},
5543 unicode.Range16{Lo: 0xa74c, Hi: 0xa74c, Stride: 0x1},
5544 unicode.Range16{Lo: 0xa74e, Hi: 0xa74e, Stride: 0x1},
5545 unicode.Range16{Lo: 0xa750, Hi: 0xa750, Stride: 0x1},
5546 unicode.Range16{Lo: 0xa752, Hi: 0xa752, Stride: 0x1},
5547 unicode.Range16{Lo: 0xa754, Hi: 0xa754, Stride: 0x1},
5548 unicode.Range16{Lo: 0xa756, Hi: 0xa756, Stride: 0x1},
5549 unicode.Range16{Lo: 0xa758, Hi: 0xa758, Stride: 0x1},
5550 unicode.Range16{Lo: 0xa75a, Hi: 0xa75a, Stride: 0x1},
5551 unicode.Range16{Lo: 0xa75c, Hi: 0xa75c, Stride: 0x1},
5552 unicode.Range16{Lo: 0xa75e, Hi: 0xa75e, Stride: 0x1},
5553 unicode.Range16{Lo: 0xa760, Hi: 0xa760, Stride: 0x1},
5554 unicode.Range16{Lo: 0xa762, Hi: 0xa762, Stride: 0x1},
5555 unicode.Range16{Lo: 0xa764, Hi: 0xa764, Stride: 0x1},
5556 unicode.Range16{Lo: 0xa766, Hi: 0xa766, Stride: 0x1},
5557 unicode.Range16{Lo: 0xa768, Hi: 0xa768, Stride: 0x1},
5558 unicode.Range16{Lo: 0xa76a, Hi: 0xa76a, Stride: 0x1},
5559 unicode.Range16{Lo: 0xa76c, Hi: 0xa76c, Stride: 0x1},
5560 unicode.Range16{Lo: 0xa76e, Hi: 0xa76e, Stride: 0x1},
5561 unicode.Range16{Lo: 0xa779, Hi: 0xa779, Stride: 0x1},
5562 unicode.Range16{Lo: 0xa77b, Hi: 0xa77b, Stride: 0x1},
5563 unicode.Range16{Lo: 0xa77d, Hi: 0xa77e, Stride: 0x1},
5564 unicode.Range16{Lo: 0xa780, Hi: 0xa780, Stride: 0x1},
5565 unicode.Range16{Lo: 0xa782, Hi: 0xa782, Stride: 0x1},
5566 unicode.Range16{Lo: 0xa784, Hi: 0xa784, Stride: 0x1},
5567 unicode.Range16{Lo: 0xa786, Hi: 0xa786, Stride: 0x1},
5568 unicode.Range16{Lo: 0xa78b, Hi: 0xa78b, Stride: 0x1},
5569 unicode.Range16{Lo: 0xa78d, Hi: 0xa78d, Stride: 0x1},
5570 unicode.Range16{Lo: 0xa790, Hi: 0xa790, Stride: 0x1},
5571 unicode.Range16{Lo: 0xa792, Hi: 0xa792, Stride: 0x1},
5572 unicode.Range16{Lo: 0xa796, Hi: 0xa796, Stride: 0x1},
5573 unicode.Range16{Lo: 0xa798, Hi: 0xa798, Stride: 0x1},
5574 unicode.Range16{Lo: 0xa79a, Hi: 0xa79a, Stride: 0x1},
5575 unicode.Range16{Lo: 0xa79c, Hi: 0xa79c, Stride: 0x1},
5576 unicode.Range16{Lo: 0xa79e, Hi: 0xa79e, Stride: 0x1},
5577 unicode.Range16{Lo: 0xa7a0, Hi: 0xa7a0, Stride: 0x1},
5578 unicode.Range16{Lo: 0xa7a2, Hi: 0xa7a2, Stride: 0x1},
5579 unicode.Range16{Lo: 0xa7a4, Hi: 0xa7a4, Stride: 0x1},
5580 unicode.Range16{Lo: 0xa7a6, Hi: 0xa7a6, Stride: 0x1},
5581 unicode.Range16{Lo: 0xa7a8, Hi: 0xa7a8, Stride: 0x1},
5582 unicode.Range16{Lo: 0xa7aa, Hi: 0xa7ae, Stride: 0x1},
5583 unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b4, Stride: 0x1},
5584 unicode.Range16{Lo: 0xa7b6, Hi: 0xa7b6, Stride: 0x1},
5585 unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1},
5586 },
5587 R32: []unicode.Range32{
5588 unicode.Range32{Lo: 0x10400, Hi: 0x10427, Stride: 0x1},
5589 unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1},
5590 unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1},
5591 unicode.Range32{Lo: 0x118a0, Hi: 0x118bf, Stride: 0x1},
5592 unicode.Range32{Lo: 0x1d400, Hi: 0x1d419, Stride: 0x1},
5593 unicode.Range32{Lo: 0x1d434, Hi: 0x1d44d, Stride: 0x1},
5594 unicode.Range32{Lo: 0x1d468, Hi: 0x1d481, Stride: 0x1},
5595 unicode.Range32{Lo: 0x1d49c, Hi: 0x1d49c, Stride: 0x1},
5596 unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1},
5597 unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1},
5598 unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1},
5599 unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1},
5600 unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b5, Stride: 0x1},
5601 unicode.Range32{Lo: 0x1d4d0, Hi: 0x1d4e9, Stride: 0x1},
5602 unicode.Range32{Lo: 0x1d504, Hi: 0x1d505, Stride: 0x1},
5603 unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1},
5604 unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1},
5605 unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1},
5606 unicode.Range32{Lo: 0x1d538, Hi: 0x1d539, Stride: 0x1},
5607 unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1},
5608 unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1},
5609 unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1},
5610 unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1},
5611 unicode.Range32{Lo: 0x1d56c, Hi: 0x1d585, Stride: 0x1},
5612 unicode.Range32{Lo: 0x1d5a0, Hi: 0x1d5b9, Stride: 0x1},
5613 unicode.Range32{Lo: 0x1d5d4, Hi: 0x1d5ed, Stride: 0x1},
5614 unicode.Range32{Lo: 0x1d608, Hi: 0x1d621, Stride: 0x1},
5615 unicode.Range32{Lo: 0x1d63c, Hi: 0x1d655, Stride: 0x1},
5616 unicode.Range32{Lo: 0x1d670, Hi: 0x1d689, Stride: 0x1},
5617 unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1},
5618 unicode.Range32{Lo: 0x1d6e2, Hi: 0x1d6fa, Stride: 0x1},
5619 unicode.Range32{Lo: 0x1d71c, Hi: 0x1d734, Stride: 0x1},
5620 unicode.Range32{Lo: 0x1d756, Hi: 0x1d76e, Stride: 0x1},
5621 unicode.Range32{Lo: 0x1d790, Hi: 0x1d7a8, Stride: 0x1},
5622 unicode.Range32{Lo: 0x1d7ca, Hi: 0x1d7ca, Stride: 0x1},
5623 unicode.Range32{Lo: 0x1e900, Hi: 0x1e921, Stride: 0x1},
5624 unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
5625 unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
5626 unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
5627 },
5628 LatinOffset: 3,
5629}
5630
5631type _SentenceRuneRange unicode.RangeTable
5632
5633func _SentenceRuneType(r rune) *_SentenceRuneRange {
5634 switch {
5635 case unicode.Is(_SentenceATerm, r):
5636 return (*_SentenceRuneRange)(_SentenceATerm)
5637 case unicode.Is(_SentenceCR, r):
5638 return (*_SentenceRuneRange)(_SentenceCR)
5639 case unicode.Is(_SentenceClose, r):
5640 return (*_SentenceRuneRange)(_SentenceClose)
5641 case unicode.Is(_SentenceExtend, r):
5642 return (*_SentenceRuneRange)(_SentenceExtend)
5643 case unicode.Is(_SentenceFormat, r):
5644 return (*_SentenceRuneRange)(_SentenceFormat)
5645 case unicode.Is(_SentenceLF, r):
5646 return (*_SentenceRuneRange)(_SentenceLF)
5647 case unicode.Is(_SentenceLower, r):
5648 return (*_SentenceRuneRange)(_SentenceLower)
5649 case unicode.Is(_SentenceNumeric, r):
5650 return (*_SentenceRuneRange)(_SentenceNumeric)
5651 case unicode.Is(_SentenceOLetter, r):
5652 return (*_SentenceRuneRange)(_SentenceOLetter)
5653 case unicode.Is(_SentenceSContinue, r):
5654 return (*_SentenceRuneRange)(_SentenceSContinue)
5655 case unicode.Is(_SentenceSTerm, r):
5656 return (*_SentenceRuneRange)(_SentenceSTerm)
5657 case unicode.Is(_SentenceSep, r):
5658 return (*_SentenceRuneRange)(_SentenceSep)
5659 case unicode.Is(_SentenceSp, r):
5660 return (*_SentenceRuneRange)(_SentenceSp)
5661 case unicode.Is(_SentenceUpper, r):
5662 return (*_SentenceRuneRange)(_SentenceUpper)
5663 default:
5664 return nil
5665 }
5666}
5667func (rng *_SentenceRuneRange) String() string {
5668 switch (*unicode.RangeTable)(rng) {
5669 case _SentenceATerm:
5670 return "ATerm"
5671 case _SentenceCR:
5672 return "CR"
5673 case _SentenceClose:
5674 return "Close"
5675 case _SentenceExtend:
5676 return "Extend"
5677 case _SentenceFormat:
5678 return "Format"
5679 case _SentenceLF:
5680 return "LF"
5681 case _SentenceLower:
5682 return "Lower"
5683 case _SentenceNumeric:
5684 return "Numeric"
5685 case _SentenceOLetter:
5686 return "OLetter"
5687 case _SentenceSContinue:
5688 return "SContinue"
5689 case _SentenceSTerm:
5690 return "STerm"
5691 case _SentenceSep:
5692 return "Sep"
5693 case _SentenceSp:
5694 return "Sp"
5695 case _SentenceUpper:
5696 return "Upper"
5697 default:
5698 return "Other"
5699 }
5700}
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb b/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb
new file mode 100644
index 0000000..422e4e5
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb
@@ -0,0 +1,335 @@
1#!/usr/bin/env ruby
2#
3# This script has been updated to accept more command-line arguments:
4#
5# -u, --url URL to process
6# -m, --machine Machine name
7# -p, --properties Properties to add to the machine
8# -o, --output Write output to file
9#
10# Updated by: Marty Schoch <marty.schoch@gmail.com>
11#
12# This script uses the unicode spec to generate a Ragel state machine
13# that recognizes unicode alphanumeric characters. It generates 5
14# character classes: uupper, ulower, ualpha, udigit, and ualnum.
15# Currently supported encodings are UTF-8 [default] and UCS-4.
16#
17# Usage: unicode2ragel.rb [options]
18# -e, --encoding [ucs4 | utf8] Data encoding
19# -h, --help Show this message
20#
21# This script was originally written as part of the Ferret search
22# engine library.
23#
24# Author: Rakan El-Khalil <rakan@well.com>
25
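# Example invocation (a hypothetical sketch; the property name and output
# file below are illustrative only, paired with the default chart URL and
# machine name defined further down in this script):
#
#   ./unicode2ragel.rb -e utf8 \
#     -u http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt \
#     -m WChar -p Alphabetic -o wchar.rl
#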
26require 'optparse'
27require 'open-uri'
28
29ENCODINGS = [ :utf8, :ucs4 ]
30ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
31DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
32DEFAULT_MACHINE_NAME= "WChar"
33
34###
35# Display vars & default option
36
37TOTAL_WIDTH = 80
38RANGE_WIDTH = 23
39@encoding = :utf8
40@chart_url = DEFAULT_CHART_URL
41machine_name = DEFAULT_MACHINE_NAME
42properties = []
43@output = $stdout
44
45###
46# Option parsing
47
48cli_opts = OptionParser.new do |opts|
49 opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
50 @encoding = o.downcase.to_sym
51 end
52 opts.on("-h", "--help", "Show this message") do
53 puts opts
54 exit
55 end
56 opts.on("-u", "--url URL", "URL to process") do |o|
57 @chart_url = o
58 end
59 opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
60 machine_name = o
61 end
62 opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
63 properties = o
64 end
65 opts.on("-o", "--output FILE", "output file") do |o|
66 @output = File.new(o, "w+")
67 end
68end
69
70cli_opts.parse(ARGV)
71unless ENCODINGS.member? @encoding
72 puts "Invalid encoding: #{@encoding}"
73 puts cli_opts
74 exit
75end
76
77##
78# Downloads the document at url and yields every alpha line's hex
79# range and description.
80
81def each_alpha( url, property )
82 open( url ) do |file|
83 file.each_line do |line|
84 next if line =~ /^#/;
85 next if line !~ /; #{property} #/;
86
87 range, description = line.split(/;/)
88 range.strip!
89 description.gsub!(/.*#/, '').strip!
90
91 if range =~ /\.\./
92 start, stop = range.split '..'
93 else start = stop = range
94 end
95
96 yield start.hex .. stop.hex, description
97 end
98 end
99end
100
101###
102# Formats to hex at minimum width
103
104def to_hex( n )
105 r = "%0X" % n
106 r = "0#{r}" unless (r.length % 2).zero?
107 r
108end
109
110###
111# UCS4 is just a straight hex conversion of the unicode codepoint.
112
113def to_ucs4( range )
114 rangestr = "0x" + to_hex(range.begin)
115 rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
116 [ rangestr ]
117end
118
119##
120# 0x00 - 0x7f -> 0zzzzzzz[7]
121# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
122# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
123# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
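#
# Worked example (added for illustration): U+00E9 falls in the two-byte
# range, so to_utf8_enc(0xE9) below computes y = 0xC0 | (0xE9 >> 6) = 0xC3
# and z = 0x80 | (0xE9 & 0x3F) = 0xA9, returning the hex string "C3A9".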
124
125UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
126
127def to_utf8_enc( n )
128 r = 0
129 if n <= 0x7f
130 r = n
131 elsif n <= 0x7ff
132 y = 0xc0 | (n >> 6)
133 z = 0x80 | (n & 0x3f)
134 r = y << 8 | z
135 elsif n <= 0xffff
136 x = 0xe0 | (n >> 12)
137 y = 0x80 | (n >> 6) & 0x3f
138 z = 0x80 | n & 0x3f
139 r = x << 16 | y << 8 | z
140 elsif n <= 0x10ffff
141 w = 0xf0 | (n >> 18)
142 x = 0x80 | (n >> 12) & 0x3f
143 y = 0x80 | (n >> 6) & 0x3f
144 z = 0x80 | n & 0x3f
145 r = w << 24 | x << 16 | y << 8 | z
146 end
147
148 to_hex(r)
149end
150
151def from_utf8_enc( n )
152 n = n.hex
153 r = 0
154 if n <= 0x7f
155 r = n
156 elsif n <= 0xdfff
157 y = (n >> 8) & 0x1f
158 z = n & 0x3f
159 r = y << 6 | z
160 elsif n <= 0xefffff
161 x = (n >> 16) & 0x0f
162 y = (n >> 8) & 0x3f
163 z = n & 0x3f
164 r = x << 10 | y << 6 | z
165 elsif n <= 0xf7ffffff
166 w = (n >> 24) & 0x07
167 x = (n >> 16) & 0x3f
168 y = (n >> 8) & 0x3f
169 z = n & 0x3f
170 r = w << 18 | x << 12 | y << 6 | z
171 end
172 r
173end
174
175###
176# Given a range, splits it up into ranges that can be continuously
177# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
178# This is not strictly needed since the current [5.1] unicode standard
179# doesn't have ranges that straddle utf8 boundaries. This is included
180# for completeness as there is no telling if that will ever change.
181
182def utf8_ranges( range )
183 ranges = []
184 UTF8_BOUNDARIES.each do |max|
185 if range.begin <= max
186 if range.end <= max
187 ranges << range
188 return ranges
189 end
190
191 ranges << (range.begin .. max)
192 range = (max + 1) .. range.end
193 end
194 end
195 ranges
196end
197
198def build_range( start, stop )
199 size = start.size/2
200 left = size - 1
201 return [""] if size < 1
202
203 a = start[0..1]
204 b = stop[0..1]
205
206 ###
207 # Shared prefix
208
209 if a == b
210 return build_range(start[2..-1], stop[2..-1]).map do |elt|
211 "0x#{a} " + elt
212 end
213 end
214
215 ###
216 # Unshared prefix, end of run
217
218 return ["0x#{a}..0x#{b} "] if left.zero?
219
220 ###
221 # Unshared prefix, not end of run
222 # Range can be 0x123456..0x56789A
223 # Which is equivalent to:
224 # 0x123456 .. 0x12FFFF
225 # 0x130000 .. 0x55FFFF
226 # 0x560000 .. 0x56789A
227
228 ret = []
229 ret << build_range(start, a + "FF" * left)
230
231 ###
232 # Only generate middle range if need be.
233
234 if a.hex+1 != b.hex
235 max = to_hex(b.hex - 1)
236 max = "FF" if b == "FF"
237 ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
238 end
239
240 ###
241 # Don't generate last range if it is covered by first range
242
243 ret << build_range(b + "00" * left, stop) unless b == "FF"
244 ret.flatten!
245end
246
247def to_utf8( range )
248 utf8_ranges( range ).map do |r|
249 begin_enc = to_utf8_enc(r.begin)
250 end_enc = to_utf8_enc(r.end)
251 build_range begin_enc, end_enc
252 end.flatten!
253end
254
255##
256# Perform a 3-way comparison of the number of codepoints advertised by
257# the unicode spec for the given range, the originally parsed range,
258# and the resulting utf8 encoded range.
259
260def count_codepoints( code )
261 code.split(' ').inject(1) do |acc, elt|
262 if elt =~ /0x(.+)\.\.0x(.+)/
263 if @encoding == :utf8
264 acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
265 else
266 acc * ($2.hex - $1.hex + 1)
267 end
268 else
269 acc
270 end
271 end
272end
273
274def is_valid?( range, desc, codes )
275 spec_count = 1
276 spec_count = $1.to_i if desc =~ /\[(\d+)\]/
277 range_count = range.end - range.begin + 1
278
279 sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
280 sum == spec_count and sum == range_count
281end
282
283##
284# Generate the state machine to the configured output
285
286def generate_machine( name, property )
287 pipe = " "
288 @output.puts " #{name} = "
289 each_alpha( @chart_url, property ) do |range, desc|
290
291 codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
292
293 #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
294 # is_valid? range, desc, codes
295
296 range_width = codes.map { |a| a.size }.max
297 range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
298
299 desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
300 desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
301
302 if desc.size > desc_width
303 desc = desc[0..desc_width - 4] + "..."
304 end
305
306 codes.each_with_index do |r, idx|
307 desc = "" unless idx.zero?
308 code = "%-#{range_width}s" % r
309 @output.puts " #{pipe} #{code} ##{desc}"
310 pipe = "|"
311 end
312 end
313 @output.puts " ;"
314 @output.puts ""
315end
316
317@output.puts <<EOF
318# The following Ragel file was autogenerated with #{$0}
319# from: #{@chart_url}
320#
321# It defines #{properties}.
322#
323# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
324# and that your input is in #{@encoding}.
325
326%%{
327 machine #{machine_name};
328
329EOF
330
331properties.each { |x| generate_machine( x, x ) }
332
333@output.puts <<EOF
334}%%
335EOF
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go b/vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go
new file mode 100644
index 0000000..6b14bef
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go
@@ -0,0 +1,19 @@
1package textseg
2
3import "unicode/utf8"
4
5// ScanUTF8Sequences is a split function for bufio.Scanner that splits
6// on UTF8 sequence boundaries.
7//
8// This is included largely for completeness, since this behavior is already
9// built into Go when ranging over a string.
10func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
11 if len(data) == 0 {
12 return 0, nil, nil
13 }
14 r, seqLen := utf8.DecodeRune(data)
15 if r == utf8.RuneError && !atEOF {
16 return 0, nil, nil
17 }
18 return seqLen, data[:seqLen], nil
19}
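// Example use with bufio.Scanner (an illustrative sketch, not part of the
// upstream file; it assumes the caller imports bufio, fmt, and strings):
//
//	sc := bufio.NewScanner(strings.NewReader("héllo"))
//	sc.Split(textseg.ScanUTF8Sequences)
//	for sc.Scan() {
//		fmt.Printf("%q\n", sc.Text()) // one UTF-8 sequence per token
//	}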
diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.gitignore
@@ -0,0 +1,22 @@
1# Compiled Object files, Static and Dynamic libs (Shared Objects)
2*.o
3*.a
4*.so
5
6# Folders
7_obj
8_test
9
10# Architecture specific extensions/prefixes
11*.[568vq]
12[568vq].out
13
14*.cgo1.go
15*.cgo2.c
16_cgo_defun.c
17_cgo_gotypes.go
18_cgo_export.*
19
20_testmain.go
21
22*.exe
diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml
new file mode 100644
index 0000000..1a0bbea
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.travis.yml
@@ -0,0 +1,3 @@
1language: go
2go:
3 - tip
diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE
new file mode 100644
index 0000000..a5df10e
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/LICENSE
@@ -0,0 +1,20 @@
1The MIT License (MIT)
2
3Copyright (c) 2014 Armon Dadgar
4
5Permission is hereby granted, free of charge, to any person obtaining a copy of
6this software and associated documentation files (the "Software"), to deal in
7the Software without restriction, including without limitation the rights to
8use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9the Software, and to permit persons to whom the Software is furnished to do so,
10subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in all
13copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md
new file mode 100644
index 0000000..26f42a2
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/README.md
@@ -0,0 +1,38 @@
1go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
2=========
3
4Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
5The package only provides a single `Tree` implementation, optimized for sparse nodes.
6
7As a radix tree, it provides the following:
8 * O(k) operations. In many cases, this can be faster than a hash table since
9 the hash function is an O(k) operation, and hash tables have very poor cache locality.
10 * Minimum / Maximum value lookups
11 * Ordered iteration
12
13For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
14
15Documentation
16=============
17
18The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).
19
20Example
21=======
22
23Below is a simple example of usage:
24
25```go
26// Create a tree
27r := radix.New()
28r.Insert("foo", 1)
29r.Insert("bar", 2)
30r.Insert("foobar", 2)
31
32// Find the longest prefix match
33m, _, _ := r.LongestPrefix("foozip")
34if m != "foo" {
35 panic("should be foo")
36}
37```
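
The ordered iteration and minimum/maximum lookups mentioned above could be exercised with a minimal sketch like the following, continuing the example and assuming the package's `Walk` and `Minimum` methods (not shown in this excerpt):

```go
// Sketch only: Walk and Minimum are assumed from the feature list above.
r.Walk(func(k string, v interface{}) bool {
	fmt.Printf("%s=%v\n", k, v) // visited in key order
	return false                // false means keep iterating
})

if k, _, ok := r.Minimum(); ok {
	fmt.Println("smallest key:", k)
}
```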
38
diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go
new file mode 100644
index 0000000..d2914c1
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/radix.go
@@ -0,0 +1,496 @@
1package radix
2
3import (
4 "sort"
5 "strings"
6)
7
8// WalkFn is used when walking the tree. Takes a
9// key and value, returning true if iteration should
10// be terminated.
11type WalkFn func(s string, v interface{}) bool
12
13// leafNode is used to represent a value
14type leafNode struct {
15 key string
16 val interface{}
17}
18
19// edge is used to represent an edge node
20type edge struct {
21 label byte
22 node *node
23}
24
25type node struct {
26 // leaf is used to store possible leaf
27 leaf *leafNode
28
29 // prefix is the common prefix we ignore
30 prefix string
31
32 // Edges should be stored in-order for iteration.
33 // We avoid a fully materialized slice to save memory,
34 // since in most cases we expect to be sparse
35 edges edges
36}
37
38func (n *node) isLeaf() bool {
39 return n.leaf != nil
40}
41
42func (n *node) addEdge(e edge) {
43 n.edges = append(n.edges, e)
44 n.edges.Sort()
45}
46
47func (n *node) replaceEdge(e edge) {
48 num := len(n.edges)
49 idx := sort.Search(num, func(i int) bool {
50 return n.edges[i].label >= e.label
51 })
52 if idx < num && n.edges[idx].label == e.label {
53 n.edges[idx].node = e.node
54 return
55 }
56 panic("replacing missing edge")
57}
58
59func (n *node) getEdge(label byte) *node {
60 num := len(n.edges)
61 idx := sort.Search(num, func(i int) bool {
62 return n.edges[i].label >= label
63 })
64 if idx < num && n.edges[idx].label == label {
65 return n.edges[idx].node
66 }
67 return nil
68}
69
70func (n *node) delEdge(label byte) {
71 num := len(n.edges)
72 idx := sort.Search(num, func(i int) bool {
73 return n.edges[i].label >= label
74 })
75 if idx < num && n.edges[idx].label == label {
76 copy(n.edges[idx:], n.edges[idx+1:])
77 n.edges[len(n.edges)-1] = edge{}
78 n.edges = n.edges[:len(n.edges)-1]
79 }
80}
81
82type edges []edge
83
84func (e edges) Len() int {
85 return len(e)
86}
87
88func (e edges) Less(i, j int) bool {
89 return e[i].label < e[j].label
90}
91
92func (e edges) Swap(i, j int) {
93 e[i], e[j] = e[j], e[i]
94}
95
96func (e edges) Sort() {
97 sort.Sort(e)
98}
99
100// Tree implements a radix tree. This can be treated as a
101// Dictionary abstract data type. The main advantages over
102// a standard hash map are prefix-based lookups and
103// ordered iteration.
104type Tree struct {
105 root *node
106 size int
107}
108
109// New returns an empty Tree
110func New() *Tree {
111 return NewFromMap(nil)
112}
113
114// NewFromMap returns a new tree containing the keys
115// from an existing map
116func NewFromMap(m map[string]interface{}) *Tree {
117 t := &Tree{root: &node{}}
118 for k, v := range m {
119 t.Insert(k, v)
120 }
121 return t
122}
123
124// Len is used to return the number of elements in the tree
125func (t *Tree) Len() int {
126 return t.size
127}
128
129// longestPrefix finds the length of the shared prefix
130// of two strings
131func longestPrefix(k1, k2 string) int {
132 max := len(k1)
133 if l := len(k2); l < max {
134 max = l
135 }
136 var i int
137 for i = 0; i < max; i++ {
138 if k1[i] != k2[i] {
139 break
140 }
141 }
142 return i
143}
144
145// Insert is used to add a new entry or update
146// an existing entry. Returns if updated.
147func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
148 var parent *node
149 n := t.root
150 search := s
151 for {
152		// Handle key exhaustion
153 if len(search) == 0 {
154 if n.isLeaf() {
155 old := n.leaf.val
156 n.leaf.val = v
157 return old, true
158 }
159
160 n.leaf = &leafNode{
161 key: s,
162 val: v,
163 }
164 t.size++
165 return nil, false
166 }
167
168 // Look for the edge
169 parent = n
170 n = n.getEdge(search[0])
171
172 // No edge, create one
173 if n == nil {
174 e := edge{
175 label: search[0],
176 node: &node{
177 leaf: &leafNode{
178 key: s,
179 val: v,
180 },
181 prefix: search,
182 },
183 }
184 parent.addEdge(e)
185 t.size++
186 return nil, false
187 }
188
189 // Determine longest prefix of the search key on match
190 commonPrefix := longestPrefix(search, n.prefix)
191 if commonPrefix == len(n.prefix) {
192 search = search[commonPrefix:]
193 continue
194 }
195
196 // Split the node
197 t.size++
198 child := &node{
199 prefix: search[:commonPrefix],
200 }
201 parent.replaceEdge(edge{
202 label: search[0],
203 node: child,
204 })
205
206 // Restore the existing node
207 child.addEdge(edge{
208 label: n.prefix[commonPrefix],
209 node: n,
210 })
211 n.prefix = n.prefix[commonPrefix:]
212
213 // Create a new leaf node
214 leaf := &leafNode{
215 key: s,
216 val: v,
217 }
218
219		// If the new key is a subset, add it to this node
220 search = search[commonPrefix:]
221 if len(search) == 0 {
222 child.leaf = leaf
223 return nil, false
224 }
225
226 // Create a new edge for the node
227 child.addEdge(edge{
228 label: search[0],
229 node: &node{
230 leaf: leaf,
231 prefix: search,
232 },
233 })
234 return nil, false
235 }
236}
237
238// Delete is used to delete a key, returning the previous
239// value and if it was deleted
240func (t *Tree) Delete(s string) (interface{}, bool) {
241 var parent *node
242 var label byte
243 n := t.root
244 search := s
245 for {
246		// Check for key exhaustion
247 if len(search) == 0 {
248 if !n.isLeaf() {
249 break
250 }
251 goto DELETE
252 }
253
254 // Look for an edge
255 parent = n
256 label = search[0]
257 n = n.getEdge(label)
258 if n == nil {
259 break
260 }
261
262 // Consume the search prefix
263 if strings.HasPrefix(search, n.prefix) {
264 search = search[len(n.prefix):]
265 } else {
266 break
267 }
268 }
269 return nil, false
270
271DELETE:
272 // Delete the leaf
273 leaf := n.leaf
274 n.leaf = nil
275 t.size--
276
277 // Check if we should delete this node from the parent
278 if parent != nil && len(n.edges) == 0 {
279 parent.delEdge(label)
280 }
281
282 // Check if we should merge this node
283 if n != t.root && len(n.edges) == 1 {
284 n.mergeChild()
285 }
286
287 // Check if we should merge the parent's other child
288 if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
289 parent.mergeChild()
290 }
291
292 return leaf.val, true
293}
294
295func (n *node) mergeChild() {
296 e := n.edges[0]
297 child := e.node
298 n.prefix = n.prefix + child.prefix
299 n.leaf = child.leaf
300 n.edges = child.edges
301}
302
303// Get is used to look up a specific key, returning
304// the value and if it was found
305func (t *Tree) Get(s string) (interface{}, bool) {
306 n := t.root
307 search := s
308 for {
309		// Check for key exhaustion
310 if len(search) == 0 {
311 if n.isLeaf() {
312 return n.leaf.val, true
313 }
314 break
315 }
316
317 // Look for an edge
318 n = n.getEdge(search[0])
319 if n == nil {
320 break
321 }
322
323 // Consume the search prefix
324 if strings.HasPrefix(search, n.prefix) {
325 search = search[len(n.prefix):]
326 } else {
327 break
328 }
329 }
330 return nil, false
331}
332
333// LongestPrefix is like Get, but instead of an
334// exact match, it will return the longest prefix match.
335func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
336 var last *leafNode
337 n := t.root
338 search := s
339 for {
340 // Look for a leaf node
341 if n.isLeaf() {
342 last = n.leaf
343 }
344
345		// Check for key exhaustion
346 if len(search) == 0 {
347 break
348 }
349
350 // Look for an edge
351 n = n.getEdge(search[0])
352 if n == nil {
353 break
354 }
355
356 // Consume the search prefix
357 if strings.HasPrefix(search, n.prefix) {
358 search = search[len(n.prefix):]
359 } else {
360 break
361 }
362 }
363 if last != nil {
364 return last.key, last.val, true
365 }
366 return "", nil, false
367}
368
369// Minimum is used to return the minimum value in the tree
370func (t *Tree) Minimum() (string, interface{}, bool) {
371 n := t.root
372 for {
373 if n.isLeaf() {
374 return n.leaf.key, n.leaf.val, true
375 }
376 if len(n.edges) > 0 {
377 n = n.edges[0].node
378 } else {
379 break
380 }
381 }
382 return "", nil, false
383}
384
385// Maximum is used to return the maximum value in the tree
386func (t *Tree) Maximum() (string, interface{}, bool) {
387 n := t.root
388 for {
389 if num := len(n.edges); num > 0 {
390 n = n.edges[num-1].node
391 continue
392 }
393 if n.isLeaf() {
394 return n.leaf.key, n.leaf.val, true
395 }
396 break
397 }
398 return "", nil, false
399}
400
401// Walk is used to walk the tree
402func (t *Tree) Walk(fn WalkFn) {
403 recursiveWalk(t.root, fn)
404}
405
406// WalkPrefix is used to walk the tree under a prefix
407func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
408 n := t.root
409 search := prefix
410 for {
411		// Check for key exhaustion
412 if len(search) == 0 {
413 recursiveWalk(n, fn)
414 return
415 }
416
417 // Look for an edge
418 n = n.getEdge(search[0])
419 if n == nil {
420 break
421 }
422
423 // Consume the search prefix
424 if strings.HasPrefix(search, n.prefix) {
425 search = search[len(n.prefix):]
426
427 } else if strings.HasPrefix(n.prefix, search) {
428 // Child may be under our search prefix
429 recursiveWalk(n, fn)
430 return
431 } else {
432 break
433 }
434 }
435
436}
437
438// WalkPath is used to walk the tree, but only visiting nodes
439// from the root down to a given leaf. Where WalkPrefix walks
440// all the entries *under* the given prefix, this walks the
441// entries *above* the given prefix.
442func (t *Tree) WalkPath(path string, fn WalkFn) {
443 n := t.root
444 search := path
445 for {
446 // Visit the leaf values if any
447 if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
448 return
449 }
450
451		// Check for key exhaustion
452 if len(search) == 0 {
453 return
454 }
455
456 // Look for an edge
457 n = n.getEdge(search[0])
458 if n == nil {
459 return
460 }
461
462 // Consume the search prefix
463 if strings.HasPrefix(search, n.prefix) {
464 search = search[len(n.prefix):]
465 } else {
466 break
467 }
468 }
469}
470
471// recursiveWalk is used to do a pre-order walk of a node
472// recursively. Returns true if the walk should be aborted
473func recursiveWalk(n *node, fn WalkFn) bool {
474 // Visit the leaf values if any
475 if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
476 return true
477 }
478
479 // Recurse on the children
480 for _, e := range n.edges {
481 if recursiveWalk(e.node, fn) {
482 return true
483 }
484 }
485 return false
486}
487
488// ToMap is used to walk the tree and convert it into a map
489func (t *Tree) ToMap() map[string]interface{} {
490 out := make(map[string]interface{}, t.size)
491 t.Walk(func(k string, v interface{}) bool {
492 out[k] = v
493 return false
494 })
495 return out
496}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 788fe6e..212fe25 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -15,6 +15,12 @@ type Config struct {
15 Endpoint string 15 Endpoint string
16 SigningRegion string 16 SigningRegion string
17 SigningName string 17 SigningName string
18
19 // States that the signing name did not come from a modeled source but
20 // was derived based on other data. Used by service client constructors
21	// to determine if the signing name can be overridden based on metadata the
22 // service has.
23 SigningNameDerived bool
18} 24}
19 25
20// ConfigProvider provides a generic way for a service client to receive 26// ConfigProvider provides a generic way for a service client to receive
@@ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() {
85 return 91 return
86 } 92 }
87 93
88 c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) 94 c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
89 c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) 95 c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
90} 96}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
index 1313478..a397b0d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -1,11 +1,11 @@
1package client 1package client
2 2
3import ( 3import (
4 "math/rand" 4 "strconv"
5 "sync"
6 "time" 5 "time"
7 6
8 "github.com/aws/aws-sdk-go/aws/request" 7 "github.com/aws/aws-sdk-go/aws/request"
8 "github.com/aws/aws-sdk-go/internal/sdkrand"
9) 9)
10 10
11// DefaultRetryer implements basic retry logic using exponential backoff for 11// DefaultRetryer implements basic retry logic using exponential backoff for
@@ -15,11 +15,11 @@ import (
15// the MaxRetries method: 15// the MaxRetries method:
16// 16//
17// type retryer struct { 17// type retryer struct {
18// service.DefaultRetryer 18// client.DefaultRetryer
19// } 19// }
20// 20//
21// // This implementation always has 100 max retries 21// // This implementation always has 100 max retries
22// func (d retryer) MaxRetries() uint { return 100 } 22// func (d retryer) MaxRetries() int { return 100 }
23type DefaultRetryer struct { 23type DefaultRetryer struct {
24 NumMaxRetries int 24 NumMaxRetries int
25} 25}
@@ -30,25 +30,27 @@ func (d DefaultRetryer) MaxRetries() int {
30 return d.NumMaxRetries 30 return d.NumMaxRetries
31} 31}
32 32
33var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
34
35// RetryRules returns the delay duration before retrying this request again 33// RetryRules returns the delay duration before retrying this request again
36func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { 34func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
37 // Set the upper limit of delay in retrying at ~five minutes 35 // Set the upper limit of delay in retrying at ~five minutes
38 minTime := 30 36 minTime := 30
39 throttle := d.shouldThrottle(r) 37 throttle := d.shouldThrottle(r)
40 if throttle { 38 if throttle {
39 if delay, ok := getRetryDelay(r); ok {
40 return delay
41 }
42
41 minTime = 500 43 minTime = 500
42 } 44 }
43 45
44 retryCount := r.RetryCount 46 retryCount := r.RetryCount
45 if retryCount > 13 { 47 if throttle && retryCount > 8 {
46 retryCount = 13
47 } else if throttle && retryCount > 8 {
48 retryCount = 8 48 retryCount = 8
49 } else if retryCount > 13 {
50 retryCount = 13
49 } 51 }
50 52
51 delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) 53 delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
52 return time.Duration(delay) * time.Millisecond 54 return time.Duration(delay) * time.Millisecond
53} 55}
54 56
@@ -60,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
60 return *r.Retryable 62 return *r.Retryable
61 } 63 }
62 64
63 if r.HTTPResponse.StatusCode >= 500 { 65 if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
64 return true 66 return true
65 } 67 }
66 return r.IsErrorRetryable() || d.shouldThrottle(r) 68 return r.IsErrorRetryable() || d.shouldThrottle(r)
@@ -68,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
68 70
69// ShouldThrottle returns true if the request should be throttled. 71// ShouldThrottle returns true if the request should be throttled.
70func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { 72func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
71 if r.HTTPResponse.StatusCode == 502 || 73 switch r.HTTPResponse.StatusCode {
72 r.HTTPResponse.StatusCode == 503 || 74 case 429:
73 r.HTTPResponse.StatusCode == 504 { 75 case 502:
74 return true 76 case 503:
77 case 504:
78 default:
79 return r.IsErrorThrottle()
75 } 80 }
76 return r.IsErrorThrottle()
77}
78 81
79// lockedSource is a thread-safe implementation of rand.Source 82 return true
80type lockedSource struct {
81 lk sync.Mutex
82 src rand.Source
83} 83}
84 84
85func (r *lockedSource) Int63() (n int64) { 85// This will look in the Retry-After header, RFC 7231, for how long
86 r.lk.Lock() 86// it will wait before attempting another request
87 n = r.src.Int63() 87func getRetryDelay(r *request.Request) (time.Duration, bool) {
88 r.lk.Unlock() 88 if !canUseRetryAfterHeader(r) {
89 return 89 return 0, false
90 }
91
92 delayStr := r.HTTPResponse.Header.Get("Retry-After")
93 if len(delayStr) == 0 {
94 return 0, false
95 }
96
97 delay, err := strconv.Atoi(delayStr)
98 if err != nil {
99 return 0, false
100 }
101
102 return time.Duration(delay) * time.Second, true
90} 103}
91 104
92func (r *lockedSource) Seed(seed int64) { 105// Will look at the status code to see if the retry header pertains to
93 r.lk.Lock() 106// the status code.
94 r.src.Seed(seed) 107func canUseRetryAfterHeader(r *request.Request) bool {
95 r.lk.Unlock() 108 switch r.HTTPResponse.StatusCode {
109 case 429:
110 case 503:
111 default:
112 return false
113 }
114
115 return true
96} 116}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
index 1f39c91..ce9fb89 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error {
44 return reader.Source.Close() 44 return reader.Source.Close()
45} 45}
46 46
47// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
48// to a service. Will include the HTTP request body if the LogLevel of the
49// request matches LogDebugWithHTTPBody.
50var LogHTTPRequestHandler = request.NamedHandler{
51 Name: "awssdk.client.LogRequest",
52 Fn: logRequest,
53}
54
47func logRequest(r *request.Request) { 55func logRequest(r *request.Request) {
48 logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) 56 logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
49 dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) 57 bodySeekable := aws.IsReaderSeekable(r.Body)
58
59 b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
50 if err != nil { 60 if err != nil {
51 r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) 61 r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
62 r.ClientInfo.ServiceName, r.Operation.Name, err))
52 return 63 return
53 } 64 }
54 65
55 if logBody { 66 if logBody {
67 if !bodySeekable {
68 r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
69 }
56 // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's 70 // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
57 // Body as a NoOpCloser and will not be reset after read by the HTTP 71 // Body as a NoOpCloser and will not be reset after read by the HTTP
58 // client reader. 72 // client reader.
59 r.ResetBody() 73 r.ResetBody()
60 } 74 }
61 75
62 r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) 76 r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
77 r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
78}
79
80// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
81// to a service. Will only log the HTTP request's headers. The request payload
82// will not be read.
83var LogHTTPRequestHeaderHandler = request.NamedHandler{
84 Name: "awssdk.client.LogRequestHeader",
85 Fn: logRequestHeader,
86}
87
88func logRequestHeader(r *request.Request) {
89 b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
90 if err != nil {
91 r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
92 r.ClientInfo.ServiceName, r.Operation.Name, err))
93 return
94 }
95
96 r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
97 r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
63} 98}
64 99
65const logRespMsg = `DEBUG: Response %s/%s Details: 100const logRespMsg = `DEBUG: Response %s/%s Details:
@@ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
72%s 107%s
73-----------------------------------------------------` 108-----------------------------------------------------`
74 109
110// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
111// received from a service. Will include the HTTP response body if the LogLevel
112// of the request matches LogDebugWithHTTPBody.
113var LogHTTPResponseHandler = request.NamedHandler{
114 Name: "awssdk.client.LogResponse",
115 Fn: logResponse,
116}
117
75func logResponse(r *request.Request) { 118func logResponse(r *request.Request) {
76 lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} 119 lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
77 r.HTTPResponse.Body = &teeReaderCloser{ 120
78 Reader: io.TeeReader(r.HTTPResponse.Body, lw), 121 logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
79 Source: r.HTTPResponse.Body, 122 if logBody {
123 r.HTTPResponse.Body = &teeReaderCloser{
124 Reader: io.TeeReader(r.HTTPResponse.Body, lw),
125 Source: r.HTTPResponse.Body,
126 }
80 } 127 }
81 128
82 handlerFn := func(req *request.Request) { 129 handlerFn := func(req *request.Request) {
83 body, err := httputil.DumpResponse(req.HTTPResponse, false) 130 b, err := httputil.DumpResponse(req.HTTPResponse, false)
84 if err != nil { 131 if err != nil {
85 lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) 132 lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
133 req.ClientInfo.ServiceName, req.Operation.Name, err))
86 return 134 return
87 } 135 }
88 136
89 b, err := ioutil.ReadAll(lw.buf) 137 lw.Logger.Log(fmt.Sprintf(logRespMsg,
90 if err != nil { 138 req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
91 lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) 139
92 return 140 if logBody {
93 } 141 b, err := ioutil.ReadAll(lw.buf)
94 lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) 142 if err != nil {
95 if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { 143 lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
144 req.ClientInfo.ServiceName, req.Operation.Name, err))
145 return
146 }
147
96 lw.Logger.Log(string(b)) 148 lw.Logger.Log(string(b))
97 } 149 }
98 } 150 }
@@ -106,3 +158,27 @@ func logResponse(r *request.Request) {
106 Name: handlerName, Fn: handlerFn, 158 Name: handlerName, Fn: handlerFn,
107 }) 159 })
108} 160}
161
162// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
163// response received from a service. Will only log the HTTP response's headers.
164// The response payload will not be read.
165var LogHTTPResponseHeaderHandler = request.NamedHandler{
166 Name: "awssdk.client.LogResponseHeader",
167 Fn: logResponseHeader,
168}
169
170func logResponseHeader(r *request.Request) {
171 if r.Config.Logger == nil {
172 return
173 }
174
175 b, err := httputil.DumpResponse(r.HTTPResponse, false)
176 if err != nil {
177 r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
178 r.ClientInfo.ServiceName, r.Operation.Name, err))
179 return
180 }
181
182 r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
183 r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
184}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
index 4778056..920e9fd 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -3,6 +3,7 @@ package metadata
3// ClientInfo wraps immutable data from the client.Client structure. 3// ClientInfo wraps immutable data from the client.Client structure.
4type ClientInfo struct { 4type ClientInfo struct {
5 ServiceName string 5 ServiceName string
6 ServiceID string
6 APIVersion string 7 APIVersion string
7 Endpoint string 8 Endpoint string
8 SigningName string 9 SigningName string
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index d1f31f1..5421b5d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -95,7 +95,7 @@ type Config struct {
95 // recoverable failures. 95 // recoverable failures.
96 // 96 //
97 // When nil or the value does not implement the request.Retryer interface, 97 // When nil or the value does not implement the request.Retryer interface,
98 // the request.DefaultRetryer will be used. 98 // the client.DefaultRetryer will be used.
99 // 99 //
100 // When both Retryer and MaxRetries are non-nil, the former is used and 100 // When both Retryer and MaxRetries are non-nil, the former is used and
101 // the latter ignored. 101 // the latter ignored.
@@ -151,6 +151,15 @@ type Config struct {
151 // with accelerate. 151 // with accelerate.
152 S3UseAccelerate *bool 152 S3UseAccelerate *bool
153 153
154	// S3DisableContentMD5Validation config option is temporarily disabled
155	// for S3 GetObject API calls, #1837.
156 //
157 // Set this to `true` to disable the S3 service client from automatically
158 // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
159 // will also disable the SDK from performing object ContentMD5 validation
160 // on GetObject API calls.
161 S3DisableContentMD5Validation *bool
162
154 // Set this to `true` to disable the EC2Metadata client from overriding the 163 // Set this to `true` to disable the EC2Metadata client from overriding the
155 // default http.Client's Timeout. This is helpful if you do not want the 164 // default http.Client's Timeout. This is helpful if you do not want the
156 // EC2Metadata client to create a new http.Client. This options is only 165 // EC2Metadata client to create a new http.Client. This options is only
@@ -168,7 +177,7 @@ type Config struct {
168 // 177 //
169 EC2MetadataDisableTimeoutOverride *bool 178 EC2MetadataDisableTimeoutOverride *bool
170 179
171 // Instructs the endpiont to be generated for a service client to 180 // Instructs the endpoint to be generated for a service client to
172 // be the dual stack endpoint. The dual stack endpoint will support 181 // be the dual stack endpoint. The dual stack endpoint will support
173 // both IPv4 and IPv6 addressing. 182 // both IPv4 and IPv6 addressing.
174 // 183 //
@@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config {
336func (c *Config) WithS3UseAccelerate(enable bool) *Config { 345func (c *Config) WithS3UseAccelerate(enable bool) *Config {
337 c.S3UseAccelerate = &enable 346 c.S3UseAccelerate = &enable
338 return c 347 return c
348
349}
350
351// WithS3DisableContentMD5Validation sets a config
352// S3DisableContentMD5Validation value returning a Config pointer for chaining.
353func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
354 c.S3DisableContentMD5Validation = &enable
355 return c
356
339} 357}
340 358
341// WithUseDualStack sets a config UseDualStack value returning a Config 359// WithUseDualStack sets a config UseDualStack value returning a Config
@@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) {
435 dst.S3UseAccelerate = other.S3UseAccelerate 453 dst.S3UseAccelerate = other.S3UseAccelerate
436 } 454 }
437 455
456 if other.S3DisableContentMD5Validation != nil {
457 dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
458 }
459
438 if other.UseDualStack != nil { 460 if other.UseDualStack != nil {
439 dst.UseDualStack = other.UseDualStack 461 dst.UseDualStack = other.UseDualStack
440 } 462 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
index e8cf93d..8fdda53 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
@@ -4,9 +4,9 @@ package aws
4 4
5import "time" 5import "time"
6 6
7// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This 7// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
8// is copied to provide a 1.6 and 1.5 safe version of context that is compatible 8// provide a 1.6 and 1.5 safe version of context that is compatible with Go
9// with Go 1.7's Context. 9// 1.7's Context.
10// 10//
11// An emptyCtx is never canceled, has no values, and has no deadline. It is not 11// An emptyCtx is never canceled, has no values, and has no deadline. It is not
12// struct{}, since vars of this type must have distinct addresses. 12// struct{}, since vars of this type must have distinct addresses.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
index 3b73a7d..ff5d58e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time {
311 return time.Time{} 311 return time.Time{}
312} 312}
313 313
314// SecondsTimeValue converts an int64 pointer to a time.Time value
315// representing seconds since Epoch or time.Time{} if the pointer is nil.
316func SecondsTimeValue(v *int64) time.Time {
317 if v != nil {
318 return time.Unix((*v / 1000), 0)
319 }
320 return time.Time{}
321}
322
323// MillisecondsTimeValue converts an int64 pointer to a time.Time value
324// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
325func MillisecondsTimeValue(v *int64) time.Time {
326 if v != nil {
327 return time.Unix(0, (*v * 1000000))
328 }
329 return time.Time{}
330}
331
314// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". 332// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
315// The result is undefined if the Unix time cannot be represented by an int64. 333// The result is undefined if the Unix time cannot be represented by an int64.
316// Which includes calling TimeUnixMilli on a zero Time is undefined. 334// Which includes calling TimeUnixMilli on a zero Time is undefined.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index 495e3ef..cfcddf3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -3,12 +3,10 @@ package corehandlers
3import ( 3import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6 "io"
7 "io/ioutil" 6 "io/ioutil"
8 "net/http" 7 "net/http"
9 "net/url" 8 "net/url"
10 "regexp" 9 "regexp"
11 "runtime"
12 "strconv" 10 "strconv"
13 "time" 11 "time"
14 12
@@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
36 if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { 34 if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
37 length, _ = strconv.ParseInt(slength, 10, 64) 35 length, _ = strconv.ParseInt(slength, 10, 64)
38 } else { 36 } else {
39 switch body := r.Body.(type) { 37 if r.Body != nil {
40 case nil: 38 var err error
41 length = 0 39 length, err = aws.SeekerLen(r.Body)
42 case lener: 40 if err != nil {
43 length = int64(body.Len()) 41 r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
44 case io.Seeker: 42 return
45 r.BodyStart, _ = body.Seek(0, 1) 43 }
46 end, _ := body.Seek(0, 2)
47 body.Seek(r.BodyStart, 0) // make sure to seek back to original location
48 length = end - r.BodyStart
49 default:
50 panic("Cannot get length of body, must provide `ContentLength`")
51 } 44 }
52 } 45 }
53 46
@@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
60 } 53 }
61}} 54}}
62 55
63// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
64var SDKVersionUserAgentHandler = request.NamedHandler{
65 Name: "core.SDKVersionUserAgentHandler",
66 Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
67 runtime.Version(), runtime.GOOS, runtime.GOARCH),
68}
69
70var reStatusCode = regexp.MustCompile(`^(\d{3})`) 56var reStatusCode = regexp.MustCompile(`^(\d{3})`)
71 57
72// ValidateReqSigHandler is a request handler to ensure that the request's 58// ValidateReqSigHandler is a request handler to ensure that the request's
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 0000000..a15f496
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
1package corehandlers
2
3import (
4 "os"
5 "runtime"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/aws/request"
9)
10
11// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
12// to the user agent.
13var SDKVersionUserAgentHandler = request.NamedHandler{
14 Name: "core.SDKVersionUserAgentHandler",
15 Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
16 runtime.Version(), runtime.GOOS, runtime.GOARCH),
17}
18
19const execEnvVar = `AWS_EXECUTION_ENV`
20const execEnvUAKey = `exec_env`
21
22// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
23// execution environment to the user agent.
24//
25// If the environment variable AWS_EXECUTION_ENV is set, its value will be
26// appended to the user agent string.
27var AddHostExecEnvUserAgentHander = request.NamedHandler{
28 Name: "core.AddHostExecEnvUserAgentHander",
29 Fn: func(r *request.Request) {
30 v := os.Getenv(execEnvVar)
31 if len(v) == 0 {
32 return
33 }
34
35 request.AddToUserAgent(r, execEnvUAKey+"/"+v)
36 },
37}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index 42416fc..ed08699 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -178,7 +178,8 @@ func (e *Expiry) IsExpired() bool {
178type Credentials struct { 178type Credentials struct {
179 creds Value 179 creds Value
180 forceRefresh bool 180 forceRefresh bool
181 m sync.Mutex 181
182 m sync.RWMutex
182 183
183 provider Provider 184 provider Provider
184} 185}
@@ -201,6 +202,17 @@ func NewCredentials(provider Provider) *Credentials {
201// If Credentials.Expire() was called the credentials Value will be force 202// If Credentials.Expire() was called the credentials Value will be force
202// expired, and the next call to Get() will cause them to be refreshed. 203// expired, and the next call to Get() will cause them to be refreshed.
203func (c *Credentials) Get() (Value, error) { 204func (c *Credentials) Get() (Value, error) {
205 // Check the cached credentials first with just the read lock.
206 c.m.RLock()
207 if !c.isExpired() {
208 creds := c.creds
209 c.m.RUnlock()
210 return creds, nil
211 }
212 c.m.RUnlock()
213
214	// Credentials are expired, need to retrieve the credentials taking the full
215 // lock.
204 c.m.Lock() 216 c.m.Lock()
205 defer c.m.Unlock() 217 defer c.m.Unlock()
206 218
@@ -234,8 +246,8 @@ func (c *Credentials) Expire() {
234// If the Credentials were forced to be expired with Expire() this will 246// If the Credentials were forced to be expired with Expire() this will
235// reflect that override. 247// reflect that override.
236func (c *Credentials) IsExpired() bool { 248func (c *Credentials) IsExpired() bool {
237 c.m.Lock() 249 c.m.RLock()
238 defer c.m.Unlock() 250 defer c.m.RUnlock()
239 251
240 return c.isExpired() 252 return c.isExpired()
241} 253}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
index c397495..0ed791b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -4,7 +4,6 @@ import (
4 "bufio" 4 "bufio"
5 "encoding/json" 5 "encoding/json"
6 "fmt" 6 "fmt"
7 "path"
8 "strings" 7 "strings"
9 "time" 8 "time"
10 9
@@ -12,6 +11,7 @@ import (
12 "github.com/aws/aws-sdk-go/aws/client" 11 "github.com/aws/aws-sdk-go/aws/client"
13 "github.com/aws/aws-sdk-go/aws/credentials" 12 "github.com/aws/aws-sdk-go/aws/credentials"
14 "github.com/aws/aws-sdk-go/aws/ec2metadata" 13 "github.com/aws/aws-sdk-go/aws/ec2metadata"
14 "github.com/aws/aws-sdk-go/internal/sdkuri"
15) 15)
16 16
17// ProviderName provides a name of EC2Role provider 17// ProviderName provides a name of EC2Role provider
@@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
125 Message string 125 Message string
126} 126}
127 127
128const iamSecurityCredsPath = "/iam/security-credentials" 128const iamSecurityCredsPath = "iam/security-credentials/"
129 129
130// requestCredList requests a list of credentials from the EC2 service. 130// requestCredList requests a list of credentials from the EC2 service.
131// If there are no credentials, or there is an error making or receiving the request 131// If there are no credentials, or there is an error making or receiving the request
@@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
153// If the credentials cannot be found, or there is an error reading the response 153// If the credentials cannot be found, or there is an error reading the response
154// and error will be returned. 154// and error will be returned.
155func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { 155func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
156 resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) 156 resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
157 if err != nil { 157 if err != nil {
158 return ec2RoleCredRespBody{}, 158 return ec2RoleCredRespBody{},
159 awserr.New("EC2RoleRequestError", 159 awserr.New("EC2RoleRequestError",
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 0000000..152d785
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
1// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
2// via UDP connection. Using the Start function will enable the reporting of
3// metrics on a given port. If Start is called again with different
4// parameters, a panic will occur.
5//
6// Pause can be called to pause any metrics publishing on a given port. Sessions
7// that have had their handlers modified via InjectHandlers may still be used.
8// However, the handlers will act as a no-op, meaning no metrics will be published.
9//
10// Example:
11// r, err := csm.Start("clientID", ":31000")
12// if err != nil {
13// panic(fmt.Errorf("failed starting CSM: %v", err))
14// }
15//
16// sess, err := session.NewSession(&aws.Config{})
17// if err != nil {
18// panic(fmt.Errorf("failed loading session: %v", err))
19// }
20//
21// r.InjectHandlers(&sess.Handlers)
22//
23// client := s3.New(sess)
24// resp, err := client.GetObject(&s3.GetObjectInput{
25// Bucket: aws.String("bucket"),
26// Key: aws.String("key"),
27// })
28//
29// // Will pause monitoring
30// r.Pause()
31// resp, err = client.GetObject(&s3.GetObjectInput{
32// Bucket: aws.String("bucket"),
33// Key: aws.String("key"),
34// })
35//
36// // Resume monitoring
37// r.Continue()
38//
39// Start returns a Reporter that is used to enable or disable monitoring. If
40// access to the Reporter is required later, calling Get will return the Reporter
41// singleton.
42//
43// Example:
44// r := csm.Get()
45// r.Continue()
46package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 0000000..2f0c6ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
1package csm
2
3import (
4 "fmt"
5 "sync"
6)
7
8var (
9 lock sync.Mutex
10)
11
12// Client side metric handler names
13const (
14 APICallMetricHandlerName = "awscsm.SendAPICallMetric"
15 APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
16)
17
18// Start will start a long-running goroutine to capture
19// client side metrics. Calling Start multiple times will only
20// start the metric listener once and will panic if a different
21// client ID or port is passed in.
22//
23// Example:
24// r, err := csm.Start("clientID", "127.0.0.1:8094")
25// if err != nil {
26// panic(fmt.Errorf("expected no error, but received %v", err))
27// }
28// sess := session.NewSession()
29// r.InjectHandlers(sess.Handlers)
30//
31// svc := s3.New(sess)
32// out, err := svc.GetObject(&s3.GetObjectInput{
33// Bucket: aws.String("bucket"),
34// Key: aws.String("key"),
35// })
36func Start(clientID string, url string) (*Reporter, error) {
37 lock.Lock()
38 defer lock.Unlock()
39
40 if sender == nil {
41 sender = newReporter(clientID, url)
42 } else {
43 if sender.clientID != clientID {
44 panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
45 }
46
47 if sender.url != url {
48 panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
49 }
50 }
51
52 if err := connect(url); err != nil {
53 sender = nil
54 return nil, err
55 }
56
57 return sender, nil
58}
59
60// Get will return a reporter if one exists; if one does not exist, nil will
61// be returned.
62func Get() *Reporter {
63 lock.Lock()
64 defer lock.Unlock()
65
66 return sender
67}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100644
index 0000000..4b0d630
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,51 @@
1package csm
2
3import (
4 "strconv"
5 "time"
6)
7
8type metricTime time.Time
9
10func (t metricTime) MarshalJSON() ([]byte, error) {
11 ns := time.Duration(time.Time(t).UnixNano())
12 return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
13}
14
15type metric struct {
16 ClientID *string `json:"ClientId,omitempty"`
17 API *string `json:"Api,omitempty"`
18 Service *string `json:"Service,omitempty"`
19 Timestamp *metricTime `json:"Timestamp,omitempty"`
20 Type *string `json:"Type,omitempty"`
21 Version *int `json:"Version,omitempty"`
22
23 AttemptCount *int `json:"AttemptCount,omitempty"`
24 Latency *int `json:"Latency,omitempty"`
25
26 Fqdn *string `json:"Fqdn,omitempty"`
27 UserAgent *string `json:"UserAgent,omitempty"`
28 AttemptLatency *int `json:"AttemptLatency,omitempty"`
29
30 SessionToken *string `json:"SessionToken,omitempty"`
31 Region *string `json:"Region,omitempty"`
32 AccessKey *string `json:"AccessKey,omitempty"`
33 HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
34 XAmzID2 *string `json:"XAmzId2,omitempty"`
35 XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
36
37 AWSException *string `json:"AwsException,omitempty"`
38 AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
39 SDKException *string `json:"SdkException,omitempty"`
40 SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
41
42 DestinationIP *string `json:"DestinationIp,omitempty"`
43 ConnectionReused *int `json:"ConnectionReused,omitempty"`
44
45 AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
46 ConnectLatency *int `json:"ConnectLatency,omitempty"`
47 RequestLatency *int `json:"RequestLatency,omitempty"`
48 DNSLatency *int `json:"DnsLatency,omitempty"`
49 TCPLatency *int `json:"TcpLatency,omitempty"`
50 SSLLatency *int `json:"SslLatency,omitempty"`
51}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 0000000..514fc37
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,54 @@
1package csm
2
3import (
4 "sync/atomic"
5)
6
7const (
8 runningEnum = iota
9 pausedEnum
10)
11
12var (
13 // MetricsChannelSize of metrics to hold in the channel
14 MetricsChannelSize = 100
15)
16
17type metricChan struct {
18 ch chan metric
19 paused int64
20}
21
22func newMetricChan(size int) metricChan {
23 return metricChan{
24 ch: make(chan metric, size),
25 }
26}
27
28func (ch *metricChan) Pause() {
29 atomic.StoreInt64(&ch.paused, pausedEnum)
30}
31
32func (ch *metricChan) Continue() {
33 atomic.StoreInt64(&ch.paused, runningEnum)
34}
35
36func (ch *metricChan) IsPaused() bool {
37 v := atomic.LoadInt64(&ch.paused)
38 return v == pausedEnum
39}
40
41// Push will push metrics to the metric channel if the channel
42// is not paused
43func (ch *metricChan) Push(m metric) bool {
44 if ch.IsPaused() {
45 return false
46 }
47
48 select {
49 case ch.ch <- m:
50 return true
51 default:
52 return false
53 }
54}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 0000000..11082e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,231 @@
1package csm
2
3import (
4 "encoding/json"
5 "net"
6 "time"
7
8 "github.com/aws/aws-sdk-go/aws"
9 "github.com/aws/aws-sdk-go/aws/awserr"
10 "github.com/aws/aws-sdk-go/aws/request"
11)
12
13const (
14 // DefaultPort is used when no port is specified
15 DefaultPort = "31000"
16)
17
18// Reporter will gather metrics of API requests made and
19// send those metrics to the CSM endpoint.
20type Reporter struct {
21 clientID string
22 url string
23 conn net.Conn
24 metricsCh metricChan
25 done chan struct{}
26}
27
28var (
29 sender *Reporter
30)
31
32func connect(url string) error {
33 const network = "udp"
34 if err := sender.connect(network, url); err != nil {
35 return err
36 }
37
38 if sender.done == nil {
39 sender.done = make(chan struct{})
40 go sender.start()
41 }
42
43 return nil
44}
45
46func newReporter(clientID, url string) *Reporter {
47 return &Reporter{
48 clientID: clientID,
49 url: url,
50 metricsCh: newMetricChan(MetricsChannelSize),
51 }
52}
53
54func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
55 if rep == nil {
56 return
57 }
58
59 now := time.Now()
60 creds, _ := r.Config.Credentials.Get()
61
62 m := metric{
63 ClientID: aws.String(rep.clientID),
64 API: aws.String(r.Operation.Name),
65 Service: aws.String(r.ClientInfo.ServiceID),
66 Timestamp: (*metricTime)(&now),
67 UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
68 Region: r.Config.Region,
69 Type: aws.String("ApiCallAttempt"),
70 Version: aws.Int(1),
71
72 XAmzRequestID: aws.String(r.RequestID),
73
74 AttemptCount: aws.Int(r.RetryCount + 1),
75 AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
76 AccessKey: aws.String(creds.AccessKeyID),
77 }
78
79 if r.HTTPResponse != nil {
80 m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
81 }
82
83 if r.Error != nil {
84 if awserr, ok := r.Error.(awserr.Error); ok {
85 setError(&m, awserr)
86 }
87 }
88
89 rep.metricsCh.Push(m)
90}
91
92func setError(m *metric, err awserr.Error) {
93 msg := err.Error()
94 code := err.Code()
95
96 switch code {
97 case "RequestError",
98 "SerializationError",
99 request.CanceledErrorCode:
100 m.SDKException = &code
101 m.SDKExceptionMessage = &msg
102 default:
103 m.AWSException = &code
104 m.AWSExceptionMessage = &msg
105 }
106}
107
108func (rep *Reporter) sendAPICallMetric(r *request.Request) {
109 if rep == nil {
110 return
111 }
112
113 now := time.Now()
114 m := metric{
115 ClientID: aws.String(rep.clientID),
116 API: aws.String(r.Operation.Name),
117 Service: aws.String(r.ClientInfo.ServiceID),
118 Timestamp: (*metricTime)(&now),
119 Type: aws.String("ApiCall"),
120 AttemptCount: aws.Int(r.RetryCount + 1),
121 Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
122 XAmzRequestID: aws.String(r.RequestID),
123 }
124
125 // TODO: Probably want to figure something out for logging dropped
126 // metrics
127 rep.metricsCh.Push(m)
128}
129
130func (rep *Reporter) connect(network, url string) error {
131 if rep.conn != nil {
132 rep.conn.Close()
133 }
134
135 conn, err := net.Dial(network, url)
136 if err != nil {
137 return awserr.New("UDPError", "Could not connect", err)
138 }
139
140 rep.conn = conn
141
142 return nil
143}
144
145func (rep *Reporter) close() {
146 if rep.done != nil {
147 close(rep.done)
148 }
149
150 rep.metricsCh.Pause()
151}
152
153func (rep *Reporter) start() {
154 defer func() {
155 rep.metricsCh.Pause()
156 }()
157
158 for {
159 select {
160 case <-rep.done:
161 rep.done = nil
162 return
163 case m := <-rep.metricsCh.ch:
164 // TODO: What to do with this error? Probably should just log
165 b, err := json.Marshal(m)
166 if err != nil {
167 continue
168 }
169
170 rep.conn.Write(b)
171 }
172 }
173}
174
175// Pause will pause the metric channel preventing any new metrics from
176// being added.
177func (rep *Reporter) Pause() {
178 lock.Lock()
179 defer lock.Unlock()
180
181 if rep == nil {
182 return
183 }
184
185 rep.close()
186}
187
188// Continue will reopen the metric channel and allow for monitoring
189// to be resumed.
190func (rep *Reporter) Continue() {
191 lock.Lock()
192 defer lock.Unlock()
193 if rep == nil {
194 return
195 }
196
197 if !rep.metricsCh.IsPaused() {
198 return
199 }
200
201 rep.metricsCh.Continue()
202}
203
204// InjectHandlers will enable client side metrics and inject the proper
205// handlers to handle how metrics are sent.
206//
207// Example:
208// // Start must be called in order to inject the correct handlers
209// r, err := csm.Start("clientID", "127.0.0.1:8094")
210// if err != nil {
211// panic(fmt.Errorf("expected no error, but received %v", err))
212// }
213//
214// sess := session.NewSession()
215// r.InjectHandlers(&sess.Handlers)
216//
217// // create a new service client with our client side metric session
218// svc := s3.New(sess)
219func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
220 if rep == nil {
221 return
222 }
223
224 apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
225 apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
226
227 handlers.Complete.PushFrontNamed(apiCallHandler)
228 handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
229
230 handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
231}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 07afe3b..5040a2f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -9,6 +9,7 @@ package defaults
9 9
10import ( 10import (
11 "fmt" 11 "fmt"
12 "net"
12 "net/http" 13 "net/http"
13 "net/url" 14 "net/url"
14 "os" 15 "os"
@@ -72,6 +73,7 @@ func Handlers() request.Handlers {
72 handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) 73 handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
73 handlers.Validate.AfterEachFn = request.HandlerListStopOnError 74 handlers.Validate.AfterEachFn = request.HandlerListStopOnError
74 handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) 75 handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
76 handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
75 handlers.Build.AfterEachFn = request.HandlerListStopOnError 77 handlers.Build.AfterEachFn = request.HandlerListStopOnError
76 handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) 78 handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
77 handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) 79 handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
@@ -90,14 +92,25 @@ func Handlers() request.Handlers {
90func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { 92func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
91 return credentials.NewCredentials(&credentials.ChainProvider{ 93 return credentials.NewCredentials(&credentials.ChainProvider{
92 VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), 94 VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
93 Providers: []credentials.Provider{ 95 Providers: CredProviders(cfg, handlers),
94 &credentials.EnvProvider{},
95 &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
96 RemoteCredProvider(*cfg, handlers),
97 },
98 }) 96 })
99} 97}
100 98
99// CredProviders returns the slice of providers used in
100// the default credential chain.
101//
102// For applications that need to use some other provider (for example, use
103// different environment variables for legacy reasons) but still fall back
104// on the default chain of providers, this allows that default chain to be
105// automatically updated.
106func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
107 return []credentials.Provider{
108 &credentials.EnvProvider{},
109 &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
110 RemoteCredProvider(*cfg, handlers),
111 }
112}
113
101const ( 114const (
102 httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" 115 httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
103 ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" 116 ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
@@ -118,14 +131,43 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
118 return ec2RoleProvider(cfg, handlers) 131 return ec2RoleProvider(cfg, handlers)
119} 132}
120 133
134var lookupHostFn = net.LookupHost
135
136func isLoopbackHost(host string) (bool, error) {
137 ip := net.ParseIP(host)
138 if ip != nil {
139 return ip.IsLoopback(), nil
140 }
141
142 // Host is not an ip, perform lookup
143 addrs, err := lookupHostFn(host)
144 if err != nil {
145 return false, err
146 }
147 for _, addr := range addrs {
148 if !net.ParseIP(addr).IsLoopback() {
149 return false, nil
150 }
151 }
152
153 return true, nil
154}
155
121func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { 156func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
122 var errMsg string 157 var errMsg string
123 158
124 parsed, err := url.Parse(u) 159 parsed, err := url.Parse(u)
125 if err != nil { 160 if err != nil {
126 errMsg = fmt.Sprintf("invalid URL, %v", err) 161 errMsg = fmt.Sprintf("invalid URL, %v", err)
127 } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { 162 } else {
128 errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) 163 host := aws.URLHostname(parsed)
164 if len(host) == 0 {
165 errMsg = "unable to parse host from local HTTP cred provider URL"
166 } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
167 errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
168 } else if !isLoopback {
169 errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
170 }
129 } 171 }
130 172
131 if len(errMsg) > 0 { 173 if len(errMsg) > 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index 984407a..c215cd3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -4,12 +4,12 @@ import (
4 "encoding/json" 4 "encoding/json"
5 "fmt" 5 "fmt"
6 "net/http" 6 "net/http"
7 "path"
8 "strings" 7 "strings"
9 "time" 8 "time"
10 9
11 "github.com/aws/aws-sdk-go/aws/awserr" 10 "github.com/aws/aws-sdk-go/aws/awserr"
12 "github.com/aws/aws-sdk-go/aws/request" 11 "github.com/aws/aws-sdk-go/aws/request"
12 "github.com/aws/aws-sdk-go/internal/sdkuri"
13) 13)
14 14
15// GetMetadata uses the path provided to request information from the EC2 15// GetMetadata uses the path provided to request information from the EC2
@@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
19 op := &request.Operation{ 19 op := &request.Operation{
20 Name: "GetMetadata", 20 Name: "GetMetadata",
21 HTTPMethod: "GET", 21 HTTPMethod: "GET",
22 HTTPPath: path.Join("/", "meta-data", p), 22 HTTPPath: sdkuri.PathJoin("/meta-data", p),
23 } 23 }
24 24
25 output := &metadataOutput{} 25 output := &metadataOutput{}
@@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {
35 op := &request.Operation{ 35 op := &request.Operation{
36 Name: "GetUserData", 36 Name: "GetUserData",
37 HTTPMethod: "GET", 37 HTTPMethod: "GET",
38 HTTPPath: path.Join("/", "user-data"), 38 HTTPPath: "/user-data",
39 } 39 }
40 40
41 output := &metadataOutput{} 41 output := &metadataOutput{}
@@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
56 op := &request.Operation{ 56 op := &request.Operation{
57 Name: "GetDynamicData", 57 Name: "GetDynamicData",
58 HTTPMethod: "GET", 58 HTTPMethod: "GET",
59 HTTPPath: path.Join("/", "dynamic", p), 59 HTTPPath: sdkuri.PathJoin("/dynamic", p),
60 } 60 }
61 61
62 output := &metadataOutput{} 62 output := &metadataOutput{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index 5b4379d..ef5f732 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -1,5 +1,10 @@
1// Package ec2metadata provides the client for making API calls to the 1// Package ec2metadata provides the client for making API calls to the
2// EC2 Metadata service. 2// EC2 Metadata service.
3//
4// This package's client can be disabled completely by setting the environment
5// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
6// true instructs the SDK to disable the EC2 Metadata client. The client cannot
7// be used while the environment variable is set to true (case insensitive).
3package ec2metadata 8package ec2metadata
4 9
5import ( 10import (
@@ -7,17 +12,21 @@ import (
7 "errors" 12 "errors"
8 "io" 13 "io"
9 "net/http" 14 "net/http"
15 "os"
16 "strings"
10 "time" 17 "time"
11 18
12 "github.com/aws/aws-sdk-go/aws" 19 "github.com/aws/aws-sdk-go/aws"
13 "github.com/aws/aws-sdk-go/aws/awserr" 20 "github.com/aws/aws-sdk-go/aws/awserr"
14 "github.com/aws/aws-sdk-go/aws/client" 21 "github.com/aws/aws-sdk-go/aws/client"
15 "github.com/aws/aws-sdk-go/aws/client/metadata" 22 "github.com/aws/aws-sdk-go/aws/client/metadata"
23 "github.com/aws/aws-sdk-go/aws/corehandlers"
16 "github.com/aws/aws-sdk-go/aws/request" 24 "github.com/aws/aws-sdk-go/aws/request"
17) 25)
18 26
19// ServiceName is the name of the service. 27// ServiceName is the name of the service.
20const ServiceName = "ec2metadata" 28const ServiceName = "ec2metadata"
29const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
21 30
22// A EC2Metadata is an EC2 Metadata service Client. 31// A EC2Metadata is an EC2 Metadata service Client.
23type EC2Metadata struct { 32type EC2Metadata struct {
@@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
75 svc.Handlers.Validate.Clear() 84 svc.Handlers.Validate.Clear()
76 svc.Handlers.Validate.PushBack(validateEndpointHandler) 85 svc.Handlers.Validate.PushBack(validateEndpointHandler)
77 86
87 // Disable the EC2 Metadata service if the environment variable is set.
88	// This short-circuits the service's functionality to always fail to send
89 // requests.
90 if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
91 svc.Handlers.Send.SwapNamed(request.NamedHandler{
92 Name: corehandlers.SendHandler.Name,
93 Fn: func(r *request.Request) {
94 r.Error = awserr.New(
95 request.CanceledErrorCode,
96 "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
97 nil)
98 },
99 })
100 }
101
78 // Add additional options to the service config 102 // Add additional options to the service config
79 for _, option := range opts { 103 for _, option := range opts {
80 option(svc.Client) 104 option(svc.Client)
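The service.go hunk above wires the new AWS_EC2_METADATA_DISABLED switch into the client: when the variable is "true" (any casing), the Send handler is swapped for one that fails every request before it reaches the network. A rough usage sketch against this vendored SDK, treating the error added above as opaque:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// The check lowercases the value, so any casing of "true" disables the client.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "True")

	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Every call now fails in the swapped Send handler, without touching IMDS.
	if _, err := svc.GetMetadata("instance-id"); err != nil {
		fmt.Println("IMDS disabled:", err)
	}
}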
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index d6d87e4..8e823be 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -24,6 +24,7 @@ const (
24 EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). 24 EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
25 EuWest1RegionID = "eu-west-1" // EU (Ireland). 25 EuWest1RegionID = "eu-west-1" // EU (Ireland).
26 EuWest2RegionID = "eu-west-2" // EU (London). 26 EuWest2RegionID = "eu-west-2" // EU (London).
27 EuWest3RegionID = "eu-west-3" // EU (Paris).
27 SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). 28 SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
28 UsEast1RegionID = "us-east-1" // US East (N. Virginia). 29 UsEast1RegionID = "us-east-1" // US East (N. Virginia).
29 UsEast2RegionID = "us-east-2" // US East (Ohio). 30 UsEast2RegionID = "us-east-2" // US East (Ohio).
@@ -33,7 +34,8 @@ const (
33 34
34// AWS China partition's regions. 35// AWS China partition's regions.
35const ( 36const (
36 CnNorth1RegionID = "cn-north-1" // China (Beijing). 37 CnNorth1RegionID = "cn-north-1" // China (Beijing).
38 CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
37) 39)
38 40
39// AWS GovCloud (US) partition's regions. 41// AWS GovCloud (US) partition's regions.
@@ -43,18 +45,26 @@ const (
43 45
44// Service identifiers 46// Service identifiers
45const ( 47const (
48 A4bServiceID = "a4b" // A4b.
46 AcmServiceID = "acm" // Acm. 49 AcmServiceID = "acm" // Acm.
50 AcmPcaServiceID = "acm-pca" // AcmPca.
51 ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
52 ApiPricingServiceID = "api.pricing" // ApiPricing.
47 ApigatewayServiceID = "apigateway" // Apigateway. 53 ApigatewayServiceID = "apigateway" // Apigateway.
48 ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. 54 ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
49 Appstream2ServiceID = "appstream2" // Appstream2. 55 Appstream2ServiceID = "appstream2" // Appstream2.
50 AthenaServiceID = "athena" // Athena. 56 AthenaServiceID = "athena" // Athena.
51 AutoscalingServiceID = "autoscaling" // Autoscaling. 57 AutoscalingServiceID = "autoscaling" // Autoscaling.
58 AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
52 BatchServiceID = "batch" // Batch. 59 BatchServiceID = "batch" // Batch.
53 BudgetsServiceID = "budgets" // Budgets. 60 BudgetsServiceID = "budgets" // Budgets.
61 CeServiceID = "ce" // Ce.
62 Cloud9ServiceID = "cloud9" // Cloud9.
54 ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. 63 ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
55 CloudformationServiceID = "cloudformation" // Cloudformation. 64 CloudformationServiceID = "cloudformation" // Cloudformation.
56 CloudfrontServiceID = "cloudfront" // Cloudfront. 65 CloudfrontServiceID = "cloudfront" // Cloudfront.
57 CloudhsmServiceID = "cloudhsm" // Cloudhsm. 66 CloudhsmServiceID = "cloudhsm" // Cloudhsm.
67 Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
58 CloudsearchServiceID = "cloudsearch" // Cloudsearch. 68 CloudsearchServiceID = "cloudsearch" // Cloudsearch.
59 CloudtrailServiceID = "cloudtrail" // Cloudtrail. 69 CloudtrailServiceID = "cloudtrail" // Cloudtrail.
60 CodebuildServiceID = "codebuild" // Codebuild. 70 CodebuildServiceID = "codebuild" // Codebuild.
@@ -65,9 +75,11 @@ const (
65 CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. 75 CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
66 CognitoIdpServiceID = "cognito-idp" // CognitoIdp. 76 CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
67 CognitoSyncServiceID = "cognito-sync" // CognitoSync. 77 CognitoSyncServiceID = "cognito-sync" // CognitoSync.
78 ComprehendServiceID = "comprehend" // Comprehend.
68 ConfigServiceID = "config" // Config. 79 ConfigServiceID = "config" // Config.
69 CurServiceID = "cur" // Cur. 80 CurServiceID = "cur" // Cur.
70 DatapipelineServiceID = "datapipeline" // Datapipeline. 81 DatapipelineServiceID = "datapipeline" // Datapipeline.
82 DaxServiceID = "dax" // Dax.
71 DevicefarmServiceID = "devicefarm" // Devicefarm. 83 DevicefarmServiceID = "devicefarm" // Devicefarm.
72 DirectconnectServiceID = "directconnect" // Directconnect. 84 DirectconnectServiceID = "directconnect" // Directconnect.
73 DiscoveryServiceID = "discovery" // Discovery. 85 DiscoveryServiceID = "discovery" // Discovery.
@@ -89,8 +101,12 @@ const (
89 EsServiceID = "es" // Es. 101 EsServiceID = "es" // Es.
90 EventsServiceID = "events" // Events. 102 EventsServiceID = "events" // Events.
91 FirehoseServiceID = "firehose" // Firehose. 103 FirehoseServiceID = "firehose" // Firehose.
104 FmsServiceID = "fms" // Fms.
92 GameliftServiceID = "gamelift" // Gamelift. 105 GameliftServiceID = "gamelift" // Gamelift.
93 GlacierServiceID = "glacier" // Glacier. 106 GlacierServiceID = "glacier" // Glacier.
107 GlueServiceID = "glue" // Glue.
108 GreengrassServiceID = "greengrass" // Greengrass.
109 GuarddutyServiceID = "guardduty" // Guardduty.
94 HealthServiceID = "health" // Health. 110 HealthServiceID = "health" // Health.
95 IamServiceID = "iam" // Iam. 111 IamServiceID = "iam" // Iam.
96 ImportexportServiceID = "importexport" // Importexport. 112 ImportexportServiceID = "importexport" // Importexport.
@@ -98,17 +114,24 @@ const (
98 IotServiceID = "iot" // Iot. 114 IotServiceID = "iot" // Iot.
99 KinesisServiceID = "kinesis" // Kinesis. 115 KinesisServiceID = "kinesis" // Kinesis.
100 KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. 116 KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
117 KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
101 KmsServiceID = "kms" // Kms. 118 KmsServiceID = "kms" // Kms.
102 LambdaServiceID = "lambda" // Lambda. 119 LambdaServiceID = "lambda" // Lambda.
103 LightsailServiceID = "lightsail" // Lightsail. 120 LightsailServiceID = "lightsail" // Lightsail.
104 LogsServiceID = "logs" // Logs. 121 LogsServiceID = "logs" // Logs.
105 MachinelearningServiceID = "machinelearning" // Machinelearning. 122 MachinelearningServiceID = "machinelearning" // Machinelearning.
106 MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. 123 MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
124 MediaconvertServiceID = "mediaconvert" // Mediaconvert.
125 MedialiveServiceID = "medialive" // Medialive.
126 MediapackageServiceID = "mediapackage" // Mediapackage.
127 MediastoreServiceID = "mediastore" // Mediastore.
107 MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. 128 MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
129 MghServiceID = "mgh" // Mgh.
108 MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. 130 MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
109 ModelsLexServiceID = "models.lex" // ModelsLex. 131 ModelsLexServiceID = "models.lex" // ModelsLex.
110 MonitoringServiceID = "monitoring" // Monitoring. 132 MonitoringServiceID = "monitoring" // Monitoring.
111 MturkRequesterServiceID = "mturk-requester" // MturkRequester. 133 MturkRequesterServiceID = "mturk-requester" // MturkRequester.
134 NeptuneServiceID = "neptune" // Neptune.
112 OpsworksServiceID = "opsworks" // Opsworks. 135 OpsworksServiceID = "opsworks" // Opsworks.
113 OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. 136 OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
114 OrganizationsServiceID = "organizations" // Organizations. 137 OrganizationsServiceID = "organizations" // Organizations.
@@ -117,12 +140,18 @@ const (
117 RdsServiceID = "rds" // Rds. 140 RdsServiceID = "rds" // Rds.
118 RedshiftServiceID = "redshift" // Redshift. 141 RedshiftServiceID = "redshift" // Redshift.
119 RekognitionServiceID = "rekognition" // Rekognition. 142 RekognitionServiceID = "rekognition" // Rekognition.
143 ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
120 Route53ServiceID = "route53" // Route53. 144 Route53ServiceID = "route53" // Route53.
121 Route53domainsServiceID = "route53domains" // Route53domains. 145 Route53domainsServiceID = "route53domains" // Route53domains.
122 RuntimeLexServiceID = "runtime.lex" // RuntimeLex. 146 RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
147 RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
123 S3ServiceID = "s3" // S3. 148 S3ServiceID = "s3" // S3.
149 SagemakerServiceID = "sagemaker" // Sagemaker.
124 SdbServiceID = "sdb" // Sdb. 150 SdbServiceID = "sdb" // Sdb.
151 SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
152 ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
125 ServicecatalogServiceID = "servicecatalog" // Servicecatalog. 153 ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
154 ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
126 ShieldServiceID = "shield" // Shield. 155 ShieldServiceID = "shield" // Shield.
127 SmsServiceID = "sms" // Sms. 156 SmsServiceID = "sms" // Sms.
128 SnowballServiceID = "snowball" // Snowball. 157 SnowballServiceID = "snowball" // Snowball.
@@ -136,9 +165,11 @@ const (
136 SupportServiceID = "support" // Support. 165 SupportServiceID = "support" // Support.
137 SwfServiceID = "swf" // Swf. 166 SwfServiceID = "swf" // Swf.
138 TaggingServiceID = "tagging" // Tagging. 167 TaggingServiceID = "tagging" // Tagging.
168 TranslateServiceID = "translate" // Translate.
139 WafServiceID = "waf" // Waf. 169 WafServiceID = "waf" // Waf.
140 WafRegionalServiceID = "waf-regional" // WafRegional. 170 WafRegionalServiceID = "waf-regional" // WafRegional.
141 WorkdocsServiceID = "workdocs" // Workdocs. 171 WorkdocsServiceID = "workdocs" // Workdocs.
172 WorkmailServiceID = "workmail" // Workmail.
142 WorkspacesServiceID = "workspaces" // Workspaces. 173 WorkspacesServiceID = "workspaces" // Workspaces.
143 XrayServiceID = "xray" // Xray. 174 XrayServiceID = "xray" // Xray.
144) 175)
@@ -216,6 +247,9 @@ var awsPartition = partition{
216 "eu-west-2": region{ 247 "eu-west-2": region{
217 Description: "EU (London)", 248 Description: "EU (London)",
218 }, 249 },
250 "eu-west-3": region{
251 Description: "EU (Paris)",
252 },
219 "sa-east-1": region{ 253 "sa-east-1": region{
220 Description: "South America (Sao Paulo)", 254 Description: "South America (Sao Paulo)",
221 }, 255 },
@@ -233,6 +267,12 @@ var awsPartition = partition{
233 }, 267 },
234 }, 268 },
235 Services: services{ 269 Services: services{
270 "a4b": service{
271
272 Endpoints: endpoints{
273 "us-east-1": endpoint{},
274 },
275 },
236 "acm": service{ 276 "acm": service{
237 277
238 Endpoints: endpoints{ 278 Endpoints: endpoints{
@@ -245,6 +285,7 @@ var awsPartition = partition{
245 "eu-central-1": endpoint{}, 285 "eu-central-1": endpoint{},
246 "eu-west-1": endpoint{}, 286 "eu-west-1": endpoint{},
247 "eu-west-2": endpoint{}, 287 "eu-west-2": endpoint{},
288 "eu-west-3": endpoint{},
248 "sa-east-1": endpoint{}, 289 "sa-east-1": endpoint{},
249 "us-east-1": endpoint{}, 290 "us-east-1": endpoint{},
250 "us-east-2": endpoint{}, 291 "us-east-2": endpoint{},
@@ -252,6 +293,43 @@ var awsPartition = partition{
252 "us-west-2": endpoint{}, 293 "us-west-2": endpoint{},
253 }, 294 },
254 }, 295 },
296 "acm-pca": service{
297 Defaults: endpoint{
298 Protocols: []string{"https"},
299 },
300 Endpoints: endpoints{
301 "ap-northeast-1": endpoint{},
302 "ap-southeast-1": endpoint{},
303 "ap-southeast-2": endpoint{},
304 "ca-central-1": endpoint{},
305 "eu-central-1": endpoint{},
306 "eu-west-1": endpoint{},
307 "us-east-1": endpoint{},
308 "us-east-2": endpoint{},
309 "us-west-2": endpoint{},
310 },
311 },
312 "api.mediatailor": service{
313
314 Endpoints: endpoints{
315 "ap-northeast-1": endpoint{},
316 "ap-southeast-1": endpoint{},
317 "ap-southeast-2": endpoint{},
318 "eu-west-1": endpoint{},
319 "us-east-1": endpoint{},
320 },
321 },
322 "api.pricing": service{
323 Defaults: endpoint{
324 CredentialScope: credentialScope{
325 Service: "pricing",
326 },
327 },
328 Endpoints: endpoints{
329 "ap-south-1": endpoint{},
330 "us-east-1": endpoint{},
331 },
332 },
255 "apigateway": service{ 333 "apigateway": service{
256 334
257 Endpoints: endpoints{ 335 Endpoints: endpoints{
@@ -260,9 +338,12 @@ var awsPartition = partition{
260 "ap-south-1": endpoint{}, 338 "ap-south-1": endpoint{},
261 "ap-southeast-1": endpoint{}, 339 "ap-southeast-1": endpoint{},
262 "ap-southeast-2": endpoint{}, 340 "ap-southeast-2": endpoint{},
341 "ca-central-1": endpoint{},
263 "eu-central-1": endpoint{}, 342 "eu-central-1": endpoint{},
264 "eu-west-1": endpoint{}, 343 "eu-west-1": endpoint{},
265 "eu-west-2": endpoint{}, 344 "eu-west-2": endpoint{},
345 "eu-west-3": endpoint{},
346 "sa-east-1": endpoint{},
266 "us-east-1": endpoint{}, 347 "us-east-1": endpoint{},
267 "us-east-2": endpoint{}, 348 "us-east-2": endpoint{},
268 "us-west-1": endpoint{}, 349 "us-west-1": endpoint{},
@@ -287,6 +368,7 @@ var awsPartition = partition{
287 "eu-central-1": endpoint{}, 368 "eu-central-1": endpoint{},
288 "eu-west-1": endpoint{}, 369 "eu-west-1": endpoint{},
289 "eu-west-2": endpoint{}, 370 "eu-west-2": endpoint{},
371 "eu-west-3": endpoint{},
290 "sa-east-1": endpoint{}, 372 "sa-east-1": endpoint{},
291 "us-east-1": endpoint{}, 373 "us-east-1": endpoint{},
292 "us-east-2": endpoint{}, 374 "us-east-2": endpoint{},
@@ -311,9 +393,14 @@ var awsPartition = partition{
311 "athena": service{ 393 "athena": service{
312 394
313 Endpoints: endpoints{ 395 Endpoints: endpoints{
314 "us-east-1": endpoint{}, 396 "ap-northeast-1": endpoint{},
315 "us-east-2": endpoint{}, 397 "ap-southeast-1": endpoint{},
316 "us-west-2": endpoint{}, 398 "ap-southeast-2": endpoint{},
399 "eu-central-1": endpoint{},
400 "eu-west-1": endpoint{},
401 "us-east-1": endpoint{},
402 "us-east-2": endpoint{},
403 "us-west-2": endpoint{},
317 }, 404 },
318 }, 405 },
319 "autoscaling": service{ 406 "autoscaling": service{
@@ -330,6 +417,7 @@ var awsPartition = partition{
330 "eu-central-1": endpoint{}, 417 "eu-central-1": endpoint{},
331 "eu-west-1": endpoint{}, 418 "eu-west-1": endpoint{},
332 "eu-west-2": endpoint{}, 419 "eu-west-2": endpoint{},
420 "eu-west-3": endpoint{},
333 "sa-east-1": endpoint{}, 421 "sa-east-1": endpoint{},
334 "us-east-1": endpoint{}, 422 "us-east-1": endpoint{},
335 "us-east-2": endpoint{}, 423 "us-east-2": endpoint{},
@@ -337,12 +425,38 @@ var awsPartition = partition{
337 "us-west-2": endpoint{}, 425 "us-west-2": endpoint{},
338 }, 426 },
339 }, 427 },
428 "autoscaling-plans": service{
429 Defaults: endpoint{
430 Hostname: "autoscaling.{region}.amazonaws.com",
431 Protocols: []string{"http", "https"},
432 CredentialScope: credentialScope{
433 Service: "autoscaling-plans",
434 },
435 },
436 Endpoints: endpoints{
437 "ap-southeast-1": endpoint{},
438 "eu-west-1": endpoint{},
439 "us-east-1": endpoint{},
440 "us-east-2": endpoint{},
441 "us-west-2": endpoint{},
442 },
443 },
340 "batch": service{ 444 "batch": service{
341 445
342 Endpoints: endpoints{ 446 Endpoints: endpoints{
343 "eu-west-1": endpoint{}, 447 "ap-northeast-1": endpoint{},
344 "us-east-1": endpoint{}, 448 "ap-northeast-2": endpoint{},
345 "us-west-2": endpoint{}, 449 "ap-south-1": endpoint{},
450 "ap-southeast-1": endpoint{},
451 "ap-southeast-2": endpoint{},
452 "ca-central-1": endpoint{},
453 "eu-central-1": endpoint{},
454 "eu-west-1": endpoint{},
455 "eu-west-2": endpoint{},
456 "us-east-1": endpoint{},
457 "us-east-2": endpoint{},
458 "us-west-1": endpoint{},
459 "us-west-2": endpoint{},
346 }, 460 },
347 }, 461 },
348 "budgets": service{ 462 "budgets": service{
@@ -358,11 +472,35 @@ var awsPartition = partition{
358 }, 472 },
359 }, 473 },
360 }, 474 },
475 "ce": service{
476 PartitionEndpoint: "aws-global",
477 IsRegionalized: boxedFalse,
478
479 Endpoints: endpoints{
480 "aws-global": endpoint{
481 Hostname: "ce.us-east-1.amazonaws.com",
482 CredentialScope: credentialScope{
483 Region: "us-east-1",
484 },
485 },
486 },
487 },
488 "cloud9": service{
489
490 Endpoints: endpoints{
491 "ap-southeast-1": endpoint{},
492 "eu-west-1": endpoint{},
493 "us-east-1": endpoint{},
494 "us-east-2": endpoint{},
495 "us-west-2": endpoint{},
496 },
497 },
361 "clouddirectory": service{ 498 "clouddirectory": service{
362 499
363 Endpoints: endpoints{ 500 Endpoints: endpoints{
364 "ap-southeast-1": endpoint{}, 501 "ap-southeast-1": endpoint{},
365 "ap-southeast-2": endpoint{}, 502 "ap-southeast-2": endpoint{},
503 "eu-central-1": endpoint{},
366 "eu-west-1": endpoint{}, 504 "eu-west-1": endpoint{},
367 "eu-west-2": endpoint{}, 505 "eu-west-2": endpoint{},
368 "us-east-1": endpoint{}, 506 "us-east-1": endpoint{},
@@ -382,6 +520,7 @@ var awsPartition = partition{
382 "eu-central-1": endpoint{}, 520 "eu-central-1": endpoint{},
383 "eu-west-1": endpoint{}, 521 "eu-west-1": endpoint{},
384 "eu-west-2": endpoint{}, 522 "eu-west-2": endpoint{},
523 "eu-west-3": endpoint{},
385 "sa-east-1": endpoint{}, 524 "sa-east-1": endpoint{},
386 "us-east-1": endpoint{}, 525 "us-east-1": endpoint{},
387 "us-east-2": endpoint{}, 526 "us-east-2": endpoint{},
@@ -418,6 +557,27 @@ var awsPartition = partition{
418 "us-west-2": endpoint{}, 557 "us-west-2": endpoint{},
419 }, 558 },
420 }, 559 },
560 "cloudhsmv2": service{
561 Defaults: endpoint{
562 CredentialScope: credentialScope{
563 Service: "cloudhsm",
564 },
565 },
566 Endpoints: endpoints{
567 "ap-northeast-1": endpoint{},
568 "ap-south-1": endpoint{},
569 "ap-southeast-1": endpoint{},
570 "ap-southeast-2": endpoint{},
571 "ca-central-1": endpoint{},
572 "eu-central-1": endpoint{},
573 "eu-west-1": endpoint{},
574 "eu-west-2": endpoint{},
575 "us-east-1": endpoint{},
576 "us-east-2": endpoint{},
577 "us-west-1": endpoint{},
578 "us-west-2": endpoint{},
579 },
580 },
421 "cloudsearch": service{ 581 "cloudsearch": service{
422 582
423 Endpoints: endpoints{ 583 Endpoints: endpoints{
@@ -445,6 +605,7 @@ var awsPartition = partition{
445 "eu-central-1": endpoint{}, 605 "eu-central-1": endpoint{},
446 "eu-west-1": endpoint{}, 606 "eu-west-1": endpoint{},
447 "eu-west-2": endpoint{}, 607 "eu-west-2": endpoint{},
608 "eu-west-3": endpoint{},
448 "sa-east-1": endpoint{}, 609 "sa-east-1": endpoint{},
449 "us-east-1": endpoint{}, 610 "us-east-1": endpoint{},
450 "us-east-2": endpoint{}, 611 "us-east-2": endpoint{},
@@ -456,25 +617,63 @@ var awsPartition = partition{
456 617
457 Endpoints: endpoints{ 618 Endpoints: endpoints{
458 "ap-northeast-1": endpoint{}, 619 "ap-northeast-1": endpoint{},
620 "ap-northeast-2": endpoint{},
621 "ap-south-1": endpoint{},
459 "ap-southeast-1": endpoint{}, 622 "ap-southeast-1": endpoint{},
460 "ap-southeast-2": endpoint{}, 623 "ap-southeast-2": endpoint{},
624 "ca-central-1": endpoint{},
461 "eu-central-1": endpoint{}, 625 "eu-central-1": endpoint{},
462 "eu-west-1": endpoint{}, 626 "eu-west-1": endpoint{},
627 "eu-west-2": endpoint{},
628 "eu-west-3": endpoint{},
629 "sa-east-1": endpoint{},
463 "us-east-1": endpoint{}, 630 "us-east-1": endpoint{},
464 "us-east-2": endpoint{}, 631 "us-east-1-fips": endpoint{
465 "us-west-2": endpoint{}, 632 Hostname: "codebuild-fips.us-east-1.amazonaws.com",
633 CredentialScope: credentialScope{
634 Region: "us-east-1",
635 },
636 },
637 "us-east-2": endpoint{},
638 "us-east-2-fips": endpoint{
639 Hostname: "codebuild-fips.us-east-2.amazonaws.com",
640 CredentialScope: credentialScope{
641 Region: "us-east-2",
642 },
643 },
644 "us-west-1": endpoint{},
645 "us-west-1-fips": endpoint{
646 Hostname: "codebuild-fips.us-west-1.amazonaws.com",
647 CredentialScope: credentialScope{
648 Region: "us-west-1",
649 },
650 },
651 "us-west-2": endpoint{},
652 "us-west-2-fips": endpoint{
653 Hostname: "codebuild-fips.us-west-2.amazonaws.com",
654 CredentialScope: credentialScope{
655 Region: "us-west-2",
656 },
657 },
466 }, 658 },
467 }, 659 },
468 "codecommit": service{ 660 "codecommit": service{
469 661
470 Endpoints: endpoints{ 662 Endpoints: endpoints{
471 "ap-northeast-1": endpoint{}, 663 "ap-northeast-1": endpoint{},
664 "ap-northeast-2": endpoint{},
665 "ap-south-1": endpoint{},
472 "ap-southeast-1": endpoint{}, 666 "ap-southeast-1": endpoint{},
473 "ap-southeast-2": endpoint{}, 667 "ap-southeast-2": endpoint{},
668 "ca-central-1": endpoint{},
474 "eu-central-1": endpoint{}, 669 "eu-central-1": endpoint{},
475 "eu-west-1": endpoint{}, 670 "eu-west-1": endpoint{},
671 "eu-west-2": endpoint{},
672 "eu-west-3": endpoint{},
673 "sa-east-1": endpoint{},
476 "us-east-1": endpoint{}, 674 "us-east-1": endpoint{},
477 "us-east-2": endpoint{}, 675 "us-east-2": endpoint{},
676 "us-west-1": endpoint{},
478 "us-west-2": endpoint{}, 677 "us-west-2": endpoint{},
479 }, 678 },
480 }, 679 },
@@ -490,6 +689,7 @@ var awsPartition = partition{
490 "eu-central-1": endpoint{}, 689 "eu-central-1": endpoint{},
491 "eu-west-1": endpoint{}, 690 "eu-west-1": endpoint{},
492 "eu-west-2": endpoint{}, 691 "eu-west-2": endpoint{},
692 "eu-west-3": endpoint{},
493 "sa-east-1": endpoint{}, 693 "sa-east-1": endpoint{},
494 "us-east-1": endpoint{}, 694 "us-east-1": endpoint{},
495 "us-east-2": endpoint{}, 695 "us-east-2": endpoint{},
@@ -501,23 +701,37 @@ var awsPartition = partition{
501 701
502 Endpoints: endpoints{ 702 Endpoints: endpoints{
503 "ap-northeast-1": endpoint{}, 703 "ap-northeast-1": endpoint{},
704 "ap-northeast-2": endpoint{},
705 "ap-south-1": endpoint{},
504 "ap-southeast-1": endpoint{}, 706 "ap-southeast-1": endpoint{},
505 "ap-southeast-2": endpoint{}, 707 "ap-southeast-2": endpoint{},
708 "ca-central-1": endpoint{},
506 "eu-central-1": endpoint{}, 709 "eu-central-1": endpoint{},
507 "eu-west-1": endpoint{}, 710 "eu-west-1": endpoint{},
711 "eu-west-2": endpoint{},
712 "eu-west-3": endpoint{},
508 "sa-east-1": endpoint{}, 713 "sa-east-1": endpoint{},
509 "us-east-1": endpoint{}, 714 "us-east-1": endpoint{},
510 "us-east-2": endpoint{}, 715 "us-east-2": endpoint{},
716 "us-west-1": endpoint{},
511 "us-west-2": endpoint{}, 717 "us-west-2": endpoint{},
512 }, 718 },
513 }, 719 },
514 "codestar": service{ 720 "codestar": service{
515 721
516 Endpoints: endpoints{ 722 Endpoints: endpoints{
517 "eu-west-1": endpoint{}, 723 "ap-northeast-1": endpoint{},
518 "us-east-1": endpoint{}, 724 "ap-northeast-2": endpoint{},
519 "us-east-2": endpoint{}, 725 "ap-southeast-1": endpoint{},
520 "us-west-2": endpoint{}, 726 "ap-southeast-2": endpoint{},
727 "ca-central-1": endpoint{},
728 "eu-central-1": endpoint{},
729 "eu-west-1": endpoint{},
730 "eu-west-2": endpoint{},
731 "us-east-1": endpoint{},
732 "us-east-2": endpoint{},
733 "us-west-1": endpoint{},
734 "us-west-2": endpoint{},
521 }, 735 },
522 }, 736 },
523 "cognito-identity": service{ 737 "cognito-identity": service{
@@ -526,6 +740,7 @@ var awsPartition = partition{
526 "ap-northeast-1": endpoint{}, 740 "ap-northeast-1": endpoint{},
527 "ap-northeast-2": endpoint{}, 741 "ap-northeast-2": endpoint{},
528 "ap-south-1": endpoint{}, 742 "ap-south-1": endpoint{},
743 "ap-southeast-1": endpoint{},
529 "ap-southeast-2": endpoint{}, 744 "ap-southeast-2": endpoint{},
530 "eu-central-1": endpoint{}, 745 "eu-central-1": endpoint{},
531 "eu-west-1": endpoint{}, 746 "eu-west-1": endpoint{},
@@ -541,6 +756,7 @@ var awsPartition = partition{
541 "ap-northeast-1": endpoint{}, 756 "ap-northeast-1": endpoint{},
542 "ap-northeast-2": endpoint{}, 757 "ap-northeast-2": endpoint{},
543 "ap-south-1": endpoint{}, 758 "ap-south-1": endpoint{},
759 "ap-southeast-1": endpoint{},
544 "ap-southeast-2": endpoint{}, 760 "ap-southeast-2": endpoint{},
545 "eu-central-1": endpoint{}, 761 "eu-central-1": endpoint{},
546 "eu-west-1": endpoint{}, 762 "eu-west-1": endpoint{},
@@ -556,6 +772,7 @@ var awsPartition = partition{
556 "ap-northeast-1": endpoint{}, 772 "ap-northeast-1": endpoint{},
557 "ap-northeast-2": endpoint{}, 773 "ap-northeast-2": endpoint{},
558 "ap-south-1": endpoint{}, 774 "ap-south-1": endpoint{},
775 "ap-southeast-1": endpoint{},
559 "ap-southeast-2": endpoint{}, 776 "ap-southeast-2": endpoint{},
560 "eu-central-1": endpoint{}, 777 "eu-central-1": endpoint{},
561 "eu-west-1": endpoint{}, 778 "eu-west-1": endpoint{},
@@ -565,6 +782,17 @@ var awsPartition = partition{
565 "us-west-2": endpoint{}, 782 "us-west-2": endpoint{},
566 }, 783 },
567 }, 784 },
785 "comprehend": service{
786 Defaults: endpoint{
787 Protocols: []string{"https"},
788 },
789 Endpoints: endpoints{
790 "eu-west-1": endpoint{},
791 "us-east-1": endpoint{},
792 "us-east-2": endpoint{},
793 "us-west-2": endpoint{},
794 },
795 },
568 "config": service{ 796 "config": service{
569 797
570 Endpoints: endpoints{ 798 Endpoints: endpoints{
@@ -577,6 +805,7 @@ var awsPartition = partition{
577 "eu-central-1": endpoint{}, 805 "eu-central-1": endpoint{},
578 "eu-west-1": endpoint{}, 806 "eu-west-1": endpoint{},
579 "eu-west-2": endpoint{}, 807 "eu-west-2": endpoint{},
808 "eu-west-3": endpoint{},
580 "sa-east-1": endpoint{}, 809 "sa-east-1": endpoint{},
581 "us-east-1": endpoint{}, 810 "us-east-1": endpoint{},
582 "us-east-2": endpoint{}, 811 "us-east-2": endpoint{},
@@ -600,6 +829,21 @@ var awsPartition = partition{
600 "us-west-2": endpoint{}, 829 "us-west-2": endpoint{},
601 }, 830 },
602 }, 831 },
832 "dax": service{
833
834 Endpoints: endpoints{
835 "ap-northeast-1": endpoint{},
836 "ap-south-1": endpoint{},
837 "ap-southeast-1": endpoint{},
838 "ap-southeast-2": endpoint{},
839 "eu-west-1": endpoint{},
840 "sa-east-1": endpoint{},
841 "us-east-1": endpoint{},
842 "us-east-2": endpoint{},
843 "us-west-1": endpoint{},
844 "us-west-2": endpoint{},
845 },
846 },
603 "devicefarm": service{ 847 "devicefarm": service{
604 848
605 Endpoints: endpoints{ 849 Endpoints: endpoints{
@@ -618,6 +862,7 @@ var awsPartition = partition{
618 "eu-central-1": endpoint{}, 862 "eu-central-1": endpoint{},
619 "eu-west-1": endpoint{}, 863 "eu-west-1": endpoint{},
620 "eu-west-2": endpoint{}, 864 "eu-west-2": endpoint{},
865 "eu-west-3": endpoint{},
621 "sa-east-1": endpoint{}, 866 "sa-east-1": endpoint{},
622 "us-east-1": endpoint{}, 867 "us-east-1": endpoint{},
623 "us-east-2": endpoint{}, 868 "us-east-2": endpoint{},
@@ -643,6 +888,7 @@ var awsPartition = partition{
643 "eu-central-1": endpoint{}, 888 "eu-central-1": endpoint{},
644 "eu-west-1": endpoint{}, 889 "eu-west-1": endpoint{},
645 "eu-west-2": endpoint{}, 890 "eu-west-2": endpoint{},
891 "eu-west-3": endpoint{},
646 "sa-east-1": endpoint{}, 892 "sa-east-1": endpoint{},
647 "us-east-1": endpoint{}, 893 "us-east-1": endpoint{},
648 "us-east-2": endpoint{}, 894 "us-east-2": endpoint{},
@@ -655,14 +901,17 @@ var awsPartition = partition{
655 Endpoints: endpoints{ 901 Endpoints: endpoints{
656 "ap-northeast-1": endpoint{}, 902 "ap-northeast-1": endpoint{},
657 "ap-northeast-2": endpoint{}, 903 "ap-northeast-2": endpoint{},
904 "ap-south-1": endpoint{},
658 "ap-southeast-1": endpoint{}, 905 "ap-southeast-1": endpoint{},
659 "ap-southeast-2": endpoint{}, 906 "ap-southeast-2": endpoint{},
660 "ca-central-1": endpoint{}, 907 "ca-central-1": endpoint{},
661 "eu-central-1": endpoint{}, 908 "eu-central-1": endpoint{},
662 "eu-west-1": endpoint{}, 909 "eu-west-1": endpoint{},
663 "eu-west-2": endpoint{}, 910 "eu-west-2": endpoint{},
911 "sa-east-1": endpoint{},
664 "us-east-1": endpoint{}, 912 "us-east-1": endpoint{},
665 "us-east-2": endpoint{}, 913 "us-east-2": endpoint{},
914 "us-west-1": endpoint{},
666 "us-west-2": endpoint{}, 915 "us-west-2": endpoint{},
667 }, 916 },
668 }, 917 },
@@ -680,6 +929,7 @@ var awsPartition = partition{
680 "eu-central-1": endpoint{}, 929 "eu-central-1": endpoint{},
681 "eu-west-1": endpoint{}, 930 "eu-west-1": endpoint{},
682 "eu-west-2": endpoint{}, 931 "eu-west-2": endpoint{},
932 "eu-west-3": endpoint{},
683 "local": endpoint{ 933 "local": endpoint{
684 Hostname: "localhost:8000", 934 Hostname: "localhost:8000",
685 Protocols: []string{"http"}, 935 Protocols: []string{"http"},
@@ -708,6 +958,7 @@ var awsPartition = partition{
708 "eu-central-1": endpoint{}, 958 "eu-central-1": endpoint{},
709 "eu-west-1": endpoint{}, 959 "eu-west-1": endpoint{},
710 "eu-west-2": endpoint{}, 960 "eu-west-2": endpoint{},
961 "eu-west-3": endpoint{},
711 "sa-east-1": endpoint{}, 962 "sa-east-1": endpoint{},
712 "us-east-1": endpoint{}, 963 "us-east-1": endpoint{},
713 "us-east-2": endpoint{}, 964 "us-east-2": endpoint{},
@@ -730,12 +981,16 @@ var awsPartition = partition{
730 981
731 Endpoints: endpoints{ 982 Endpoints: endpoints{
732 "ap-northeast-1": endpoint{}, 983 "ap-northeast-1": endpoint{},
984 "ap-northeast-2": endpoint{},
985 "ap-south-1": endpoint{},
733 "ap-southeast-1": endpoint{}, 986 "ap-southeast-1": endpoint{},
734 "ap-southeast-2": endpoint{}, 987 "ap-southeast-2": endpoint{},
735 "ca-central-1": endpoint{}, 988 "ca-central-1": endpoint{},
736 "eu-central-1": endpoint{}, 989 "eu-central-1": endpoint{},
737 "eu-west-1": endpoint{}, 990 "eu-west-1": endpoint{},
738 "eu-west-2": endpoint{}, 991 "eu-west-2": endpoint{},
992 "eu-west-3": endpoint{},
993 "sa-east-1": endpoint{},
739 "us-east-1": endpoint{}, 994 "us-east-1": endpoint{},
740 "us-east-2": endpoint{}, 995 "us-east-2": endpoint{},
741 "us-west-1": endpoint{}, 996 "us-west-1": endpoint{},
@@ -746,12 +1001,16 @@ var awsPartition = partition{
746 1001
747 Endpoints: endpoints{ 1002 Endpoints: endpoints{
748 "ap-northeast-1": endpoint{}, 1003 "ap-northeast-1": endpoint{},
1004 "ap-northeast-2": endpoint{},
1005 "ap-south-1": endpoint{},
749 "ap-southeast-1": endpoint{}, 1006 "ap-southeast-1": endpoint{},
750 "ap-southeast-2": endpoint{}, 1007 "ap-southeast-2": endpoint{},
751 "ca-central-1": endpoint{}, 1008 "ca-central-1": endpoint{},
752 "eu-central-1": endpoint{}, 1009 "eu-central-1": endpoint{},
753 "eu-west-1": endpoint{}, 1010 "eu-west-1": endpoint{},
754 "eu-west-2": endpoint{}, 1011 "eu-west-2": endpoint{},
1012 "eu-west-3": endpoint{},
1013 "sa-east-1": endpoint{},
755 "us-east-1": endpoint{}, 1014 "us-east-1": endpoint{},
756 "us-east-2": endpoint{}, 1015 "us-east-2": endpoint{},
757 "us-west-1": endpoint{}, 1016 "us-west-1": endpoint{},
@@ -770,6 +1029,7 @@ var awsPartition = partition{
770 "eu-central-1": endpoint{}, 1029 "eu-central-1": endpoint{},
771 "eu-west-1": endpoint{}, 1030 "eu-west-1": endpoint{},
772 "eu-west-2": endpoint{}, 1031 "eu-west-2": endpoint{},
1032 "eu-west-3": endpoint{},
773 "sa-east-1": endpoint{}, 1033 "sa-east-1": endpoint{},
774 "us-east-1": endpoint{}, 1034 "us-east-1": endpoint{},
775 "us-east-2": endpoint{}, 1035 "us-east-2": endpoint{},
@@ -789,6 +1049,7 @@ var awsPartition = partition{
789 "eu-central-1": endpoint{}, 1049 "eu-central-1": endpoint{},
790 "eu-west-1": endpoint{}, 1050 "eu-west-1": endpoint{},
791 "eu-west-2": endpoint{}, 1051 "eu-west-2": endpoint{},
1052 "eu-west-3": endpoint{},
792 "sa-east-1": endpoint{}, 1053 "sa-east-1": endpoint{},
793 "us-east-1": endpoint{}, 1054 "us-east-1": endpoint{},
794 "us-east-2": endpoint{}, 1055 "us-east-2": endpoint{},
@@ -799,16 +1060,19 @@ var awsPartition = partition{
799 "elasticfilesystem": service{ 1060 "elasticfilesystem": service{
800 1061
801 Endpoints: endpoints{ 1062 Endpoints: endpoints{
1063 "ap-northeast-2": endpoint{},
802 "ap-southeast-2": endpoint{}, 1064 "ap-southeast-2": endpoint{},
1065 "eu-central-1": endpoint{},
803 "eu-west-1": endpoint{}, 1066 "eu-west-1": endpoint{},
804 "us-east-1": endpoint{}, 1067 "us-east-1": endpoint{},
805 "us-east-2": endpoint{}, 1068 "us-east-2": endpoint{},
1069 "us-west-1": endpoint{},
806 "us-west-2": endpoint{}, 1070 "us-west-2": endpoint{},
807 }, 1071 },
808 }, 1072 },
809 "elasticloadbalancing": service{ 1073 "elasticloadbalancing": service{
810 Defaults: endpoint{ 1074 Defaults: endpoint{
811 Protocols: []string{"http", "https"}, 1075 Protocols: []string{"https"},
812 }, 1076 },
813 Endpoints: endpoints{ 1077 Endpoints: endpoints{
814 "ap-northeast-1": endpoint{}, 1078 "ap-northeast-1": endpoint{},
@@ -820,6 +1084,7 @@ var awsPartition = partition{
820 "eu-central-1": endpoint{}, 1084 "eu-central-1": endpoint{},
821 "eu-west-1": endpoint{}, 1085 "eu-west-1": endpoint{},
822 "eu-west-2": endpoint{}, 1086 "eu-west-2": endpoint{},
1087 "eu-west-3": endpoint{},
823 "sa-east-1": endpoint{}, 1088 "sa-east-1": endpoint{},
824 "us-east-1": endpoint{}, 1089 "us-east-1": endpoint{},
825 "us-east-2": endpoint{}, 1090 "us-east-2": endpoint{},
@@ -844,6 +1109,7 @@ var awsPartition = partition{
844 }, 1109 },
845 "eu-west-1": endpoint{}, 1110 "eu-west-1": endpoint{},
846 "eu-west-2": endpoint{}, 1111 "eu-west-2": endpoint{},
1112 "eu-west-3": endpoint{},
847 "sa-east-1": endpoint{}, 1113 "sa-east-1": endpoint{},
848 "us-east-1": endpoint{ 1114 "us-east-1": endpoint{
849 SSLCommonName: "{service}.{region}.{dnsSuffix}", 1115 SSLCommonName: "{service}.{region}.{dnsSuffix}",
@@ -896,6 +1162,7 @@ var awsPartition = partition{
896 "eu-central-1": endpoint{}, 1162 "eu-central-1": endpoint{},
897 "eu-west-1": endpoint{}, 1163 "eu-west-1": endpoint{},
898 "eu-west-2": endpoint{}, 1164 "eu-west-2": endpoint{},
1165 "eu-west-3": endpoint{},
899 "sa-east-1": endpoint{}, 1166 "sa-east-1": endpoint{},
900 "us-east-1": endpoint{}, 1167 "us-east-1": endpoint{},
901 "us-east-2": endpoint{}, 1168 "us-east-2": endpoint{},
@@ -915,6 +1182,7 @@ var awsPartition = partition{
915 "eu-central-1": endpoint{}, 1182 "eu-central-1": endpoint{},
916 "eu-west-1": endpoint{}, 1183 "eu-west-1": endpoint{},
917 "eu-west-2": endpoint{}, 1184 "eu-west-2": endpoint{},
1185 "eu-west-3": endpoint{},
918 "sa-east-1": endpoint{}, 1186 "sa-east-1": endpoint{},
919 "us-east-1": endpoint{}, 1187 "us-east-1": endpoint{},
920 "us-east-2": endpoint{}, 1188 "us-east-2": endpoint{},
@@ -925,6 +1193,24 @@ var awsPartition = partition{
925 "firehose": service{ 1193 "firehose": service{
926 1194
927 Endpoints: endpoints{ 1195 Endpoints: endpoints{
1196 "ap-northeast-1": endpoint{},
1197 "ap-northeast-2": endpoint{},
1198 "ap-southeast-1": endpoint{},
1199 "ap-southeast-2": endpoint{},
1200 "ca-central-1": endpoint{},
1201 "eu-central-1": endpoint{},
1202 "eu-west-1": endpoint{},
1203 "us-east-1": endpoint{},
1204 "us-east-2": endpoint{},
1205 "us-west-1": endpoint{},
1206 "us-west-2": endpoint{},
1207 },
1208 },
1209 "fms": service{
1210 Defaults: endpoint{
1211 Protocols: []string{"https"},
1212 },
1213 Endpoints: endpoints{
928 "eu-west-1": endpoint{}, 1214 "eu-west-1": endpoint{},
929 "us-east-1": endpoint{}, 1215 "us-east-1": endpoint{},
930 "us-west-2": endpoint{}, 1216 "us-west-2": endpoint{},
@@ -937,10 +1223,15 @@ var awsPartition = partition{
937 "ap-northeast-2": endpoint{}, 1223 "ap-northeast-2": endpoint{},
938 "ap-south-1": endpoint{}, 1224 "ap-south-1": endpoint{},
939 "ap-southeast-1": endpoint{}, 1225 "ap-southeast-1": endpoint{},
1226 "ap-southeast-2": endpoint{},
1227 "ca-central-1": endpoint{},
940 "eu-central-1": endpoint{}, 1228 "eu-central-1": endpoint{},
941 "eu-west-1": endpoint{}, 1229 "eu-west-1": endpoint{},
1230 "eu-west-2": endpoint{},
942 "sa-east-1": endpoint{}, 1231 "sa-east-1": endpoint{},
943 "us-east-1": endpoint{}, 1232 "us-east-1": endpoint{},
1233 "us-east-2": endpoint{},
1234 "us-west-1": endpoint{},
944 "us-west-2": endpoint{}, 1235 "us-west-2": endpoint{},
945 }, 1236 },
946 }, 1237 },
@@ -952,11 +1243,65 @@ var awsPartition = partition{
952 "ap-northeast-1": endpoint{}, 1243 "ap-northeast-1": endpoint{},
953 "ap-northeast-2": endpoint{}, 1244 "ap-northeast-2": endpoint{},
954 "ap-south-1": endpoint{}, 1245 "ap-south-1": endpoint{},
1246 "ap-southeast-1": endpoint{},
955 "ap-southeast-2": endpoint{}, 1247 "ap-southeast-2": endpoint{},
956 "ca-central-1": endpoint{}, 1248 "ca-central-1": endpoint{},
957 "eu-central-1": endpoint{}, 1249 "eu-central-1": endpoint{},
958 "eu-west-1": endpoint{}, 1250 "eu-west-1": endpoint{},
959 "eu-west-2": endpoint{}, 1251 "eu-west-2": endpoint{},
1252 "eu-west-3": endpoint{},
1253 "us-east-1": endpoint{},
1254 "us-east-2": endpoint{},
1255 "us-west-1": endpoint{},
1256 "us-west-2": endpoint{},
1257 },
1258 },
1259 "glue": service{
1260
1261 Endpoints: endpoints{
1262 "ap-northeast-1": endpoint{},
1263 "ap-northeast-2": endpoint{},
1264 "ap-south-1": endpoint{},
1265 "ap-southeast-1": endpoint{},
1266 "ap-southeast-2": endpoint{},
1267 "eu-central-1": endpoint{},
1268 "eu-west-1": endpoint{},
1269 "eu-west-2": endpoint{},
1270 "us-east-1": endpoint{},
1271 "us-east-2": endpoint{},
1272 "us-west-2": endpoint{},
1273 },
1274 },
1275 "greengrass": service{
1276 IsRegionalized: boxedTrue,
1277 Defaults: endpoint{
1278 Protocols: []string{"https"},
1279 },
1280 Endpoints: endpoints{
1281 "ap-northeast-1": endpoint{},
1282 "ap-southeast-2": endpoint{},
1283 "eu-central-1": endpoint{},
1284 "us-east-1": endpoint{},
1285 "us-west-2": endpoint{},
1286 },
1287 },
1288 "guardduty": service{
1289 IsRegionalized: boxedTrue,
1290 Defaults: endpoint{
1291 Protocols: []string{"https"},
1292 },
1293 Endpoints: endpoints{
1294 "ap-northeast-1": endpoint{},
1295 "ap-northeast-2": endpoint{},
1296 "ap-south-1": endpoint{},
1297 "ap-southeast-1": endpoint{},
1298 "ap-southeast-2": endpoint{},
1299 "ca-central-1": endpoint{},
1300 "eu-central-1": endpoint{},
1301 "eu-west-1": endpoint{},
1302 "eu-west-2": endpoint{},
1303 "eu-west-3": endpoint{},
1304 "sa-east-1": endpoint{},
960 "us-east-1": endpoint{}, 1305 "us-east-1": endpoint{},
961 "us-east-2": endpoint{}, 1306 "us-east-2": endpoint{},
962 "us-west-1": endpoint{}, 1307 "us-west-1": endpoint{},
@@ -1004,8 +1349,11 @@ var awsPartition = partition{
1004 "ap-northeast-2": endpoint{}, 1349 "ap-northeast-2": endpoint{},
1005 "ap-south-1": endpoint{}, 1350 "ap-south-1": endpoint{},
1006 "ap-southeast-2": endpoint{}, 1351 "ap-southeast-2": endpoint{},
1352 "eu-central-1": endpoint{},
1007 "eu-west-1": endpoint{}, 1353 "eu-west-1": endpoint{},
1008 "us-east-1": endpoint{}, 1354 "us-east-1": endpoint{},
1355 "us-east-2": endpoint{},
1356 "us-west-1": endpoint{},
1009 "us-west-2": endpoint{}, 1357 "us-west-2": endpoint{},
1010 }, 1358 },
1011 }, 1359 },
@@ -1018,6 +1366,7 @@ var awsPartition = partition{
1018 Endpoints: endpoints{ 1366 Endpoints: endpoints{
1019 "ap-northeast-1": endpoint{}, 1367 "ap-northeast-1": endpoint{},
1020 "ap-northeast-2": endpoint{}, 1368 "ap-northeast-2": endpoint{},
1369 "ap-south-1": endpoint{},
1021 "ap-southeast-1": endpoint{}, 1370 "ap-southeast-1": endpoint{},
1022 "ap-southeast-2": endpoint{}, 1371 "ap-southeast-2": endpoint{},
1023 "eu-central-1": endpoint{}, 1372 "eu-central-1": endpoint{},
@@ -1040,6 +1389,7 @@ var awsPartition = partition{
1040 "eu-central-1": endpoint{}, 1389 "eu-central-1": endpoint{},
1041 "eu-west-1": endpoint{}, 1390 "eu-west-1": endpoint{},
1042 "eu-west-2": endpoint{}, 1391 "eu-west-2": endpoint{},
1392 "eu-west-3": endpoint{},
1043 "sa-east-1": endpoint{}, 1393 "sa-east-1": endpoint{},
1044 "us-east-1": endpoint{}, 1394 "us-east-1": endpoint{},
1045 "us-east-2": endpoint{}, 1395 "us-east-2": endpoint{},
@@ -1055,6 +1405,16 @@ var awsPartition = partition{
1055 "us-west-2": endpoint{}, 1405 "us-west-2": endpoint{},
1056 }, 1406 },
1057 }, 1407 },
1408 "kinesisvideo": service{
1409
1410 Endpoints: endpoints{
1411 "ap-northeast-1": endpoint{},
1412 "eu-central-1": endpoint{},
1413 "eu-west-1": endpoint{},
1414 "us-east-1": endpoint{},
1415 "us-west-2": endpoint{},
1416 },
1417 },
1058 "kms": service{ 1418 "kms": service{
1059 1419
1060 Endpoints: endpoints{ 1420 Endpoints: endpoints{
@@ -1067,6 +1427,7 @@ var awsPartition = partition{
1067 "eu-central-1": endpoint{}, 1427 "eu-central-1": endpoint{},
1068 "eu-west-1": endpoint{}, 1428 "eu-west-1": endpoint{},
1069 "eu-west-2": endpoint{}, 1429 "eu-west-2": endpoint{},
1430 "eu-west-3": endpoint{},
1070 "sa-east-1": endpoint{}, 1431 "sa-east-1": endpoint{},
1071 "us-east-1": endpoint{}, 1432 "us-east-1": endpoint{},
1072 "us-east-2": endpoint{}, 1433 "us-east-2": endpoint{},
@@ -1082,9 +1443,12 @@ var awsPartition = partition{
1082 "ap-south-1": endpoint{}, 1443 "ap-south-1": endpoint{},
1083 "ap-southeast-1": endpoint{}, 1444 "ap-southeast-1": endpoint{},
1084 "ap-southeast-2": endpoint{}, 1445 "ap-southeast-2": endpoint{},
1446 "ca-central-1": endpoint{},
1085 "eu-central-1": endpoint{}, 1447 "eu-central-1": endpoint{},
1086 "eu-west-1": endpoint{}, 1448 "eu-west-1": endpoint{},
1087 "eu-west-2": endpoint{}, 1449 "eu-west-2": endpoint{},
1450 "eu-west-3": endpoint{},
1451 "sa-east-1": endpoint{},
1088 "us-east-1": endpoint{}, 1452 "us-east-1": endpoint{},
1089 "us-east-2": endpoint{}, 1453 "us-east-2": endpoint{},
1090 "us-west-1": endpoint{}, 1454 "us-west-1": endpoint{},
@@ -1095,12 +1459,15 @@ var awsPartition = partition{
1095 1459
1096 Endpoints: endpoints{ 1460 Endpoints: endpoints{
1097 "ap-northeast-1": endpoint{}, 1461 "ap-northeast-1": endpoint{},
1462 "ap-northeast-2": endpoint{},
1098 "ap-south-1": endpoint{}, 1463 "ap-south-1": endpoint{},
1099 "ap-southeast-1": endpoint{}, 1464 "ap-southeast-1": endpoint{},
1100 "ap-southeast-2": endpoint{}, 1465 "ap-southeast-2": endpoint{},
1466 "ca-central-1": endpoint{},
1101 "eu-central-1": endpoint{}, 1467 "eu-central-1": endpoint{},
1102 "eu-west-1": endpoint{}, 1468 "eu-west-1": endpoint{},
1103 "eu-west-2": endpoint{}, 1469 "eu-west-2": endpoint{},
1470 "eu-west-3": endpoint{},
1104 "us-east-1": endpoint{}, 1471 "us-east-1": endpoint{},
1105 "us-east-2": endpoint{}, 1472 "us-east-2": endpoint{},
1106 "us-west-2": endpoint{}, 1473 "us-west-2": endpoint{},
@@ -1118,6 +1485,7 @@ var awsPartition = partition{
1118 "eu-central-1": endpoint{}, 1485 "eu-central-1": endpoint{},
1119 "eu-west-1": endpoint{}, 1486 "eu-west-1": endpoint{},
1120 "eu-west-2": endpoint{}, 1487 "eu-west-2": endpoint{},
1488 "eu-west-3": endpoint{},
1121 "sa-east-1": endpoint{}, 1489 "sa-east-1": endpoint{},
1122 "us-east-1": endpoint{}, 1490 "us-east-1": endpoint{},
1123 "us-east-2": endpoint{}, 1491 "us-east-2": endpoint{},
@@ -1138,6 +1506,65 @@ var awsPartition = partition{
1138 "us-east-1": endpoint{}, 1506 "us-east-1": endpoint{},
1139 }, 1507 },
1140 }, 1508 },
1509 "mediaconvert": service{
1510
1511 Endpoints: endpoints{
1512 "ap-northeast-1": endpoint{},
1513 "ap-northeast-2": endpoint{},
1514 "ap-south-1": endpoint{},
1515 "ap-southeast-1": endpoint{},
1516 "ap-southeast-2": endpoint{},
1517 "ca-central-1": endpoint{},
1518 "eu-central-1": endpoint{},
1519 "eu-west-1": endpoint{},
1520 "eu-west-2": endpoint{},
1521 "sa-east-1": endpoint{},
1522 "us-east-1": endpoint{},
1523 "us-east-2": endpoint{},
1524 "us-west-1": endpoint{},
1525 "us-west-2": endpoint{},
1526 },
1527 },
1528 "medialive": service{
1529
1530 Endpoints: endpoints{
1531 "ap-northeast-1": endpoint{},
1532 "ap-northeast-2": endpoint{},
1533 "ap-southeast-1": endpoint{},
1534 "ap-southeast-2": endpoint{},
1535 "eu-central-1": endpoint{},
1536 "eu-west-1": endpoint{},
1537 "sa-east-1": endpoint{},
1538 "us-east-1": endpoint{},
1539 "us-west-2": endpoint{},
1540 },
1541 },
1542 "mediapackage": service{
1543
1544 Endpoints: endpoints{
1545 "ap-northeast-1": endpoint{},
1546 "ap-northeast-2": endpoint{},
1547 "ap-southeast-1": endpoint{},
1548 "ap-southeast-2": endpoint{},
1549 "eu-central-1": endpoint{},
1550 "eu-west-1": endpoint{},
1551 "eu-west-3": endpoint{},
1552 "sa-east-1": endpoint{},
1553 "us-east-1": endpoint{},
1554 "us-west-2": endpoint{},
1555 },
1556 },
1557 "mediastore": service{
1558
1559 Endpoints: endpoints{
1560 "ap-northeast-1": endpoint{},
1561 "ap-southeast-2": endpoint{},
1562 "eu-central-1": endpoint{},
1563 "eu-west-1": endpoint{},
1564 "us-east-1": endpoint{},
1565 "us-west-2": endpoint{},
1566 },
1567 },
1141 "metering.marketplace": service{ 1568 "metering.marketplace": service{
1142 Defaults: endpoint{ 1569 Defaults: endpoint{
1143 CredentialScope: credentialScope{ 1570 CredentialScope: credentialScope{
@@ -1154,6 +1581,7 @@ var awsPartition = partition{
1154 "eu-central-1": endpoint{}, 1581 "eu-central-1": endpoint{},
1155 "eu-west-1": endpoint{}, 1582 "eu-west-1": endpoint{},
1156 "eu-west-2": endpoint{}, 1583 "eu-west-2": endpoint{},
1584 "eu-west-3": endpoint{},
1157 "sa-east-1": endpoint{}, 1585 "sa-east-1": endpoint{},
1158 "us-east-1": endpoint{}, 1586 "us-east-1": endpoint{},
1159 "us-east-2": endpoint{}, 1587 "us-east-2": endpoint{},
@@ -1161,6 +1589,12 @@ var awsPartition = partition{
1161 "us-west-2": endpoint{}, 1589 "us-west-2": endpoint{},
1162 }, 1590 },
1163 }, 1591 },
1592 "mgh": service{
1593
1594 Endpoints: endpoints{
1595 "us-west-2": endpoint{},
1596 },
1597 },
1164 "mobileanalytics": service{ 1598 "mobileanalytics": service{
1165 1599
1166 Endpoints: endpoints{ 1600 Endpoints: endpoints{
@@ -1174,7 +1608,9 @@ var awsPartition = partition{
1174 }, 1608 },
1175 }, 1609 },
1176 Endpoints: endpoints{ 1610 Endpoints: endpoints{
1611 "eu-west-1": endpoint{},
1177 "us-east-1": endpoint{}, 1612 "us-east-1": endpoint{},
1613 "us-west-2": endpoint{},
1178 }, 1614 },
1179 }, 1615 },
1180 "monitoring": service{ 1616 "monitoring": service{
@@ -1191,6 +1627,7 @@ var awsPartition = partition{
1191 "eu-central-1": endpoint{}, 1627 "eu-central-1": endpoint{},
1192 "eu-west-1": endpoint{}, 1628 "eu-west-1": endpoint{},
1193 "eu-west-2": endpoint{}, 1629 "eu-west-2": endpoint{},
1630 "eu-west-3": endpoint{},
1194 "sa-east-1": endpoint{}, 1631 "sa-east-1": endpoint{},
1195 "us-east-1": endpoint{}, 1632 "us-east-1": endpoint{},
1196 "us-east-2": endpoint{}, 1633 "us-east-2": endpoint{},
@@ -1208,6 +1645,35 @@ var awsPartition = partition{
1208 "us-east-1": endpoint{}, 1645 "us-east-1": endpoint{},
1209 }, 1646 },
1210 }, 1647 },
1648 "neptune": service{
1649
1650 Endpoints: endpoints{
1651 "eu-west-1": endpoint{
1652 Hostname: "rds.eu-west-1.amazonaws.com",
1653 CredentialScope: credentialScope{
1654 Region: "eu-west-1",
1655 },
1656 },
1657 "us-east-1": endpoint{
1658 Hostname: "rds.us-east-1.amazonaws.com",
1659 CredentialScope: credentialScope{
1660 Region: "us-east-1",
1661 },
1662 },
1663 "us-east-2": endpoint{
1664 Hostname: "rds.us-east-2.amazonaws.com",
1665 CredentialScope: credentialScope{
1666 Region: "us-east-2",
1667 },
1668 },
1669 "us-west-2": endpoint{
1670 Hostname: "rds.us-west-2.amazonaws.com",
1671 CredentialScope: credentialScope{
1672 Region: "us-west-2",
1673 },
1674 },
1675 },
1676 },
1211 "opsworks": service{ 1677 "opsworks": service{
1212 1678
1213 Endpoints: endpoints{ 1679 Endpoints: endpoints{
@@ -1216,9 +1682,11 @@ var awsPartition = partition{
1216 "ap-south-1": endpoint{}, 1682 "ap-south-1": endpoint{},
1217 "ap-southeast-1": endpoint{}, 1683 "ap-southeast-1": endpoint{},
1218 "ap-southeast-2": endpoint{}, 1684 "ap-southeast-2": endpoint{},
1685 "ca-central-1": endpoint{},
1219 "eu-central-1": endpoint{}, 1686 "eu-central-1": endpoint{},
1220 "eu-west-1": endpoint{}, 1687 "eu-west-1": endpoint{},
1221 "eu-west-2": endpoint{}, 1688 "eu-west-2": endpoint{},
1689 "eu-west-3": endpoint{},
1222 "sa-east-1": endpoint{}, 1690 "sa-east-1": endpoint{},
1223 "us-east-1": endpoint{}, 1691 "us-east-1": endpoint{},
1224 "us-east-2": endpoint{}, 1692 "us-east-2": endpoint{},
@@ -1229,9 +1697,15 @@ var awsPartition = partition{
1229 "opsworks-cm": service{ 1697 "opsworks-cm": service{
1230 1698
1231 Endpoints: endpoints{ 1699 Endpoints: endpoints{
1232 "eu-west-1": endpoint{}, 1700 "ap-northeast-1": endpoint{},
1233 "us-east-1": endpoint{}, 1701 "ap-southeast-1": endpoint{},
1234 "us-west-2": endpoint{}, 1702 "ap-southeast-2": endpoint{},
1703 "eu-central-1": endpoint{},
1704 "eu-west-1": endpoint{},
1705 "us-east-1": endpoint{},
1706 "us-east-2": endpoint{},
1707 "us-west-1": endpoint{},
1708 "us-west-2": endpoint{},
1235 }, 1709 },
1236 }, 1710 },
1237 "organizations": service{ 1711 "organizations": service{
@@ -1260,10 +1734,21 @@ var awsPartition = partition{
1260 "polly": service{ 1734 "polly": service{
1261 1735
1262 Endpoints: endpoints{ 1736 Endpoints: endpoints{
1263 "eu-west-1": endpoint{}, 1737 "ap-northeast-1": endpoint{},
1264 "us-east-1": endpoint{}, 1738 "ap-northeast-2": endpoint{},
1265 "us-east-2": endpoint{}, 1739 "ap-south-1": endpoint{},
1266 "us-west-2": endpoint{}, 1740 "ap-southeast-1": endpoint{},
1741 "ap-southeast-2": endpoint{},
1742 "ca-central-1": endpoint{},
1743 "eu-central-1": endpoint{},
1744 "eu-west-1": endpoint{},
1745 "eu-west-2": endpoint{},
1746 "eu-west-3": endpoint{},
1747 "sa-east-1": endpoint{},
1748 "us-east-1": endpoint{},
1749 "us-east-2": endpoint{},
1750 "us-west-1": endpoint{},
1751 "us-west-2": endpoint{},
1267 }, 1752 },
1268 }, 1753 },
1269 "rds": service{ 1754 "rds": service{
@@ -1278,6 +1763,7 @@ var awsPartition = partition{
1278 "eu-central-1": endpoint{}, 1763 "eu-central-1": endpoint{},
1279 "eu-west-1": endpoint{}, 1764 "eu-west-1": endpoint{},
1280 "eu-west-2": endpoint{}, 1765 "eu-west-2": endpoint{},
1766 "eu-west-3": endpoint{},
1281 "sa-east-1": endpoint{}, 1767 "sa-east-1": endpoint{},
1282 "us-east-1": endpoint{ 1768 "us-east-1": endpoint{
1283 SSLCommonName: "{service}.{dnsSuffix}", 1769 SSLCommonName: "{service}.{dnsSuffix}",
@@ -1299,6 +1785,7 @@ var awsPartition = partition{
1299 "eu-central-1": endpoint{}, 1785 "eu-central-1": endpoint{},
1300 "eu-west-1": endpoint{}, 1786 "eu-west-1": endpoint{},
1301 "eu-west-2": endpoint{}, 1787 "eu-west-2": endpoint{},
1788 "eu-west-3": endpoint{},
1302 "sa-east-1": endpoint{}, 1789 "sa-east-1": endpoint{},
1303 "us-east-1": endpoint{}, 1790 "us-east-1": endpoint{},
1304 "us-east-2": endpoint{}, 1791 "us-east-2": endpoint{},
@@ -1309,9 +1796,31 @@ var awsPartition = partition{
1309 "rekognition": service{ 1796 "rekognition": service{
1310 1797
1311 Endpoints: endpoints{ 1798 Endpoints: endpoints{
1312 "eu-west-1": endpoint{}, 1799 "ap-northeast-1": endpoint{},
1313 "us-east-1": endpoint{}, 1800 "ap-southeast-2": endpoint{},
1314 "us-west-2": endpoint{}, 1801 "eu-west-1": endpoint{},
1802 "us-east-1": endpoint{},
1803 "us-east-2": endpoint{},
1804 "us-west-2": endpoint{},
1805 },
1806 },
1807 "resource-groups": service{
1808
1809 Endpoints: endpoints{
1810 "ap-northeast-1": endpoint{},
1811 "ap-northeast-2": endpoint{},
1812 "ap-south-1": endpoint{},
1813 "ap-southeast-1": endpoint{},
1814 "ap-southeast-2": endpoint{},
1815 "ca-central-1": endpoint{},
1816 "eu-central-1": endpoint{},
1817 "eu-west-1": endpoint{},
1818 "eu-west-2": endpoint{},
1819 "sa-east-1": endpoint{},
1820 "us-east-1": endpoint{},
1821 "us-east-2": endpoint{},
1822 "us-west-1": endpoint{},
1823 "us-west-2": endpoint{},
1315 }, 1824 },
1316 }, 1825 },
1317 "route53": service{ 1826 "route53": service{
@@ -1340,7 +1849,19 @@ var awsPartition = partition{
1340 }, 1849 },
1341 }, 1850 },
1342 Endpoints: endpoints{ 1851 Endpoints: endpoints{
1852 "eu-west-1": endpoint{},
1343 "us-east-1": endpoint{}, 1853 "us-east-1": endpoint{},
1854 "us-west-2": endpoint{},
1855 },
1856 },
1857 "runtime.sagemaker": service{
1858
1859 Endpoints: endpoints{
1860 "ap-northeast-1": endpoint{},
1861 "eu-west-1": endpoint{},
1862 "us-east-1": endpoint{},
1863 "us-east-2": endpoint{},
1864 "us-west-2": endpoint{},
1344 }, 1865 },
1345 }, 1866 },
1346 "s3": service{ 1867 "s3": service{
@@ -1355,26 +1876,27 @@ var awsPartition = partition{
1355 }, 1876 },
1356 Endpoints: endpoints{ 1877 Endpoints: endpoints{
1357 "ap-northeast-1": endpoint{ 1878 "ap-northeast-1": endpoint{
1358 Hostname: "s3-ap-northeast-1.amazonaws.com", 1879 Hostname: "s3.ap-northeast-1.amazonaws.com",
1359 SignatureVersions: []string{"s3", "s3v4"}, 1880 SignatureVersions: []string{"s3", "s3v4"},
1360 }, 1881 },
1361 "ap-northeast-2": endpoint{}, 1882 "ap-northeast-2": endpoint{},
1362 "ap-south-1": endpoint{}, 1883 "ap-south-1": endpoint{},
1363 "ap-southeast-1": endpoint{ 1884 "ap-southeast-1": endpoint{
1364 Hostname: "s3-ap-southeast-1.amazonaws.com", 1885 Hostname: "s3.ap-southeast-1.amazonaws.com",
1365 SignatureVersions: []string{"s3", "s3v4"}, 1886 SignatureVersions: []string{"s3", "s3v4"},
1366 }, 1887 },
1367 "ap-southeast-2": endpoint{ 1888 "ap-southeast-2": endpoint{
1368 Hostname: "s3-ap-southeast-2.amazonaws.com", 1889 Hostname: "s3.ap-southeast-2.amazonaws.com",
1369 SignatureVersions: []string{"s3", "s3v4"}, 1890 SignatureVersions: []string{"s3", "s3v4"},
1370 }, 1891 },
1371 "ca-central-1": endpoint{}, 1892 "ca-central-1": endpoint{},
1372 "eu-central-1": endpoint{}, 1893 "eu-central-1": endpoint{},
1373 "eu-west-1": endpoint{ 1894 "eu-west-1": endpoint{
1374 Hostname: "s3-eu-west-1.amazonaws.com", 1895 Hostname: "s3.eu-west-1.amazonaws.com",
1375 SignatureVersions: []string{"s3", "s3v4"}, 1896 SignatureVersions: []string{"s3", "s3v4"},
1376 }, 1897 },
1377 "eu-west-2": endpoint{}, 1898 "eu-west-2": endpoint{},
1899 "eu-west-3": endpoint{},
1378 "s3-external-1": endpoint{ 1900 "s3-external-1": endpoint{
1379 Hostname: "s3-external-1.amazonaws.com", 1901 Hostname: "s3-external-1.amazonaws.com",
1380 SignatureVersions: []string{"s3", "s3v4"}, 1902 SignatureVersions: []string{"s3", "s3v4"},
@@ -1383,7 +1905,7 @@ var awsPartition = partition{
1383 }, 1905 },
1384 }, 1906 },
1385 "sa-east-1": endpoint{ 1907 "sa-east-1": endpoint{
1386 Hostname: "s3-sa-east-1.amazonaws.com", 1908 Hostname: "s3.sa-east-1.amazonaws.com",
1387 SignatureVersions: []string{"s3", "s3v4"}, 1909 SignatureVersions: []string{"s3", "s3v4"},
1388 }, 1910 },
1389 "us-east-1": endpoint{ 1911 "us-east-1": endpoint{
@@ -1392,15 +1914,26 @@ var awsPartition = partition{
1392 }, 1914 },
1393 "us-east-2": endpoint{}, 1915 "us-east-2": endpoint{},
1394 "us-west-1": endpoint{ 1916 "us-west-1": endpoint{
1395 Hostname: "s3-us-west-1.amazonaws.com", 1917 Hostname: "s3.us-west-1.amazonaws.com",
1396 SignatureVersions: []string{"s3", "s3v4"}, 1918 SignatureVersions: []string{"s3", "s3v4"},
1397 }, 1919 },
1398 "us-west-2": endpoint{ 1920 "us-west-2": endpoint{
1399 Hostname: "s3-us-west-2.amazonaws.com", 1921 Hostname: "s3.us-west-2.amazonaws.com",
1400 SignatureVersions: []string{"s3", "s3v4"}, 1922 SignatureVersions: []string{"s3", "s3v4"},
1401 }, 1923 },
1402 }, 1924 },
1403 }, 1925 },
1926 "sagemaker": service{
1927
1928 Endpoints: endpoints{
1929 "ap-northeast-1": endpoint{},
1930 "ap-northeast-2": endpoint{},
1931 "eu-west-1": endpoint{},
1932 "us-east-1": endpoint{},
1933 "us-east-2": endpoint{},
1934 "us-west-2": endpoint{},
1935 },
1936 },
1404 "sdb": service{ 1937 "sdb": service{
1405 Defaults: endpoint{ 1938 Defaults: endpoint{
1406 Protocols: []string{"http", "https"}, 1939 Protocols: []string{"http", "https"},
@@ -1419,21 +1952,104 @@ var awsPartition = partition{
1419 "us-west-2": endpoint{}, 1952 "us-west-2": endpoint{},
1420 }, 1953 },
1421 }, 1954 },
1955 "secretsmanager": service{
1956
1957 Endpoints: endpoints{
1958 "ap-northeast-1": endpoint{},
1959 "ap-northeast-2": endpoint{},
1960 "ap-south-1": endpoint{},
1961 "ap-southeast-1": endpoint{},
1962 "ap-southeast-2": endpoint{},
1963 "ca-central-1": endpoint{},
1964 "eu-central-1": endpoint{},
1965 "eu-west-1": endpoint{},
1966 "eu-west-2": endpoint{},
1967 "sa-east-1": endpoint{},
1968 "us-east-1": endpoint{},
1969 "us-east-2": endpoint{},
1970 "us-west-1": endpoint{},
1971 "us-west-2": endpoint{},
1972 },
1973 },
1974 "serverlessrepo": service{
1975 Defaults: endpoint{
1976 Protocols: []string{"https"},
1977 },
1978 Endpoints: endpoints{
1979 "ap-northeast-1": endpoint{
1980 Protocols: []string{"https"},
1981 },
1982 "ap-northeast-2": endpoint{
1983 Protocols: []string{"https"},
1984 },
1985 "ap-south-1": endpoint{
1986 Protocols: []string{"https"},
1987 },
1988 "ap-southeast-1": endpoint{
1989 Protocols: []string{"https"},
1990 },
1991 "ap-southeast-2": endpoint{
1992 Protocols: []string{"https"},
1993 },
1994 "ca-central-1": endpoint{
1995 Protocols: []string{"https"},
1996 },
1997 "eu-central-1": endpoint{
1998 Protocols: []string{"https"},
1999 },
2000 "eu-west-1": endpoint{
2001 Protocols: []string{"https"},
2002 },
2003 "eu-west-2": endpoint{
2004 Protocols: []string{"https"},
2005 },
2006 "sa-east-1": endpoint{
2007 Protocols: []string{"https"},
2008 },
2009 "us-east-1": endpoint{
2010 Protocols: []string{"https"},
2011 },
2012 "us-east-2": endpoint{
2013 Protocols: []string{"https"},
2014 },
2015 "us-west-1": endpoint{
2016 Protocols: []string{"https"},
2017 },
2018 "us-west-2": endpoint{
2019 Protocols: []string{"https"},
2020 },
2021 },
2022 },
1422 "servicecatalog": service{ 2023 "servicecatalog": service{
1423 2024
1424 Endpoints: endpoints{ 2025 Endpoints: endpoints{
1425 "ap-northeast-1": endpoint{}, 2026 "ap-northeast-1": endpoint{},
2027 "ap-northeast-2": endpoint{},
2028 "ap-south-1": endpoint{},
1426 "ap-southeast-1": endpoint{}, 2029 "ap-southeast-1": endpoint{},
1427 "ap-southeast-2": endpoint{}, 2030 "ap-southeast-2": endpoint{},
1428 "ca-central-1": endpoint{}, 2031 "ca-central-1": endpoint{},
1429 "eu-central-1": endpoint{}, 2032 "eu-central-1": endpoint{},
1430 "eu-west-1": endpoint{}, 2033 "eu-west-1": endpoint{},
1431 "eu-west-2": endpoint{}, 2034 "eu-west-2": endpoint{},
2035 "eu-west-3": endpoint{},
2036 "sa-east-1": endpoint{},
1432 "us-east-1": endpoint{}, 2037 "us-east-1": endpoint{},
1433 "us-east-2": endpoint{}, 2038 "us-east-2": endpoint{},
2039 "us-west-1": endpoint{},
1434 "us-west-2": endpoint{}, 2040 "us-west-2": endpoint{},
1435 }, 2041 },
1436 }, 2042 },
2043 "servicediscovery": service{
2044
2045 Endpoints: endpoints{
2046 "eu-west-1": endpoint{},
2047 "us-east-1": endpoint{},
2048 "us-east-2": endpoint{},
2049 "us-west-1": endpoint{},
2050 "us-west-2": endpoint{},
2051 },
2052 },
1437 "shield": service{ 2053 "shield": service{
1438 IsRegionalized: boxedFalse, 2054 IsRegionalized: boxedFalse,
1439 Defaults: endpoint{ 2055 Defaults: endpoint{
@@ -1447,19 +2063,36 @@ var awsPartition = partition{
1447 "sms": service{ 2063 "sms": service{
1448 2064
1449 Endpoints: endpoints{ 2065 Endpoints: endpoints{
2066 "ap-northeast-1": endpoint{},
2067 "ap-northeast-2": endpoint{},
2068 "ap-south-1": endpoint{},
2069 "ap-southeast-1": endpoint{},
1450 "ap-southeast-2": endpoint{}, 2070 "ap-southeast-2": endpoint{},
2071 "ca-central-1": endpoint{},
2072 "eu-central-1": endpoint{},
1451 "eu-west-1": endpoint{}, 2073 "eu-west-1": endpoint{},
2074 "eu-west-2": endpoint{},
2075 "eu-west-3": endpoint{},
2076 "sa-east-1": endpoint{},
1452 "us-east-1": endpoint{}, 2077 "us-east-1": endpoint{},
2078 "us-east-2": endpoint{},
2079 "us-west-1": endpoint{},
2080 "us-west-2": endpoint{},
1453 }, 2081 },
1454 }, 2082 },
1455 "snowball": service{ 2083 "snowball": service{
1456 2084
1457 Endpoints: endpoints{ 2085 Endpoints: endpoints{
2086 "ap-northeast-1": endpoint{},
1458 "ap-south-1": endpoint{}, 2087 "ap-south-1": endpoint{},
2088 "ap-southeast-1": endpoint{},
1459 "ap-southeast-2": endpoint{}, 2089 "ap-southeast-2": endpoint{},
2090 "ca-central-1": endpoint{},
1460 "eu-central-1": endpoint{}, 2091 "eu-central-1": endpoint{},
1461 "eu-west-1": endpoint{}, 2092 "eu-west-1": endpoint{},
1462 "eu-west-2": endpoint{}, 2093 "eu-west-2": endpoint{},
2094 "eu-west-3": endpoint{},
2095 "sa-east-1": endpoint{},
1463 "us-east-1": endpoint{}, 2096 "us-east-1": endpoint{},
1464 "us-east-2": endpoint{}, 2097 "us-east-2": endpoint{},
1465 "us-west-1": endpoint{}, 2098 "us-west-1": endpoint{},
@@ -1480,6 +2113,7 @@ var awsPartition = partition{
1480 "eu-central-1": endpoint{}, 2113 "eu-central-1": endpoint{},
1481 "eu-west-1": endpoint{}, 2114 "eu-west-1": endpoint{},
1482 "eu-west-2": endpoint{}, 2115 "eu-west-2": endpoint{},
2116 "eu-west-3": endpoint{},
1483 "sa-east-1": endpoint{}, 2117 "sa-east-1": endpoint{},
1484 "us-east-1": endpoint{}, 2118 "us-east-1": endpoint{},
1485 "us-east-2": endpoint{}, 2119 "us-east-2": endpoint{},
@@ -1502,7 +2136,32 @@ var awsPartition = partition{
1502 "eu-central-1": endpoint{}, 2136 "eu-central-1": endpoint{},
1503 "eu-west-1": endpoint{}, 2137 "eu-west-1": endpoint{},
1504 "eu-west-2": endpoint{}, 2138 "eu-west-2": endpoint{},
1505 "sa-east-1": endpoint{}, 2139 "eu-west-3": endpoint{},
2140 "fips-us-east-1": endpoint{
2141 Hostname: "sqs-fips.us-east-1.amazonaws.com",
2142 CredentialScope: credentialScope{
2143 Region: "us-east-1",
2144 },
2145 },
2146 "fips-us-east-2": endpoint{
2147 Hostname: "sqs-fips.us-east-2.amazonaws.com",
2148 CredentialScope: credentialScope{
2149 Region: "us-east-2",
2150 },
2151 },
2152 "fips-us-west-1": endpoint{
2153 Hostname: "sqs-fips.us-west-1.amazonaws.com",
2154 CredentialScope: credentialScope{
2155 Region: "us-west-1",
2156 },
2157 },
2158 "fips-us-west-2": endpoint{
2159 Hostname: "sqs-fips.us-west-2.amazonaws.com",
2160 CredentialScope: credentialScope{
2161 Region: "us-west-2",
2162 },
2163 },
2164 "sa-east-1": endpoint{},
1506 "us-east-1": endpoint{ 2165 "us-east-1": endpoint{
1507 SSLCommonName: "queue.{dnsSuffix}", 2166 SSLCommonName: "queue.{dnsSuffix}",
1508 }, 2167 },
@@ -1523,6 +2182,7 @@ var awsPartition = partition{
1523 "eu-central-1": endpoint{}, 2182 "eu-central-1": endpoint{},
1524 "eu-west-1": endpoint{}, 2183 "eu-west-1": endpoint{},
1525 "eu-west-2": endpoint{}, 2184 "eu-west-2": endpoint{},
2185 "eu-west-3": endpoint{},
1526 "sa-east-1": endpoint{}, 2186 "sa-east-1": endpoint{},
1527 "us-east-1": endpoint{}, 2187 "us-east-1": endpoint{},
1528 "us-east-2": endpoint{}, 2188 "us-east-2": endpoint{},
@@ -1534,10 +2194,16 @@ var awsPartition = partition{
1534 2194
1535 Endpoints: endpoints{ 2195 Endpoints: endpoints{
1536 "ap-northeast-1": endpoint{}, 2196 "ap-northeast-1": endpoint{},
2197 "ap-northeast-2": endpoint{},
2198 "ap-southeast-1": endpoint{},
2199 "ap-southeast-2": endpoint{},
2200 "ca-central-1": endpoint{},
1537 "eu-central-1": endpoint{}, 2201 "eu-central-1": endpoint{},
1538 "eu-west-1": endpoint{}, 2202 "eu-west-1": endpoint{},
2203 "eu-west-2": endpoint{},
1539 "us-east-1": endpoint{}, 2204 "us-east-1": endpoint{},
1540 "us-east-2": endpoint{}, 2205 "us-east-2": endpoint{},
2206 "us-west-1": endpoint{},
1541 "us-west-2": endpoint{}, 2207 "us-west-2": endpoint{},
1542 }, 2208 },
1543 }, 2209 },
@@ -1553,6 +2219,7 @@ var awsPartition = partition{
1553 "eu-central-1": endpoint{}, 2219 "eu-central-1": endpoint{},
1554 "eu-west-1": endpoint{}, 2220 "eu-west-1": endpoint{},
1555 "eu-west-2": endpoint{}, 2221 "eu-west-2": endpoint{},
2222 "eu-west-3": endpoint{},
1556 "sa-east-1": endpoint{}, 2223 "sa-east-1": endpoint{},
1557 "us-east-1": endpoint{}, 2224 "us-east-1": endpoint{},
1558 "us-east-2": endpoint{}, 2225 "us-east-2": endpoint{},
@@ -1577,6 +2244,7 @@ var awsPartition = partition{
1577 "eu-central-1": endpoint{}, 2244 "eu-central-1": endpoint{},
1578 "eu-west-1": endpoint{}, 2245 "eu-west-1": endpoint{},
1579 "eu-west-2": endpoint{}, 2246 "eu-west-2": endpoint{},
2247 "eu-west-3": endpoint{},
1580 "local": endpoint{ 2248 "local": endpoint{
1581 Hostname: "localhost:8000", 2249 Hostname: "localhost:8000",
1582 Protocols: []string{"http"}, 2250 Protocols: []string{"http"},
@@ -1615,6 +2283,7 @@ var awsPartition = partition{
1615 "eu-central-1": endpoint{}, 2283 "eu-central-1": endpoint{},
1616 "eu-west-1": endpoint{}, 2284 "eu-west-1": endpoint{},
1617 "eu-west-2": endpoint{}, 2285 "eu-west-2": endpoint{},
2286 "eu-west-3": endpoint{},
1618 "sa-east-1": endpoint{}, 2287 "sa-east-1": endpoint{},
1619 "us-east-1": endpoint{}, 2288 "us-east-1": endpoint{},
1620 "us-east-1-fips": endpoint{ 2289 "us-east-1-fips": endpoint{
@@ -1664,6 +2333,7 @@ var awsPartition = partition{
1664 "eu-central-1": endpoint{}, 2333 "eu-central-1": endpoint{},
1665 "eu-west-1": endpoint{}, 2334 "eu-west-1": endpoint{},
1666 "eu-west-2": endpoint{}, 2335 "eu-west-2": endpoint{},
2336 "eu-west-3": endpoint{},
1667 "sa-east-1": endpoint{}, 2337 "sa-east-1": endpoint{},
1668 "us-east-1": endpoint{}, 2338 "us-east-1": endpoint{},
1669 "us-east-2": endpoint{}, 2339 "us-east-2": endpoint{},
@@ -1683,6 +2353,7 @@ var awsPartition = partition{
1683 "eu-central-1": endpoint{}, 2353 "eu-central-1": endpoint{},
1684 "eu-west-1": endpoint{}, 2354 "eu-west-1": endpoint{},
1685 "eu-west-2": endpoint{}, 2355 "eu-west-2": endpoint{},
2356 "eu-west-3": endpoint{},
1686 "sa-east-1": endpoint{}, 2357 "sa-east-1": endpoint{},
1687 "us-east-1": endpoint{}, 2358 "us-east-1": endpoint{},
1688 "us-east-2": endpoint{}, 2359 "us-east-2": endpoint{},
@@ -1690,6 +2361,17 @@ var awsPartition = partition{
1690 "us-west-2": endpoint{}, 2361 "us-west-2": endpoint{},
1691 }, 2362 },
1692 }, 2363 },
2364 "translate": service{
2365 Defaults: endpoint{
2366 Protocols: []string{"https"},
2367 },
2368 Endpoints: endpoints{
2369 "eu-west-1": endpoint{},
2370 "us-east-1": endpoint{},
2371 "us-east-2": endpoint{},
2372 "us-west-2": endpoint{},
2373 },
2374 },
1693 "waf": service{ 2375 "waf": service{
1694 PartitionEndpoint: "aws-global", 2376 PartitionEndpoint: "aws-global",
1695 IsRegionalized: boxedFalse, 2377 IsRegionalized: boxedFalse,
@@ -1707,8 +2389,12 @@ var awsPartition = partition{
1707 2389
1708 Endpoints: endpoints{ 2390 Endpoints: endpoints{
1709 "ap-northeast-1": endpoint{}, 2391 "ap-northeast-1": endpoint{},
2392 "ap-southeast-2": endpoint{},
2393 "eu-central-1": endpoint{},
1710 "eu-west-1": endpoint{}, 2394 "eu-west-1": endpoint{},
1711 "us-east-1": endpoint{}, 2395 "us-east-1": endpoint{},
2396 "us-east-2": endpoint{},
2397 "us-west-1": endpoint{},
1712 "us-west-2": endpoint{}, 2398 "us-west-2": endpoint{},
1713 }, 2399 },
1714 }, 2400 },
@@ -1723,14 +2409,28 @@ var awsPartition = partition{
1723 "us-west-2": endpoint{}, 2409 "us-west-2": endpoint{},
1724 }, 2410 },
1725 }, 2411 },
2412 "workmail": service{
2413 Defaults: endpoint{
2414 Protocols: []string{"https"},
2415 },
2416 Endpoints: endpoints{
2417 "eu-west-1": endpoint{},
2418 "us-east-1": endpoint{},
2419 "us-west-2": endpoint{},
2420 },
2421 },
1726 "workspaces": service{ 2422 "workspaces": service{
1727 2423
1728 Endpoints: endpoints{ 2424 Endpoints: endpoints{
1729 "ap-northeast-1": endpoint{}, 2425 "ap-northeast-1": endpoint{},
2426 "ap-northeast-2": endpoint{},
1730 "ap-southeast-1": endpoint{}, 2427 "ap-southeast-1": endpoint{},
1731 "ap-southeast-2": endpoint{}, 2428 "ap-southeast-2": endpoint{},
2429 "ca-central-1": endpoint{},
1732 "eu-central-1": endpoint{}, 2430 "eu-central-1": endpoint{},
1733 "eu-west-1": endpoint{}, 2431 "eu-west-1": endpoint{},
2432 "eu-west-2": endpoint{},
2433 "sa-east-1": endpoint{},
1734 "us-east-1": endpoint{}, 2434 "us-east-1": endpoint{},
1735 "us-west-2": endpoint{}, 2435 "us-west-2": endpoint{},
1736 }, 2436 },
@@ -1781,44 +2481,86 @@ var awscnPartition = partition{
1781 "cn-north-1": region{ 2481 "cn-north-1": region{
1782 Description: "China (Beijing)", 2482 Description: "China (Beijing)",
1783 }, 2483 },
2484 "cn-northwest-1": region{
2485 Description: "China (Ningxia)",
2486 },
1784 }, 2487 },
1785 Services: services{ 2488 Services: services{
2489 "apigateway": service{
2490
2491 Endpoints: endpoints{
2492 "cn-north-1": endpoint{},
2493 "cn-northwest-1": endpoint{},
2494 },
2495 },
2496 "application-autoscaling": service{
2497 Defaults: endpoint{
2498 Hostname: "autoscaling.{region}.amazonaws.com",
2499 Protocols: []string{"http", "https"},
2500 CredentialScope: credentialScope{
2501 Service: "application-autoscaling",
2502 },
2503 },
2504 Endpoints: endpoints{
2505 "cn-north-1": endpoint{},
2506 "cn-northwest-1": endpoint{},
2507 },
2508 },
1786 "autoscaling": service{ 2509 "autoscaling": service{
1787 Defaults: endpoint{ 2510 Defaults: endpoint{
1788 Protocols: []string{"http", "https"}, 2511 Protocols: []string{"http", "https"},
1789 }, 2512 },
1790 Endpoints: endpoints{ 2513 Endpoints: endpoints{
1791 "cn-north-1": endpoint{}, 2514 "cn-north-1": endpoint{},
2515 "cn-northwest-1": endpoint{},
1792 }, 2516 },
1793 }, 2517 },
1794 "cloudformation": service{ 2518 "cloudformation": service{
1795 2519
1796 Endpoints: endpoints{ 2520 Endpoints: endpoints{
1797 "cn-north-1": endpoint{}, 2521 "cn-north-1": endpoint{},
2522 "cn-northwest-1": endpoint{},
1798 }, 2523 },
1799 }, 2524 },
1800 "cloudtrail": service{ 2525 "cloudtrail": service{
1801 2526
1802 Endpoints: endpoints{ 2527 Endpoints: endpoints{
1803 "cn-north-1": endpoint{}, 2528 "cn-north-1": endpoint{},
2529 "cn-northwest-1": endpoint{},
1804 }, 2530 },
1805 }, 2531 },
1806 "codedeploy": service{ 2532 "codedeploy": service{
1807 2533
1808 Endpoints: endpoints{ 2534 Endpoints: endpoints{
2535 "cn-north-1": endpoint{},
2536 "cn-northwest-1": endpoint{},
2537 },
2538 },
2539 "cognito-identity": service{
2540
2541 Endpoints: endpoints{
1809 "cn-north-1": endpoint{}, 2542 "cn-north-1": endpoint{},
1810 }, 2543 },
1811 }, 2544 },
1812 "config": service{ 2545 "config": service{
1813 2546
1814 Endpoints: endpoints{ 2547 Endpoints: endpoints{
1815 "cn-north-1": endpoint{}, 2548 "cn-north-1": endpoint{},
2549 "cn-northwest-1": endpoint{},
1816 }, 2550 },
1817 }, 2551 },
1818 "directconnect": service{ 2552 "directconnect": service{
1819 2553
1820 Endpoints: endpoints{ 2554 Endpoints: endpoints{
1821 "cn-north-1": endpoint{}, 2555 "cn-north-1": endpoint{},
2556 "cn-northwest-1": endpoint{},
2557 },
2558 },
2559 "ds": service{
2560
2561 Endpoints: endpoints{
2562 "cn-north-1": endpoint{},
2563 "cn-northwest-1": endpoint{},
1822 }, 2564 },
1823 }, 2565 },
1824 "dynamodb": service{ 2566 "dynamodb": service{
@@ -1826,7 +2568,8 @@ var awscnPartition = partition{
1826 Protocols: []string{"http", "https"}, 2568 Protocols: []string{"http", "https"},
1827 }, 2569 },
1828 Endpoints: endpoints{ 2570 Endpoints: endpoints{
1829 "cn-north-1": endpoint{}, 2571 "cn-north-1": endpoint{},
2572 "cn-northwest-1": endpoint{},
1830 }, 2573 },
1831 }, 2574 },
1832 "ec2": service{ 2575 "ec2": service{
@@ -1834,7 +2577,8 @@ var awscnPartition = partition{
1834 Protocols: []string{"http", "https"}, 2577 Protocols: []string{"http", "https"},
1835 }, 2578 },
1836 Endpoints: endpoints{ 2579 Endpoints: endpoints{
1837 "cn-north-1": endpoint{}, 2580 "cn-north-1": endpoint{},
2581 "cn-northwest-1": endpoint{},
1838 }, 2582 },
1839 }, 2583 },
1840 "ec2metadata": service{ 2584 "ec2metadata": service{
@@ -1848,24 +2592,41 @@ var awscnPartition = partition{
1848 }, 2592 },
1849 }, 2593 },
1850 }, 2594 },
2595 "ecr": service{
2596
2597 Endpoints: endpoints{
2598 "cn-north-1": endpoint{},
2599 "cn-northwest-1": endpoint{},
2600 },
2601 },
2602 "ecs": service{
2603
2604 Endpoints: endpoints{
2605 "cn-north-1": endpoint{},
2606 "cn-northwest-1": endpoint{},
2607 },
2608 },
1851 "elasticache": service{ 2609 "elasticache": service{
1852 2610
1853 Endpoints: endpoints{ 2611 Endpoints: endpoints{
1854 "cn-north-1": endpoint{}, 2612 "cn-north-1": endpoint{},
2613 "cn-northwest-1": endpoint{},
1855 }, 2614 },
1856 }, 2615 },
1857 "elasticbeanstalk": service{ 2616 "elasticbeanstalk": service{
1858 2617
1859 Endpoints: endpoints{ 2618 Endpoints: endpoints{
1860 "cn-north-1": endpoint{}, 2619 "cn-north-1": endpoint{},
2620 "cn-northwest-1": endpoint{},
1861 }, 2621 },
1862 }, 2622 },
1863 "elasticloadbalancing": service{ 2623 "elasticloadbalancing": service{
1864 Defaults: endpoint{ 2624 Defaults: endpoint{
1865 Protocols: []string{"http", "https"}, 2625 Protocols: []string{"https"},
1866 }, 2626 },
1867 Endpoints: endpoints{ 2627 Endpoints: endpoints{
1868 "cn-north-1": endpoint{}, 2628 "cn-north-1": endpoint{},
2629 "cn-northwest-1": endpoint{},
1869 }, 2630 },
1870 }, 2631 },
1871 "elasticmapreduce": service{ 2632 "elasticmapreduce": service{
@@ -1873,13 +2634,21 @@ var awscnPartition = partition{
1873 Protocols: []string{"http", "https"}, 2634 Protocols: []string{"http", "https"},
1874 }, 2635 },
1875 Endpoints: endpoints{ 2636 Endpoints: endpoints{
1876 "cn-north-1": endpoint{}, 2637 "cn-north-1": endpoint{},
2638 "cn-northwest-1": endpoint{},
2639 },
2640 },
2641 "es": service{
2642
2643 Endpoints: endpoints{
2644 "cn-northwest-1": endpoint{},
1877 }, 2645 },
1878 }, 2646 },
1879 "events": service{ 2647 "events": service{
1880 2648
1881 Endpoints: endpoints{ 2649 Endpoints: endpoints{
1882 "cn-north-1": endpoint{}, 2650 "cn-north-1": endpoint{},
2651 "cn-northwest-1": endpoint{},
1883 }, 2652 },
1884 }, 2653 },
1885 "glacier": service{ 2654 "glacier": service{
@@ -1887,7 +2656,8 @@ var awscnPartition = partition{
1887 Protocols: []string{"http", "https"}, 2656 Protocols: []string{"http", "https"},
1888 }, 2657 },
1889 Endpoints: endpoints{ 2658 Endpoints: endpoints{
1890 "cn-north-1": endpoint{}, 2659 "cn-north-1": endpoint{},
2660 "cn-northwest-1": endpoint{},
1891 }, 2661 },
1892 }, 2662 },
1893 "iam": service{ 2663 "iam": service{
@@ -1903,16 +2673,35 @@ var awscnPartition = partition{
1903 }, 2673 },
1904 }, 2674 },
1905 }, 2675 },
2676 "iot": service{
2677 Defaults: endpoint{
2678 CredentialScope: credentialScope{
2679 Service: "execute-api",
2680 },
2681 },
2682 Endpoints: endpoints{
2683 "cn-north-1": endpoint{},
2684 },
2685 },
1906 "kinesis": service{ 2686 "kinesis": service{
1907 2687
1908 Endpoints: endpoints{ 2688 Endpoints: endpoints{
1909 "cn-north-1": endpoint{}, 2689 "cn-north-1": endpoint{},
2690 "cn-northwest-1": endpoint{},
2691 },
2692 },
2693 "lambda": service{
2694
2695 Endpoints: endpoints{
2696 "cn-north-1": endpoint{},
2697 "cn-northwest-1": endpoint{},
1910 }, 2698 },
1911 }, 2699 },
1912 "logs": service{ 2700 "logs": service{
1913 2701
1914 Endpoints: endpoints{ 2702 Endpoints: endpoints{
1915 "cn-north-1": endpoint{}, 2703 "cn-north-1": endpoint{},
2704 "cn-northwest-1": endpoint{},
1916 }, 2705 },
1917 }, 2706 },
1918 "monitoring": service{ 2707 "monitoring": service{
@@ -1920,19 +2709,22 @@ var awscnPartition = partition{
1920 Protocols: []string{"http", "https"}, 2709 Protocols: []string{"http", "https"},
1921 }, 2710 },
1922 Endpoints: endpoints{ 2711 Endpoints: endpoints{
1923 "cn-north-1": endpoint{}, 2712 "cn-north-1": endpoint{},
2713 "cn-northwest-1": endpoint{},
1924 }, 2714 },
1925 }, 2715 },
1926 "rds": service{ 2716 "rds": service{
1927 2717
1928 Endpoints: endpoints{ 2718 Endpoints: endpoints{
1929 "cn-north-1": endpoint{}, 2719 "cn-north-1": endpoint{},
2720 "cn-northwest-1": endpoint{},
1930 }, 2721 },
1931 }, 2722 },
1932 "redshift": service{ 2723 "redshift": service{
1933 2724
1934 Endpoints: endpoints{ 2725 Endpoints: endpoints{
1935 "cn-north-1": endpoint{}, 2726 "cn-north-1": endpoint{},
2727 "cn-northwest-1": endpoint{},
1936 }, 2728 },
1937 }, 2729 },
1938 "s3": service{ 2730 "s3": service{
@@ -1941,6 +2733,20 @@ var awscnPartition = partition{
1941 SignatureVersions: []string{"s3v4"}, 2733 SignatureVersions: []string{"s3v4"},
1942 }, 2734 },
1943 Endpoints: endpoints{ 2735 Endpoints: endpoints{
2736 "cn-north-1": endpoint{},
2737 "cn-northwest-1": endpoint{},
2738 },
2739 },
2740 "sms": service{
2741
2742 Endpoints: endpoints{
2743 "cn-north-1": endpoint{},
2744 "cn-northwest-1": endpoint{},
2745 },
2746 },
2747 "snowball": service{
2748
2749 Endpoints: endpoints{
1944 "cn-north-1": endpoint{}, 2750 "cn-north-1": endpoint{},
1945 }, 2751 },
1946 }, 2752 },
@@ -1949,7 +2755,8 @@ var awscnPartition = partition{
1949 Protocols: []string{"http", "https"}, 2755 Protocols: []string{"http", "https"},
1950 }, 2756 },
1951 Endpoints: endpoints{ 2757 Endpoints: endpoints{
1952 "cn-north-1": endpoint{}, 2758 "cn-north-1": endpoint{},
2759 "cn-northwest-1": endpoint{},
1953 }, 2760 },
1954 }, 2761 },
1955 "sqs": service{ 2762 "sqs": service{
@@ -1958,7 +2765,15 @@ var awscnPartition = partition{
1958 Protocols: []string{"http", "https"}, 2765 Protocols: []string{"http", "https"},
1959 }, 2766 },
1960 Endpoints: endpoints{ 2767 Endpoints: endpoints{
1961 "cn-north-1": endpoint{}, 2768 "cn-north-1": endpoint{},
2769 "cn-northwest-1": endpoint{},
2770 },
2771 },
2772 "ssm": service{
2773
2774 Endpoints: endpoints{
2775 "cn-north-1": endpoint{},
2776 "cn-northwest-1": endpoint{},
1962 }, 2777 },
1963 }, 2778 },
1964 "storagegateway": service{ 2779 "storagegateway": service{
@@ -1975,25 +2790,29 @@ var awscnPartition = partition{
1975 }, 2790 },
1976 }, 2791 },
1977 Endpoints: endpoints{ 2792 Endpoints: endpoints{
1978 "cn-north-1": endpoint{}, 2793 "cn-north-1": endpoint{},
2794 "cn-northwest-1": endpoint{},
1979 }, 2795 },
1980 }, 2796 },
1981 "sts": service{ 2797 "sts": service{
1982 2798
1983 Endpoints: endpoints{ 2799 Endpoints: endpoints{
1984 "cn-north-1": endpoint{}, 2800 "cn-north-1": endpoint{},
2801 "cn-northwest-1": endpoint{},
1985 }, 2802 },
1986 }, 2803 },
1987 "swf": service{ 2804 "swf": service{
1988 2805
1989 Endpoints: endpoints{ 2806 Endpoints: endpoints{
1990 "cn-north-1": endpoint{}, 2807 "cn-north-1": endpoint{},
2808 "cn-northwest-1": endpoint{},
1991 }, 2809 },
1992 }, 2810 },
1993 "tagging": service{ 2811 "tagging": service{
1994 2812
1995 Endpoints: endpoints{ 2813 Endpoints: endpoints{
1996 "cn-north-1": endpoint{}, 2814 "cn-north-1": endpoint{},
2815 "cn-northwest-1": endpoint{},
1997 }, 2816 },
1998 }, 2817 },
1999 }, 2818 },
@@ -2025,6 +2844,18 @@ var awsusgovPartition = partition{
2025 }, 2844 },
2026 }, 2845 },
2027 Services: services{ 2846 Services: services{
2847 "acm": service{
2848
2849 Endpoints: endpoints{
2850 "us-gov-west-1": endpoint{},
2851 },
2852 },
2853 "apigateway": service{
2854
2855 Endpoints: endpoints{
2856 "us-gov-west-1": endpoint{},
2857 },
2858 },
2028 "autoscaling": service{ 2859 "autoscaling": service{
2029 2860
2030 Endpoints: endpoints{ 2861 Endpoints: endpoints{
@@ -2045,6 +2876,16 @@ var awsusgovPartition = partition{
2045 "us-gov-west-1": endpoint{}, 2876 "us-gov-west-1": endpoint{},
2046 }, 2877 },
2047 }, 2878 },
2879 "cloudhsmv2": service{
2880 Defaults: endpoint{
2881 CredentialScope: credentialScope{
2882 Service: "cloudhsm",
2883 },
2884 },
2885 Endpoints: endpoints{
2886 "us-gov-west-1": endpoint{},
2887 },
2888 },
2048 "cloudtrail": service{ 2889 "cloudtrail": service{
2049 2890
2050 Endpoints: endpoints{ 2891 Endpoints: endpoints{
@@ -2069,10 +2910,22 @@ var awsusgovPartition = partition{
2069 "us-gov-west-1": endpoint{}, 2910 "us-gov-west-1": endpoint{},
2070 }, 2911 },
2071 }, 2912 },
2913 "dms": service{
2914
2915 Endpoints: endpoints{
2916 "us-gov-west-1": endpoint{},
2917 },
2918 },
2072 "dynamodb": service{ 2919 "dynamodb": service{
2073 2920
2074 Endpoints: endpoints{ 2921 Endpoints: endpoints{
2075 "us-gov-west-1": endpoint{}, 2922 "us-gov-west-1": endpoint{},
2923 "us-gov-west-1-fips": endpoint{
2924 Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
2925 CredentialScope: credentialScope{
2926 Region: "us-gov-west-1",
2927 },
2928 },
2076 }, 2929 },
2077 }, 2930 },
2078 "ec2": service{ 2931 "ec2": service{
@@ -2092,12 +2945,30 @@ var awsusgovPartition = partition{
2092 }, 2945 },
2093 }, 2946 },
2094 }, 2947 },
2948 "ecr": service{
2949
2950 Endpoints: endpoints{
2951 "us-gov-west-1": endpoint{},
2952 },
2953 },
2954 "ecs": service{
2955
2956 Endpoints: endpoints{
2957 "us-gov-west-1": endpoint{},
2958 },
2959 },
2095 "elasticache": service{ 2960 "elasticache": service{
2096 2961
2097 Endpoints: endpoints{ 2962 Endpoints: endpoints{
2098 "us-gov-west-1": endpoint{}, 2963 "us-gov-west-1": endpoint{},
2099 }, 2964 },
2100 }, 2965 },
2966 "elasticbeanstalk": service{
2967
2968 Endpoints: endpoints{
2969 "us-gov-west-1": endpoint{},
2970 },
2971 },
2101 "elasticloadbalancing": service{ 2972 "elasticloadbalancing": service{
2102 2973
2103 Endpoints: endpoints{ 2974 Endpoints: endpoints{
@@ -2114,6 +2985,12 @@ var awsusgovPartition = partition{
2114 }, 2985 },
2115 }, 2986 },
2116 }, 2987 },
2988 "es": service{
2989
2990 Endpoints: endpoints{
2991 "us-gov-west-1": endpoint{},
2992 },
2993 },
2117 "events": service{ 2994 "events": service{
2118 2995
2119 Endpoints: endpoints{ 2996 Endpoints: endpoints{
@@ -2141,6 +3018,12 @@ var awsusgovPartition = partition{
2141 }, 3018 },
2142 }, 3019 },
2143 }, 3020 },
3021 "inspector": service{
3022
3023 Endpoints: endpoints{
3024 "us-gov-west-1": endpoint{},
3025 },
3026 },
2144 "kinesis": service{ 3027 "kinesis": service{
2145 3028
2146 Endpoints: endpoints{ 3029 Endpoints: endpoints{
@@ -2165,12 +3048,28 @@ var awsusgovPartition = partition{
2165 "us-gov-west-1": endpoint{}, 3048 "us-gov-west-1": endpoint{},
2166 }, 3049 },
2167 }, 3050 },
3051 "metering.marketplace": service{
3052 Defaults: endpoint{
3053 CredentialScope: credentialScope{
3054 Service: "aws-marketplace",
3055 },
3056 },
3057 Endpoints: endpoints{
3058 "us-gov-west-1": endpoint{},
3059 },
3060 },
2168 "monitoring": service{ 3061 "monitoring": service{
2169 3062
2170 Endpoints: endpoints{ 3063 Endpoints: endpoints{
2171 "us-gov-west-1": endpoint{}, 3064 "us-gov-west-1": endpoint{},
2172 }, 3065 },
2173 }, 3066 },
3067 "polly": service{
3068
3069 Endpoints: endpoints{
3070 "us-gov-west-1": endpoint{},
3071 },
3072 },
2174 "rds": service{ 3073 "rds": service{
2175 3074
2176 Endpoints: endpoints{ 3075 Endpoints: endpoints{
@@ -2183,6 +3082,12 @@ var awsusgovPartition = partition{
2183 "us-gov-west-1": endpoint{}, 3082 "us-gov-west-1": endpoint{},
2184 }, 3083 },
2185 }, 3084 },
3085 "rekognition": service{
3086
3087 Endpoints: endpoints{
3088 "us-gov-west-1": endpoint{},
3089 },
3090 },
2186 "s3": service{ 3091 "s3": service{
2187 Defaults: endpoint{ 3092 Defaults: endpoint{
2188 SignatureVersions: []string{"s3", "s3v4"}, 3093 SignatureVersions: []string{"s3", "s3v4"},
@@ -2195,11 +3100,17 @@ var awsusgovPartition = partition{
2195 }, 3100 },
2196 }, 3101 },
2197 "us-gov-west-1": endpoint{ 3102 "us-gov-west-1": endpoint{
2198 Hostname: "s3-us-gov-west-1.amazonaws.com", 3103 Hostname: "s3.us-gov-west-1.amazonaws.com",
2199 Protocols: []string{"http", "https"}, 3104 Protocols: []string{"http", "https"},
2200 }, 3105 },
2201 }, 3106 },
2202 }, 3107 },
3108 "sms": service{
3109
3110 Endpoints: endpoints{
3111 "us-gov-west-1": endpoint{},
3112 },
3113 },
2203 "snowball": service{ 3114 "snowball": service{
2204 3115
2205 Endpoints: endpoints{ 3116 Endpoints: endpoints{
@@ -2223,6 +3134,18 @@ var awsusgovPartition = partition{
2223 }, 3134 },
2224 }, 3135 },
2225 }, 3136 },
3137 "ssm": service{
3138
3139 Endpoints: endpoints{
3140 "us-gov-west-1": endpoint{},
3141 },
3142 },
3143 "storagegateway": service{
3144
3145 Endpoints: endpoints{
3146 "us-gov-west-1": endpoint{},
3147 },
3148 },
2226 "streams.dynamodb": service{ 3149 "streams.dynamodb": service{
2227 Defaults: endpoint{ 3150 Defaults: endpoint{
2228 CredentialScope: credentialScope{ 3151 CredentialScope: credentialScope{
@@ -2231,6 +3154,12 @@ var awsusgovPartition = partition{
2231 }, 3154 },
2232 Endpoints: endpoints{ 3155 Endpoints: endpoints{
2233 "us-gov-west-1": endpoint{}, 3156 "us-gov-west-1": endpoint{},
3157 "us-gov-west-1-fips": endpoint{
3158 Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
3159 CredentialScope: credentialScope{
3160 Region: "us-gov-west-1",
3161 },
3162 },
2234 }, 3163 },
2235 }, 3164 },
2236 "sts": service{ 3165 "sts": service{
@@ -2245,5 +3174,19 @@ var awsusgovPartition = partition{
2245 "us-gov-west-1": endpoint{}, 3174 "us-gov-west-1": endpoint{},
2246 }, 3175 },
2247 }, 3176 },
3177 "tagging": service{
3178
3179 Endpoints: endpoints{
3180 "us-gov-west-1": endpoint{},
3181 },
3182 },
3183 "translate": service{
3184 Defaults: endpoint{
3185 Protocols: []string{"https"},
3186 },
3187 Endpoints: endpoints{
3188 "us-gov-west-1": endpoint{},
3189 },
3190 },
2248 }, 3191 },
2249} 3192}
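
The defaults.go hunks above are pure endpoint metadata: new services such as sagemaker, secretsmanager, serverlessrepo, servicediscovery, translate and workmail, the eu-west-3 and cn-northwest-1 regions, and several FIPS entries. A minimal sketch of how that data is consumed through the endpoints package, assuming the vendored import path github.com/aws/aws-sdk-go; the secretsmanager/eu-west-2 pair is taken from the new entries and is only illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve one of the service/region pairs modeled in defaults.go.
	resolved, err := endpoints.DefaultResolver().EndpointFor("secretsmanager", "eu-west-2")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(resolved.URL, resolved.SigningRegion)
}
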
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
index a0e9bc4..84316b9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -21,12 +21,12 @@
21// partitions := resolver.(endpoints.EnumPartitions).Partitions() 21// partitions := resolver.(endpoints.EnumPartitions).Partitions()
22// 22//
23// for _, p := range partitions { 23// for _, p := range partitions {
24// fmt.Println("Regions for", p.Name) 24// fmt.Println("Regions for", p.ID())
25// for id, _ := range p.Regions() { 25// for id, _ := range p.Regions() {
26// fmt.Println("*", id) 26// fmt.Println("*", id)
27// } 27// }
28// 28//
29// fmt.Println("Services for", p.Name) 29// fmt.Println("Services for", p.ID())
30// for id, _ := range p.Services() { 30// for id, _ := range p.Services() {
31// fmt.Println("*", id) 31// fmt.Println("*", id)
32// } 32// }
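
For reference, a self-contained version of the doc.go example above, with imports added and using the ID accessor the diff switches to; the logic is otherwise unchanged.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()
	partitions := resolver.(endpoints.EnumPartitions).Partitions()

	for _, p := range partitions {
		fmt.Println("Regions for", p.ID())
		for id := range p.Regions() {
			fmt.Println("*", id)
		}

		fmt.Println("Services for", p.ID())
		for id := range p.Services() {
			fmt.Println("*", id)
		}
	}
}
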
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
index 9c3eedb..e29c095 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (
206// enumerating over the regions in a partition. 206// enumerating over the regions in a partition.
207func (p Partition) Regions() map[string]Region { 207func (p Partition) Regions() map[string]Region {
208 rs := map[string]Region{} 208 rs := map[string]Region{}
209 for id := range p.p.Regions { 209 for id, r := range p.p.Regions {
210 rs[id] = Region{ 210 rs[id] = Region{
211 id: id, 211 id: id,
212 p: p.p, 212 desc: r.Description,
213 p: p.p,
213 } 214 }
214 } 215 }
215 216
@@ -240,6 +241,10 @@ type Region struct {
240// ID returns the region's identifier. 241// ID returns the region's identifier.
241func (r Region) ID() string { return r.id } 242func (r Region) ID() string { return r.id }
242 243
244// Description returns the region's description. The region description
245// is free text, it can be empty, and it may change between SDK releases.
246func (r Region) Description() string { return r.desc }
247
243// ResolveEndpoint resolves an endpoint from the context of the region given 248// ResolveEndpoint resolves an endpoint from the context of the region given
244// a service. See Partition.EndpointFor for usage and errors that can be returned. 249// a service. See Partition.EndpointFor for usage and errors that can be returned.
245func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { 250func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
@@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve
284func (s Service) Regions() map[string]Region { 289func (s Service) Regions() map[string]Region {
285 rs := map[string]Region{} 290 rs := map[string]Region{}
286 for id := range s.p.Services[s.id].Endpoints { 291 for id := range s.p.Services[s.id].Endpoints {
287 if _, ok := s.p.Regions[id]; ok { 292 if r, ok := s.p.Regions[id]; ok {
288 rs[id] = Region{ 293 rs[id] = Region{
289 id: id, 294 id: id,
290 p: s.p, 295 desc: r.Description,
296 p: s.p,
291 } 297 }
292 } 298 }
293 } 299 }
@@ -347,6 +353,10 @@ type ResolvedEndpoint struct {
347 // The service name that should be used for signing requests. 353 // The service name that should be used for signing requests.
348 SigningName string 354 SigningName string
349 355
356 // States that the signing name for this endpoint was derived from metadata
357 // passed in, but was not explicitly modeled.
358 SigningNameDerived bool
359
350 // The signing method that should be used for signing requests. 360 // The signing method that should be used for signing requests.
351 SigningMethod string 361 SigningMethod string
352} 362}
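
The Regions change above carries the region description through to each Region value, and the new Description accessor exposes it. A small sketch of enumerating a partition with it; as the comment notes, the description is free text and may change between releases.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Print every region of the standard AWS partition with its description.
	p := endpoints.AwsPartition()
	for id, r := range p.Regions() {
		fmt.Printf("%s: %s\n", id, r.Description())
	}
}
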
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
index 13d968a..ff6f76d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
226 if len(signingRegion) == 0 { 226 if len(signingRegion) == 0 {
227 signingRegion = region 227 signingRegion = region
228 } 228 }
229
229 signingName := e.CredentialScope.Service 230 signingName := e.CredentialScope.Service
231 var signingNameDerived bool
230 if len(signingName) == 0 { 232 if len(signingName) == 0 {
231 signingName = service 233 signingName = service
234 signingNameDerived = true
232 } 235 }
233 236
234 return ResolvedEndpoint{ 237 return ResolvedEndpoint{
235 URL: u, 238 URL: u,
236 SigningRegion: signingRegion, 239 SigningRegion: signingRegion,
237 SigningName: signingName, 240 SigningName: signingName,
238 SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), 241 SigningNameDerived: signingNameDerived,
242 SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
239 } 243 }
240} 244}
241 245
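
v3model.go is where SigningNameDerived is populated: it is true only when the signing name was inferred from the service identifier rather than modeled in the endpoint's credentialScope. A hedged sketch of checking it on a resolved endpoint; the sqs/us-west-2 pair is only illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolved, err := endpoints.DefaultResolver().EndpointFor("sqs", "us-west-2")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	if resolved.SigningNameDerived {
		// The signing name came from the service ID, not from an explicit
		// credentialScope entry in the endpoint model.
		fmt.Println("derived signing name:", resolved.SigningName)
	}
}
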
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
index db87188..6ed15b2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType {
26 26
27// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be 27// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
28// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If 28// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
29// LogLevel is nill, will default to LogOff comparison. 29// LogLevel is nil, will default to LogOff comparison.
30func (l *LogLevelType) Matches(v LogLevelType) bool { 30func (l *LogLevelType) Matches(v LogLevelType) bool {
31 c := l.Value() 31 c := l.Value()
32 return c&v == v 32 return c&v == v
33} 33}
34 34
35// AtLeast returns true if this LogLevel is at least high enough to satisfies v. 35// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
36// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default 36// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
37// to LogOff comparison. 37// to LogOff comparison.
38func (l *LogLevelType) AtLeast(v LogLevelType) bool { 38func (l *LogLevelType) AtLeast(v LogLevelType) bool {
39 c := l.Value() 39 c := l.Value()
@@ -71,6 +71,12 @@ const (
71 // LogDebugWithRequestErrors states the SDK should log when service requests fail 71 // LogDebugWithRequestErrors states the SDK should log when service requests fail
72 // to build, send, validate, or unmarshal. 72 // to build, send, validate, or unmarshal.
73 LogDebugWithRequestErrors 73 LogDebugWithRequestErrors
74
75 // LogDebugWithEventStreamBody states the SDK should log EventStream
76 // request and response bodies. This should be used to log the EventStream
77 // wire unmarshaled message content of requests and responses made while
78 // using the SDK. Will also enable LogDebug.
79 LogDebugWithEventStreamBody
74) 80)
75 81
76// A Logger is a minimalistic interface for the SDK to log messages to. Should 82// A Logger is a minimalistic interface for the SDK to log messages to. Should
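
LogDebugWithEventStreamBody is a sub-level of LogDebug. A minimal sketch of enabling it on a client config, assuming only the aws package of this SDK; the resulting Config would then be handed to a session or service client constructor.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Enable debug logging plus EventStream request/response body logging.
	cfg := aws.NewConfig().WithLogLevel(aws.LogDebug | aws.LogDebugWithEventStreamBody)
	_ = cfg // pass cfg to session.NewSession or a service client constructor
}
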
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
index 802ac88..605a72d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -14,6 +14,7 @@ type Handlers struct {
14 Send HandlerList 14 Send HandlerList
15 ValidateResponse HandlerList 15 ValidateResponse HandlerList
16 Unmarshal HandlerList 16 Unmarshal HandlerList
17 UnmarshalStream HandlerList
17 UnmarshalMeta HandlerList 18 UnmarshalMeta HandlerList
18 UnmarshalError HandlerList 19 UnmarshalError HandlerList
19 Retry HandlerList 20 Retry HandlerList
@@ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers {
30 Send: h.Send.copy(), 31 Send: h.Send.copy(),
31 ValidateResponse: h.ValidateResponse.copy(), 32 ValidateResponse: h.ValidateResponse.copy(),
32 Unmarshal: h.Unmarshal.copy(), 33 Unmarshal: h.Unmarshal.copy(),
34 UnmarshalStream: h.UnmarshalStream.copy(),
33 UnmarshalError: h.UnmarshalError.copy(), 35 UnmarshalError: h.UnmarshalError.copy(),
34 UnmarshalMeta: h.UnmarshalMeta.copy(), 36 UnmarshalMeta: h.UnmarshalMeta.copy(),
35 Retry: h.Retry.copy(), 37 Retry: h.Retry.copy(),
@@ -45,6 +47,7 @@ func (h *Handlers) Clear() {
45 h.Send.Clear() 47 h.Send.Clear()
46 h.Sign.Clear() 48 h.Sign.Clear()
47 h.Unmarshal.Clear() 49 h.Unmarshal.Clear()
50 h.UnmarshalStream.Clear()
48 h.UnmarshalMeta.Clear() 51 h.UnmarshalMeta.Clear()
49 h.UnmarshalError.Clear() 52 h.UnmarshalError.Clear()
50 h.ValidateResponse.Clear() 53 h.ValidateResponse.Clear()
@@ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
172 return swapped 175 return swapped
173} 176}
174 177
178// Swap will swap out all handlers matching the name passed in. The matched
179// handlers will be replaced with the handler passed in. True is returned if any handlers were swapped.
180func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
181 var swapped bool
182
183 for i := 0; i < len(l.list); i++ {
184 if l.list[i].Name == name {
185 l.list[i] = replace
186 swapped = true
187 }
188 }
189
190 return swapped
191}
192
175// SetBackNamed will replace the named handler if it exists in the handler list. 193// SetBackNamed will replace the named handler if it exists in the handler list.
176// If the handler does not exist the handler will be added to the end of the list. 194// If the handler does not exist the handler will be added to the end of the list.
177func (l *HandlerList) SetBackNamed(n NamedHandler) { 195func (l *HandlerList) SetBackNamed(n NamedHandler) {
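
The new Swap replaces every handler registered under the given name and reports whether anything matched. A short sketch; the handler name "example.Handler" is made up for illustration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList
	l.PushBackNamed(request.NamedHandler{
		Name: "example.Handler", // hypothetical name
		Fn:   func(r *request.Request) { fmt.Println("original") },
	})

	// Replace every handler registered under "example.Handler".
	swapped := l.Swap("example.Handler", request.NamedHandler{
		Name: "example.Handler",
		Fn:   func(r *request.Request) { fmt.Println("replacement") },
	})
	fmt.Println("swapped:", swapped)
}
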
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
index 02f07f4..b0c2ef4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -3,6 +3,8 @@ package request
3import ( 3import (
4 "io" 4 "io"
5 "sync" 5 "sync"
6
7 "github.com/aws/aws-sdk-go/internal/sdkio"
6) 8)
7 9
8// offsetReader is a thread-safe io.ReadCloser to prevent racing 10// offsetReader is a thread-safe io.ReadCloser to prevent racing
@@ -15,7 +17,7 @@ type offsetReader struct {
15 17
16func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { 18func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
17 reader := &offsetReader{} 19 reader := &offsetReader{}
18 buf.Seek(offset, 0) 20 buf.Seek(offset, sdkio.SeekStart)
19 21
20 reader.buf = buf 22 reader.buf = buf
21 return reader 23 return reader
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index 299dc37..75f0fe0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -14,6 +14,7 @@ import (
14 "github.com/aws/aws-sdk-go/aws" 14 "github.com/aws/aws-sdk-go/aws"
15 "github.com/aws/aws-sdk-go/aws/awserr" 15 "github.com/aws/aws-sdk-go/aws/awserr"
16 "github.com/aws/aws-sdk-go/aws/client/metadata" 16 "github.com/aws/aws-sdk-go/aws/client/metadata"
17 "github.com/aws/aws-sdk-go/internal/sdkio"
17) 18)
18 19
19const ( 20const (
@@ -24,10 +25,14 @@ const (
24 // ErrCodeRead is an error that is returned during HTTP reads. 25 // ErrCodeRead is an error that is returned during HTTP reads.
25 ErrCodeRead = "ReadError" 26 ErrCodeRead = "ReadError"
26 27
27 // ErrCodeResponseTimeout is the connection timeout error that is recieved 28 // ErrCodeResponseTimeout is the connection timeout error that is received
28 // during body reads. 29 // during body reads.
29 ErrCodeResponseTimeout = "ResponseTimeout" 30 ErrCodeResponseTimeout = "ResponseTimeout"
30 31
32 // ErrCodeInvalidPresignExpire is returned when the expire time provided to
33 // presign is invalid
34 ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
35
31 // CanceledErrorCode is the error code that will be returned by an 36 // CanceledErrorCode is the error code that will be returned by an
32 // API request that was canceled. Requests given a aws.Context may 37 // API request that was canceled. Requests given a aws.Context may
33 // return this error when canceled. 38 // return this error when canceled.
@@ -41,8 +46,8 @@ type Request struct {
41 Handlers Handlers 46 Handlers Handlers
42 47
43 Retryer 48 Retryer
49 AttemptTime time.Time
44 Time time.Time 50 Time time.Time
45 ExpireTime time.Duration
46 Operation *Operation 51 Operation *Operation
47 HTTPRequest *http.Request 52 HTTPRequest *http.Request
48 HTTPResponse *http.Response 53 HTTPResponse *http.Response
@@ -60,6 +65,11 @@ type Request struct {
60 LastSignedAt time.Time 65 LastSignedAt time.Time
61 DisableFollowRedirects bool 66 DisableFollowRedirects bool
62 67
68 // A value greater than 0 instructs the request to be signed as Presigned URL
69 // You should not set this field directly. Instead use Request's
70 // Presign or PresignRequest methods.
71 ExpireTime time.Duration
72
63 context aws.Context 73 context aws.Context
64 74
65 built bool 75 built bool
@@ -104,12 +114,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
104 err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) 114 err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
105 } 115 }
106 116
117 SanitizeHostForHeader(httpReq)
118
107 r := &Request{ 119 r := &Request{
108 Config: cfg, 120 Config: cfg,
109 ClientInfo: clientInfo, 121 ClientInfo: clientInfo,
110 Handlers: handlers.Copy(), 122 Handlers: handlers.Copy(),
111 123
112 Retryer: retryer, 124 Retryer: retryer,
125 AttemptTime: time.Now(),
113 Time: time.Now(), 126 Time: time.Now(),
114 ExpireTime: 0, 127 ExpireTime: 0,
115 Operation: operation, 128 Operation: operation,
@@ -214,6 +227,9 @@ func (r *Request) SetContext(ctx aws.Context) {
214 227
215// WillRetry returns if the request's can be retried. 228// WillRetry returns if the request's can be retried.
216func (r *Request) WillRetry() bool { 229func (r *Request) WillRetry() bool {
230 if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
231 return false
232 }
217 return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() 233 return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
218} 234}
219 235
@@ -245,39 +261,70 @@ func (r *Request) SetStringBody(s string) {
245// SetReaderBody will set the request's body reader. 261// SetReaderBody will set the request's body reader.
246func (r *Request) SetReaderBody(reader io.ReadSeeker) { 262func (r *Request) SetReaderBody(reader io.ReadSeeker) {
247 r.Body = reader 263 r.Body = reader
264 r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset.
248 r.ResetBody() 265 r.ResetBody()
249} 266}
250 267
251// Presign returns the request's signed URL. Error will be returned 268// Presign returns the request's signed URL. Error will be returned
252// if the signing fails. 269// if the signing fails.
253func (r *Request) Presign(expireTime time.Duration) (string, error) { 270//
254 r.ExpireTime = expireTime 271 // It is invalid to create a presigned URL with an expire duration 0 or less. An
272// error is returned if expire duration is 0 or less.
273func (r *Request) Presign(expire time.Duration) (string, error) {
274 r = r.copy()
275
276 // Presign requires all headers be hoisted. There is no way to retrieve
277 // the signed headers not hoisted without this. Making the presigned URL
278 // useless.
255 r.NotHoist = false 279 r.NotHoist = false
256 280
281 u, _, err := getPresignedURL(r, expire)
282 return u, err
283}
284
285// PresignRequest behaves just like presign, with the addition of returning a
286// set of headers that were signed.
287//
288// It is invalid to create a presigned URL with an expire duration 0 or less. An
289// error is returned if expire duration is 0 or less.
290//
291// Returns the URL string for the API operation with signature in the query string,
292// and the HTTP headers that were included in the signature. These headers must
293// be included in any HTTP request made with the presigned URL.
294//
295// To prevent hoisting any headers to the query string set NotHoist to true on
296// this Request value prior to calling PresignRequest.
297func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
298 r = r.copy()
299 return getPresignedURL(r, expire)
300}
301
302// IsPresigned returns true if the request represents a presigned API url.
303func (r *Request) IsPresigned() bool {
304 return r.ExpireTime != 0
305}
306
307func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
308 if expire <= 0 {
309 return "", nil, awserr.New(
310 ErrCodeInvalidPresignExpire,
311 "presigned URL requires an expire duration greater than 0",
312 nil,
313 )
314 }
315
316 r.ExpireTime = expire
317
257 if r.Operation.BeforePresignFn != nil { 318 if r.Operation.BeforePresignFn != nil {
258 r = r.copy() 319 if err := r.Operation.BeforePresignFn(r); err != nil {
259 err := r.Operation.BeforePresignFn(r) 320 return "", nil, err
260 if err != nil {
261 return "", err
262 } 321 }
263 } 322 }
264 323
265 r.Sign() 324 if err := r.Sign(); err != nil {
266 if r.Error != nil { 325 return "", nil, err
267 return "", r.Error
268 } 326 }
269 return r.HTTPRequest.URL.String(), nil
270}
271 327
272// PresignRequest behaves just like presign, but hoists all headers and signs them.
273// Also returns the signed hash back to the user
274func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
275 r.ExpireTime = expireTime
276 r.NotHoist = true
277 r.Sign()
278 if r.Error != nil {
279 return "", nil, r.Error
280 }
281 return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil 328 return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
282} 329}
283 330
@@ -297,7 +344,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
297 344
298// Build will build the request's object so it can be signed and sent 345// Build will build the request's object so it can be signed and sent
299// to the service. Build will also validate all the request's parameters. 346// to the service. Build will also validate all the request's parameters.
300// Anny additional build Handlers set on this request will be run 347// Any additional build Handlers set on this request will be run
301// in the order they were set. 348// in the order they were set.
302// 349//
303// The request will only be built once. Multiple calls to build will have 350// The request will only be built once. Multiple calls to build will have
@@ -323,9 +370,9 @@ func (r *Request) Build() error {
323 return r.Error 370 return r.Error
324} 371}
325 372
326// Sign will sign the request returning error if errors are encountered. 373// Sign will sign the request, returning error if errors are encountered.
327// 374//
328// Send will build the request prior to signing. All Sign Handlers will 375// Sign will build the request prior to signing. All Sign Handlers will
329// be executed in the order they were set. 376// be executed in the order they were set.
330func (r *Request) Sign() error { 377func (r *Request) Sign() error {
331 r.Build() 378 r.Build()
@@ -358,7 +405,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
358 // of the SDK if they used that field. 405 // of the SDK if they used that field.
359 // 406 //
360 // Related golang/go#18257 407 // Related golang/go#18257
361 l, err := computeBodyLength(r.Body) 408 l, err := aws.SeekerLen(r.Body)
362 if err != nil { 409 if err != nil {
363 return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) 410 return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
364 } 411 }
@@ -376,7 +423,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
376 // Transfer-Encoding: chunked bodies for these methods. 423 // Transfer-Encoding: chunked bodies for these methods.
377 // 424 //
378 // This would only happen if a aws.ReaderSeekerCloser was used with 425 // This would only happen if a aws.ReaderSeekerCloser was used with
379 // a io.Reader that was not also an io.Seeker. 426 // a io.Reader that was not also an io.Seeker, or did not implement
427 // Len() method.
380 switch r.Operation.HTTPMethod { 428 switch r.Operation.HTTPMethod {
381 case "GET", "HEAD", "DELETE": 429 case "GET", "HEAD", "DELETE":
382 body = NoBody 430 body = NoBody
@@ -388,49 +436,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
388 return body, nil 436 return body, nil
389} 437}
390 438
391// Attempts to compute the length of the body of the reader using the
392// io.Seeker interface. If the value is not seekable because of being
393// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned.
394// If no error occurs the length of the body will be returned.
395func computeBodyLength(r io.ReadSeeker) (int64, error) {
396 seekable := true
397 // Determine if the seeker is actually seekable. ReaderSeekerCloser
398 // hides the fact that a io.Readers might not actually be seekable.
399 switch v := r.(type) {
400 case aws.ReaderSeekerCloser:
401 seekable = v.IsSeeker()
402 case *aws.ReaderSeekerCloser:
403 seekable = v.IsSeeker()
404 }
405 if !seekable {
406 return -1, nil
407 }
408
409 curOffset, err := r.Seek(0, 1)
410 if err != nil {
411 return 0, err
412 }
413
414 endOffset, err := r.Seek(0, 2)
415 if err != nil {
416 return 0, err
417 }
418
419 _, err = r.Seek(curOffset, 0)
420 if err != nil {
421 return 0, err
422 }
423
424 return endOffset - curOffset, nil
425}
426
427// GetBody will return an io.ReadSeeker of the Request's underlying 439// GetBody will return an io.ReadSeeker of the Request's underlying
428// input body with a concurrency safe wrapper. 440// input body with a concurrency safe wrapper.
429func (r *Request) GetBody() io.ReadSeeker { 441func (r *Request) GetBody() io.ReadSeeker {
430 return r.safeBody 442 return r.safeBody
431} 443}
432 444
433// Send will send the request returning error if errors are encountered. 445// Send will send the request, returning error if errors are encountered.
434// 446//
435// Send will sign the request prior to sending. All Send Handlers will 447// Send will sign the request prior to sending. All Send Handlers will
436// be executed in the order they were set. 448// be executed in the order they were set.
@@ -451,6 +463,7 @@ func (r *Request) Send() error {
451 }() 463 }()
452 464
453 for { 465 for {
466 r.AttemptTime = time.Now()
454 if aws.BoolValue(r.Retryable) { 467 if aws.BoolValue(r.Retryable) {
455 if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { 468 if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
456 r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", 469 r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
@@ -573,3 +586,72 @@ func shouldRetryCancel(r *Request) bool {
573 errStr != "net/http: request canceled while waiting for connection") 586 errStr != "net/http: request canceled while waiting for connection")
574 587
575} 588}
589
590// SanitizeHostForHeader removes default port from host and updates request.Host
591func SanitizeHostForHeader(r *http.Request) {
592 host := getHost(r)
593 port := portOnly(host)
594 if port != "" && isDefaultPort(r.URL.Scheme, port) {
595 r.Host = stripPort(host)
596 }
597}
598
599// Returns host from request
600func getHost(r *http.Request) string {
601 if r.Host != "" {
602 return r.Host
603 }
604
605 return r.URL.Host
606}
607
608// Hostname returns u.Host, without any port number.
609//
610// If Host is an IPv6 literal with a port number, Hostname returns the
611// IPv6 literal without the square brackets. IPv6 literals may include
612// a zone identifier.
613//
614// Copied from the Go 1.8 standard library (net/url)
615func stripPort(hostport string) string {
616 colon := strings.IndexByte(hostport, ':')
617 if colon == -1 {
618 return hostport
619 }
620 if i := strings.IndexByte(hostport, ']'); i != -1 {
621 return strings.TrimPrefix(hostport[:i], "[")
622 }
623 return hostport[:colon]
624}
625
626// Port returns the port part of u.Host, without the leading colon.
627// If u.Host doesn't contain a port, Port returns an empty string.
628//
629// Copied from the Go 1.8 standard library (net/url)
630func portOnly(hostport string) string {
631 colon := strings.IndexByte(hostport, ':')
632 if colon == -1 {
633 return ""
634 }
635 if i := strings.Index(hostport, "]:"); i != -1 {
636 return hostport[i+len("]:"):]
637 }
638 if strings.Contains(hostport, "]") {
639 return ""
640 }
641 return hostport[colon+len(":"):]
642}
643
644// Returns true if the specified URI is using the standard port
645// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
646func isDefaultPort(scheme, port string) bool {
647 if port == "" {
648 return true
649 }
650
651 lowerCaseScheme := strings.ToLower(scheme)
652 if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
653 return true
654 }
655
656 return false
657}
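
Both Presign and PresignRequest now go through getPresignedURL, which rejects non-positive expire durations and operates on a copy of the request. A hedged sketch of presigning an S3 GetObject call; the bucket and key are placeholders and the s3/session packages are the SDK's standard ones.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Build the request without sending it, then presign it.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})

	urlStr, signedHeaders, err := req.PresignRequest(15 * time.Minute)
	if err != nil {
		// A zero or negative duration now fails with InvalidPresignExpireError.
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println(urlStr, signedHeaders)
}
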
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
index 869b97a..e36e468 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
21var NoBody = noBody{} 21var NoBody = noBody{}
22 22
23// ResetBody rewinds the request body back to its starting position, and 23// ResetBody rewinds the request body back to its starting position, and
24// set's the HTTP Request body reference. When the body is read prior 24// sets the HTTP Request body reference. When the body is read prior
25// to being sent in the HTTP request it will need to be rewound. 25// to being sent in the HTTP request it will need to be rewound.
26// 26//
27// ResetBody will automatically be called by the SDK's build handler, but if 27// ResetBody will automatically be called by the SDK's build handler, but if
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
index c32fc69..7c6a800 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -11,7 +11,7 @@ import (
11var NoBody = http.NoBody 11var NoBody = http.NoBody
12 12
13// ResetBody rewinds the request body back to its starting position, and 13// ResetBody rewinds the request body back to its starting position, and
14// set's the HTTP Request body reference. When the body is read prior 14// sets the HTTP Request body reference. When the body is read prior
15// to being sent in the HTTP request it will need to be rewound. 15// to being sent in the HTTP request it will need to be rewound.
16// 16//
17// ResetBody will automatically be called by the SDK's build handler, but if 17// ResetBody will automatically be called by the SDK's build handler, but if
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
index 59de673..a633ed5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -35,8 +35,12 @@ type Pagination struct {
35 // NewRequest should always be built from the same API operations. It is 35 // NewRequest should always be built from the same API operations. It is
36 // undefined if different API operations are returned on subsequent calls. 36 // undefined if different API operations are returned on subsequent calls.
37 NewRequest func() (*Request, error) 37 NewRequest func() (*Request, error)
38 // EndPageOnSameToken, when enabled, will allow the paginator to stop on
39 // tokens that are the same as its previous tokens.
40 EndPageOnSameToken bool
38 41
39 started bool 42 started bool
43 prevTokens []interface{}
40 nextTokens []interface{} 44 nextTokens []interface{}
41 45
42 err error 46 err error
@@ -49,7 +53,15 @@ type Pagination struct {
49// 53//
50// Will always return true if Next has not been called yet. 54// Will always return true if Next has not been called yet.
51func (p *Pagination) HasNextPage() bool { 55func (p *Pagination) HasNextPage() bool {
52 return !(p.started && len(p.nextTokens) == 0) 56 if !p.started {
57 return true
58 }
59
60 hasNextPage := len(p.nextTokens) != 0
61 if p.EndPageOnSameToken {
62 return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
63 }
64 return hasNextPage
53} 65}
54 66
55// Err returns the error Pagination encountered when retrieving the next page. 67// Err returns the error Pagination encountered when retrieving the next page.
@@ -96,6 +108,7 @@ func (p *Pagination) Next() bool {
96 return false 108 return false
97 } 109 }
98 110
111 p.prevTokens = p.nextTokens
99 p.nextTokens = req.nextPageTokens() 112 p.nextTokens = req.nextPageTokens()
100 p.curPage = req.Data 113 p.curPage = req.Data
101 114
@@ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} {
142 tokens := []interface{}{} 155 tokens := []interface{}{}
143 tokenAdded := false 156 tokenAdded := false
144 for _, outToken := range r.Operation.OutputTokens { 157 for _, outToken := range r.Operation.OutputTokens {
145 v, _ := awsutil.ValuesAtPath(r.Data, outToken) 158 vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
146 if len(v) > 0 { 159 if len(vs) == 0 {
147 tokens = append(tokens, v[0])
148 tokenAdded = true
149 } else {
150 tokens = append(tokens, nil) 160 tokens = append(tokens, nil)
161 continue
162 }
163 v := vs[0]
164
165 switch tv := v.(type) {
166 case *string:
167 if len(aws.StringValue(tv)) == 0 {
168 tokens = append(tokens, nil)
169 continue
170 }
171 case string:
172 if len(tv) == 0 {
173 tokens = append(tokens, nil)
174 continue
175 }
151 } 176 }
177
178 tokenAdded = true
179 tokens = append(tokens, v)
152 } 180 }
153 if !tokenAdded { 181 if !tokenAdded {
154 return nil 182 return nil
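Usage sketch (assumption, not part of the vendored change) of driving the paginator with the new EndPageOnSameToken flag; the service client, operation, and output type below are placeholders.

    p := request.Pagination{
        // Stop paginating when the service keeps returning the same
        // continuation token, instead of looping on an identical token.
        EndPageOnSameToken: true,
        NewRequest: func() (*request.Request, error) {
            req, _ := svc.ListThingsRequest(input) // placeholder operation
            return req, nil
        },
    }
    for p.Next() {
        page := p.Page().(*ListThingsOutput) // placeholder output type
        _ = page                             // consume the page here
    }
    if err := p.Err(); err != nil {
        // handle the pagination error
    }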
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
index 8d369c1..7d52702 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -8,7 +8,7 @@ import (
8) 8)
9 9
10// Retryer is an interface to control retry logic for a given service. 10// Retryer is an interface to control retry logic for a given service.
11// The default implementation used by most services is the service.DefaultRetryer 11// The default implementation used by most services is the client.DefaultRetryer
12// structure, which contains basic retry logic using exponential backoff. 12// structure, which contains basic retry logic using exponential backoff.
13type Retryer interface { 13type Retryer interface {
14 RetryRules(*Request) time.Duration 14 RetryRules(*Request) time.Duration
@@ -70,8 +70,8 @@ func isCodeExpiredCreds(code string) bool {
70} 70}
71 71
72var validParentCodes = map[string]struct{}{ 72var validParentCodes = map[string]struct{}{
73 ErrCodeSerialization: struct{}{}, 73 ErrCodeSerialization: {},
74 ErrCodeRead: struct{}{}, 74 ErrCodeRead: {},
75} 75}
76 76
77type temporaryError interface { 77type temporaryError interface {
@@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
97 } 97 }
98 98
99 if t, ok := err.(temporaryError); ok { 99 if t, ok := err.(temporaryError); ok {
100 return t.Temporary() 100 return t.Temporary() || isErrConnectionReset(err)
101 } 101 }
102 102
103 return isErrConnectionReset(err) 103 return isErrConnectionReset(err)
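A short sketch (assumption, not part of the vendored change) of satisfying the Retryer interface by embedding client.DefaultRetryer and capping the attempt count; the cap value is arbitrary.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
    )

    // capRetryer keeps DefaultRetryer's exponential backoff and retryability
    // checks, but limits the number of retries.
    type capRetryer struct {
        client.DefaultRetryer
    }

    func (r capRetryer) MaxRetries() int { return 2 }

    func main() {
        cfg := aws.NewConfig()
        cfg.Retryer = capRetryer{client.DefaultRetryer{NumMaxRetries: 2}}
        _ = cfg
    }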
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
index 2520286..4012462 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -220,7 +220,7 @@ type ErrParamMinLen struct {
220func NewErrParamMinLen(field string, min int) *ErrParamMinLen { 220func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
221 return &ErrParamMinLen{ 221 return &ErrParamMinLen{
222 errInvalidParam: errInvalidParam{ 222 errInvalidParam: errInvalidParam{
223 code: ParamMinValueErrCode, 223 code: ParamMinLenErrCode,
224 field: field, 224 field: field,
225 msg: fmt.Sprintf("minimum field size of %v", min), 225 msg: fmt.Sprintf("minimum field size of %v", min),
226 }, 226 },
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
index 22d2f80..4601f88 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -79,8 +79,9 @@ type Waiter struct {
79 MaxAttempts int 79 MaxAttempts int
80 Delay WaiterDelay 80 Delay WaiterDelay
81 81
82 RequestOptions []Option 82 RequestOptions []Option
83 NewRequest func([]Option) (*Request, error) 83 NewRequest func([]Option) (*Request, error)
84 SleepWithContext func(aws.Context, time.Duration) error
84} 85}
85 86
86// ApplyOptions updates the waiter with the list of waiter options provided. 87// ApplyOptions updates the waiter with the list of waiter options provided.
@@ -195,8 +196,15 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error {
195 if sleepFn := req.Config.SleepDelay; sleepFn != nil { 196 if sleepFn := req.Config.SleepDelay; sleepFn != nil {
196 // Support SleepDelay for backwards compatibility and testing 197 // Support SleepDelay for backwards compatibility and testing
197 sleepFn(delay) 198 sleepFn(delay)
198 } else if err := aws.SleepWithContext(ctx, delay); err != nil { 199 } else {
199 return awserr.New(CanceledErrorCode, "waiter context canceled", err) 200 sleepCtxFn := w.SleepWithContext
201 if sleepCtxFn == nil {
202 sleepCtxFn = aws.SleepWithContext
203 }
204
205 if err := sleepCtxFn(ctx, delay); err != nil {
206 return awserr.New(CanceledErrorCode, "waiter context canceled", err)
207 }
200 } 208 }
201 } 209 }
202 210
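A sketch (assumption, not part of the vendored change) of the new SleepWithContext hook, here used to skip real delays in a test; the waiter name, acceptors, and operation are placeholders.

    w := request.Waiter{
        Name:        "WaitUntilThingExists", // placeholder
        MaxAttempts: 3,
        Delay:       request.ConstantWaiterDelay(5 * time.Second),
        Acceptors:   acceptors, // placeholder []request.WaiterAcceptor
        // Injected sleep; the waiter falls back to aws.SleepWithContext when nil.
        SleepWithContext: func(ctx aws.Context, d time.Duration) error {
            return nil
        },
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            req, _ := svc.DescribeThingRequest(input) // placeholder operation
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    err := w.WaitWithContext(aws.BackgroundContext())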
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index ea7b886..98d420f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared
128credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). 128credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
129 129
130Credentials are the values the SDK should use for authenticating requests with 130Credentials are the values the SDK should use for authenticating requests with
131AWS Services. They arfrom a configuration file will need to include both 131AWS Services. They are from a configuration file will need to include both
132aws_access_key_id and aws_secret_access_key must be provided together in the 132aws_access_key_id and aws_secret_access_key must be provided together in the
133same file to be considered valid. The values will be ignored if not a complete 133same file to be considered valid. The values will be ignored if not a complete
134group. aws_session_token is an optional field that can be provided if both of 134group. aws_session_token is an optional field that can be provided if both of
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index 7357e54..82e04d7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -5,8 +5,12 @@ import (
5 "strconv" 5 "strconv"
6 6
7 "github.com/aws/aws-sdk-go/aws/credentials" 7 "github.com/aws/aws-sdk-go/aws/credentials"
8 "github.com/aws/aws-sdk-go/aws/defaults"
8) 9)
9 10
11// EnvProviderName provides a name of the provider when config is loaded from environment.
12const EnvProviderName = "EnvConfigCredentials"
13
10// envConfig is a collection of environment values the SDK will read 14// envConfig is a collection of environment values the SDK will read
11// setup config from. All environment values are optional. But some values 15// setup config from. All environment values are optional. But some values
12// such as credentials require multiple values to be complete or the values 16// such as credentials require multiple values to be complete or the values
@@ -76,7 +80,7 @@ type envConfig struct {
76 SharedConfigFile string 80 SharedConfigFile string
77 81
78 // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file 82 // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file
79 // that the SDK will use instead of the the system's root CA bundle. 83 // that the SDK will use instead of the system's root CA bundle.
80 // Only use this if you want to configure the SDK to use a custom set 84 // Only use this if you want to configure the SDK to use a custom set
81 // of CAs. 85 // of CAs.
82 // 86 //
@@ -92,9 +96,23 @@ type envConfig struct {
92 // 96 //
93 // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle 97 // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
94 CustomCABundle string 98 CustomCABundle string
99
100 csmEnabled string
101 CSMEnabled bool
102 CSMPort string
103 CSMClientID string
95} 104}
96 105
97var ( 106var (
107 csmEnabledEnvKey = []string{
108 "AWS_CSM_ENABLED",
109 }
110 csmPortEnvKey = []string{
111 "AWS_CSM_PORT",
112 }
113 csmClientIDEnvKey = []string{
114 "AWS_CSM_CLIENT_ID",
115 }
98 credAccessEnvKey = []string{ 116 credAccessEnvKey = []string{
99 "AWS_ACCESS_KEY_ID", 117 "AWS_ACCESS_KEY_ID",
100 "AWS_ACCESS_KEY", 118 "AWS_ACCESS_KEY",
@@ -153,11 +171,17 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
153 setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) 171 setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
154 setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) 172 setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
155 173
174 // CSM environment variables
175 setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
176 setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
177 setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
178 cfg.CSMEnabled = len(cfg.csmEnabled) > 0
179
156 // Require logical grouping of credentials 180 // Require logical grouping of credentials
157 if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { 181 if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
158 cfg.Creds = credentials.Value{} 182 cfg.Creds = credentials.Value{}
159 } else { 183 } else {
160 cfg.Creds.ProviderName = "EnvConfigCredentials" 184 cfg.Creds.ProviderName = EnvProviderName
161 } 185 }
162 186
163 regionKeys := regionEnvKeys 187 regionKeys := regionEnvKeys
@@ -173,6 +197,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
173 setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) 197 setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
174 setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) 198 setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
175 199
200 if len(cfg.SharedCredentialsFile) == 0 {
201 cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
202 }
203 if len(cfg.SharedConfigFile) == 0 {
204 cfg.SharedConfigFile = defaults.SharedConfigFilename()
205 }
206
176 cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") 207 cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
177 208
178 return cfg 209 return cfg
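A sketch (assumption, not part of the vendored change) of exercising the new CSM environment values: any non-empty AWS_CSM_ENABLED turns the flag on, and the port falls back to csm.DefaultPort when unset.

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        os.Setenv("AWS_CSM_ENABLED", "true")
        os.Setenv("AWS_CSM_PORT", "31000")
        os.Setenv("AWS_CSM_CLIENT_ID", "my-app")

        // Sessions created after the variables are set pick up the CSM
        // reporter via enableCSM in session.New / newSession.
        sess := session.Must(session.NewSession())
        _ = sess
    }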
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 9f75d5a..51f3055 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -15,6 +15,7 @@ import (
15 "github.com/aws/aws-sdk-go/aws/corehandlers" 15 "github.com/aws/aws-sdk-go/aws/corehandlers"
16 "github.com/aws/aws-sdk-go/aws/credentials" 16 "github.com/aws/aws-sdk-go/aws/credentials"
17 "github.com/aws/aws-sdk-go/aws/credentials/stscreds" 17 "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
18 "github.com/aws/aws-sdk-go/aws/csm"
18 "github.com/aws/aws-sdk-go/aws/defaults" 19 "github.com/aws/aws-sdk-go/aws/defaults"
19 "github.com/aws/aws-sdk-go/aws/endpoints" 20 "github.com/aws/aws-sdk-go/aws/endpoints"
20 "github.com/aws/aws-sdk-go/aws/request" 21 "github.com/aws/aws-sdk-go/aws/request"
@@ -26,7 +27,7 @@ import (
26// Sessions are safe to create service clients concurrently, but it is not safe 27// Sessions are safe to create service clients concurrently, but it is not safe
27// to mutate the Session concurrently. 28// to mutate the Session concurrently.
28// 29//
29// The Session satisfies the service client's client.ClientConfigProvider. 30// The Session satisfies the service client's client.ConfigProvider.
30type Session struct { 31type Session struct {
31 Config *aws.Config 32 Config *aws.Config
32 Handlers request.Handlers 33 Handlers request.Handlers
@@ -58,7 +59,12 @@ func New(cfgs ...*aws.Config) *Session {
58 envCfg := loadEnvConfig() 59 envCfg := loadEnvConfig()
59 60
60 if envCfg.EnableSharedConfig { 61 if envCfg.EnableSharedConfig {
61 s, err := newSession(Options{}, envCfg, cfgs...) 62 var cfg aws.Config
63 cfg.MergeIn(cfgs...)
64 s, err := NewSessionWithOptions(Options{
65 Config: cfg,
66 SharedConfigState: SharedConfigEnable,
67 })
62 if err != nil { 68 if err != nil {
63 // Old session.New expected all errors to be discovered when 69 // Old session.New expected all errors to be discovered when
64 // a request is made, and would report the errors then. This 70 // a request is made, and would report the errors then. This
@@ -76,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session {
76 r.Error = err 82 r.Error = err
77 }) 83 })
78 } 84 }
85
79 return s 86 return s
80 } 87 }
81 88
82 return deprecatedNewSession(cfgs...) 89 s := deprecatedNewSession(cfgs...)
90 if envCfg.CSMEnabled {
91 enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
92 }
93
94 return s
83} 95}
84 96
85// NewSession returns a new Session created from SDK defaults, config files, 97// NewSession returns a new Session created from SDK defaults, config files,
@@ -243,13 +255,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
243 envCfg.EnableSharedConfig = true 255 envCfg.EnableSharedConfig = true
244 } 256 }
245 257
246 if len(envCfg.SharedCredentialsFile) == 0 {
247 envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
248 }
249 if len(envCfg.SharedConfigFile) == 0 {
250 envCfg.SharedConfigFile = defaults.SharedConfigFilename()
251 }
252
253 // Only use AWS_CA_BUNDLE if session option is not provided. 258 // Only use AWS_CA_BUNDLE if session option is not provided.
254 if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { 259 if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
255 f, err := os.Open(envCfg.CustomCABundle) 260 f, err := os.Open(envCfg.CustomCABundle)
@@ -302,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
302 } 307 }
303 308
304 initHandlers(s) 309 initHandlers(s)
305
306 return s 310 return s
307} 311}
308 312
313func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
314 logger.Log("Enabling CSM")
315 if len(port) == 0 {
316 port = csm.DefaultPort
317 }
318
319 r, err := csm.Start(clientID, "127.0.0.1:"+port)
320 if err != nil {
321 return
322 }
323 r.InjectHandlers(handlers)
324}
325
309func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { 326func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
310 cfg := defaults.Config() 327 cfg := defaults.Config()
311 handlers := defaults.Handlers() 328 handlers := defaults.Handlers()
@@ -345,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
345 } 362 }
346 363
347 initHandlers(s) 364 initHandlers(s)
365 if envCfg.CSMEnabled {
366 enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
367 }
348 368
349 // Setup HTTP client with custom cert bundle if enabled 369 // Setup HTTP client with custom cert bundle if enabled
350 if opts.CustomCABundle != nil { 370 if opts.CustomCABundle != nil {
@@ -573,11 +593,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (
573 } 593 }
574 594
575 return client.Config{ 595 return client.Config{
576 Config: s.Config, 596 Config: s.Config,
577 Handlers: s.Handlers, 597 Handlers: s.Handlers,
578 Endpoint: resolved.URL, 598 Endpoint: resolved.URL,
579 SigningRegion: resolved.SigningRegion, 599 SigningRegion: resolved.SigningRegion,
580 SigningName: resolved.SigningName, 600 SigningNameDerived: resolved.SigningNameDerived,
601 SigningName: resolved.SigningName,
581 }, err 602 }, err
582} 603}
583 604
@@ -597,10 +618,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
597 } 618 }
598 619
599 return client.Config{ 620 return client.Config{
600 Config: s.Config, 621 Config: s.Config,
601 Handlers: s.Handlers, 622 Handlers: s.Handlers,
602 Endpoint: resolved.URL, 623 Endpoint: resolved.URL,
603 SigningRegion: resolved.SigningRegion, 624 SigningRegion: resolved.SigningRegion,
604 SigningName: resolved.SigningName, 625 SigningNameDerived: resolved.SigningNameDerived,
626 SigningName: resolved.SigningName,
605 } 627 }
606} 628}
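A sketch (assumption, not part of the vendored change) of the explicit form New now routes through when shared config is enabled: caller configs are merged and passed to NewSessionWithOptions with SharedConfigEnable.

    sess, err := session.NewSessionWithOptions(session.Options{
        Config:            aws.Config{Region: aws.String("us-west-2")},
        SharedConfigState: session.SharedConfigEnable,
    })
    if err != nil {
        // handle the construction error instead of deferring it to request time
    }
    _ = sess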
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 434ac87..8aa0681 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -45,7 +45,7 @@
45// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 45// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
46// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the 46// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
47// request URL. https://github.com/golang/go/issues/16847 points to a bug in 47// request URL. https://github.com/golang/go/issues/16847 points to a bug in
48// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP 48// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
49// message. URL.Opaque generally will force Go to make requests with absolute URL. 49// message. URL.Opaque generally will force Go to make requests with absolute URL.
50// URL.RawPath does not do this, but RawPath must be a valid escaping of Path 50// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
51// or url.EscapedPath will ignore the RawPath escaping. 51// or url.EscapedPath will ignore the RawPath escaping.
@@ -55,7 +55,6 @@
55package v4 55package v4
56 56
57import ( 57import (
58 "bytes"
59 "crypto/hmac" 58 "crypto/hmac"
60 "crypto/sha256" 59 "crypto/sha256"
61 "encoding/hex" 60 "encoding/hex"
@@ -72,6 +71,7 @@ import (
72 "github.com/aws/aws-sdk-go/aws" 71 "github.com/aws/aws-sdk-go/aws"
73 "github.com/aws/aws-sdk-go/aws/credentials" 72 "github.com/aws/aws-sdk-go/aws/credentials"
74 "github.com/aws/aws-sdk-go/aws/request" 73 "github.com/aws/aws-sdk-go/aws/request"
74 "github.com/aws/aws-sdk-go/internal/sdkio"
75 "github.com/aws/aws-sdk-go/private/protocol/rest" 75 "github.com/aws/aws-sdk-go/private/protocol/rest"
76) 76)
77 77
@@ -98,25 +98,25 @@ var ignoredHeaders = rules{
98var requiredSignedHeaders = rules{ 98var requiredSignedHeaders = rules{
99 whitelist{ 99 whitelist{
100 mapRule{ 100 mapRule{
101 "Cache-Control": struct{}{}, 101 "Cache-Control": struct{}{},
102 "Content-Disposition": struct{}{}, 102 "Content-Disposition": struct{}{},
103 "Content-Encoding": struct{}{}, 103 "Content-Encoding": struct{}{},
104 "Content-Language": struct{}{}, 104 "Content-Language": struct{}{},
105 "Content-Md5": struct{}{}, 105 "Content-Md5": struct{}{},
106 "Content-Type": struct{}{}, 106 "Content-Type": struct{}{},
107 "Expires": struct{}{}, 107 "Expires": struct{}{},
108 "If-Match": struct{}{}, 108 "If-Match": struct{}{},
109 "If-Modified-Since": struct{}{}, 109 "If-Modified-Since": struct{}{},
110 "If-None-Match": struct{}{}, 110 "If-None-Match": struct{}{},
111 "If-Unmodified-Since": struct{}{}, 111 "If-Unmodified-Since": struct{}{},
112 "Range": struct{}{}, 112 "Range": struct{}{},
113 "X-Amz-Acl": struct{}{}, 113 "X-Amz-Acl": struct{}{},
114 "X-Amz-Copy-Source": struct{}{}, 114 "X-Amz-Copy-Source": struct{}{},
115 "X-Amz-Copy-Source-If-Match": struct{}{}, 115 "X-Amz-Copy-Source-If-Match": struct{}{},
116 "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, 116 "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
117 "X-Amz-Copy-Source-If-None-Match": struct{}{}, 117 "X-Amz-Copy-Source-If-None-Match": struct{}{},
118 "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, 118 "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
119 "X-Amz-Copy-Source-Range": struct{}{}, 119 "X-Amz-Copy-Source-Range": struct{}{},
120 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, 120 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
121 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, 121 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
122 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, 122 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
@@ -135,6 +135,7 @@ var requiredSignedHeaders = rules{
135 "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, 135 "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
136 "X-Amz-Storage-Class": struct{}{}, 136 "X-Amz-Storage-Class": struct{}{},
137 "X-Amz-Website-Redirect-Location": struct{}{}, 137 "X-Amz-Website-Redirect-Location": struct{}{},
138 "X-Amz-Content-Sha256": struct{}{},
138 }, 139 },
139 }, 140 },
140 patterns{"X-Amz-Meta-"}, 141 patterns{"X-Amz-Meta-"},
@@ -269,7 +270,7 @@ type signingCtx struct {
269// "X-Amz-Content-Sha256" header with a precomputed value. The signer will 270// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
270// only compute the hash if the request header value is empty. 271// only compute the hash if the request header value is empty.
271func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { 272func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
272 return v4.signWithBody(r, body, service, region, 0, signTime) 273 return v4.signWithBody(r, body, service, region, 0, false, signTime)
273} 274}
274 275
275// Presign signs AWS v4 requests with the provided body, service name, region 276// Presign signs AWS v4 requests with the provided body, service name, region
@@ -303,10 +304,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin
303// presigned request's signature you can set the "X-Amz-Content-Sha256" 304// presigned request's signature you can set the "X-Amz-Content-Sha256"
304// HTTP header and that will be included in the request's signature. 305// HTTP header and that will be included in the request's signature.
305func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { 306func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
306 return v4.signWithBody(r, body, service, region, exp, signTime) 307 return v4.signWithBody(r, body, service, region, exp, true, signTime)
307} 308}
308 309
309func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { 310func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
310 currentTimeFn := v4.currentTimeFn 311 currentTimeFn := v4.currentTimeFn
311 if currentTimeFn == nil { 312 if currentTimeFn == nil {
312 currentTimeFn = time.Now 313 currentTimeFn = time.Now
@@ -318,7 +319,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
318 Query: r.URL.Query(), 319 Query: r.URL.Query(),
319 Time: signTime, 320 Time: signTime,
320 ExpireTime: exp, 321 ExpireTime: exp,
321 isPresign: exp != 0, 322 isPresign: isPresign,
322 ServiceName: service, 323 ServiceName: service,
323 Region: region, 324 Region: region,
324 DisableURIPathEscaping: v4.DisableURIPathEscaping, 325 DisableURIPathEscaping: v4.DisableURIPathEscaping,
@@ -340,8 +341,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
340 return http.Header{}, err 341 return http.Header{}, err
341 } 342 }
342 343
344 ctx.sanitizeHostForHeader()
343 ctx.assignAmzQueryValues() 345 ctx.assignAmzQueryValues()
344 ctx.build(v4.DisableHeaderHoisting) 346 if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
347 return nil, err
348 }
345 349
346 // If the request is not presigned the body should be attached to it. This 350 // If the request is not presigned the body should be attached to it. This
347 // prevents the confusion of wanting to send a signed request without 351 // prevents the confusion of wanting to send a signed request without
@@ -364,6 +368,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
364 return ctx.SignedHeaderVals, nil 368 return ctx.SignedHeaderVals, nil
365} 369}
366 370
371func (ctx *signingCtx) sanitizeHostForHeader() {
372 request.SanitizeHostForHeader(ctx.Request)
373}
374
367func (ctx *signingCtx) handlePresignRemoval() { 375func (ctx *signingCtx) handlePresignRemoval() {
368 if !ctx.isPresign { 376 if !ctx.isPresign {
369 return 377 return
@@ -402,7 +410,7 @@ var SignRequestHandler = request.NamedHandler{
402} 410}
403 411
404// SignSDKRequest signs an AWS request with the V4 signature. This 412// SignSDKRequest signs an AWS request with the V4 signature. This
405// request handler is bested used only with the SDK's built in service client's 413// request handler should only be used with the SDK's built in service client's
406// API operation requests. 414// API operation requests.
407// 415//
408// This function should not be used on its on its own, but in conjunction with 416// This function should not be used on its on its own, but in conjunction with
@@ -468,7 +476,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
468 } 476 }
469 477
470 signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), 478 signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
471 name, region, req.ExpireTime, signingTime, 479 name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
472 ) 480 )
473 if err != nil { 481 if err != nil {
474 req.Error = err 482 req.Error = err
@@ -499,10 +507,14 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
499 v4.Logger.Log(msg) 507 v4.Logger.Log(msg)
500} 508}
501 509
502func (ctx *signingCtx) build(disableHeaderHoisting bool) { 510func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
503 ctx.buildTime() // no depends 511 ctx.buildTime() // no depends
504 ctx.buildCredentialString() // no depends 512 ctx.buildCredentialString() // no depends
505 513
514 if err := ctx.buildBodyDigest(); err != nil {
515 return err
516 }
517
506 unsignedHeaders := ctx.Request.Header 518 unsignedHeaders := ctx.Request.Header
507 if ctx.isPresign { 519 if ctx.isPresign {
508 if !disableHeaderHoisting { 520 if !disableHeaderHoisting {
@@ -514,7 +526,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
514 } 526 }
515 } 527 }
516 528
517 ctx.buildBodyDigest()
518 ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) 529 ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
519 ctx.buildCanonicalString() // depends on canon headers / signed headers 530 ctx.buildCanonicalString() // depends on canon headers / signed headers
520 ctx.buildStringToSign() // depends on canon string 531 ctx.buildStringToSign() // depends on canon string
@@ -530,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
530 } 541 }
531 ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) 542 ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
532 } 543 }
544
545 return nil
533} 546}
534 547
535func (ctx *signingCtx) buildTime() { 548func (ctx *signingCtx) buildTime() {
@@ -604,14 +617,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
604 headerValues := make([]string, len(headers)) 617 headerValues := make([]string, len(headers))
605 for i, k := range headers { 618 for i, k := range headers {
606 if k == "host" { 619 if k == "host" {
607 headerValues[i] = "host:" + ctx.Request.URL.Host 620 if ctx.Request.Host != "" {
621 headerValues[i] = "host:" + ctx.Request.Host
622 } else {
623 headerValues[i] = "host:" + ctx.Request.URL.Host
624 }
608 } else { 625 } else {
609 headerValues[i] = k + ":" + 626 headerValues[i] = k + ":" +
610 strings.Join(ctx.SignedHeaderVals[k], ",") 627 strings.Join(ctx.SignedHeaderVals[k], ",")
611 } 628 }
612 } 629 }
613 630 stripExcessSpaces(headerValues)
614 ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") 631 ctx.canonicalHeaders = strings.Join(headerValues, "\n")
615} 632}
616 633
617func (ctx *signingCtx) buildCanonicalString() { 634func (ctx *signingCtx) buildCanonicalString() {
@@ -652,21 +669,34 @@ func (ctx *signingCtx) buildSignature() {
652 ctx.signature = hex.EncodeToString(signature) 669 ctx.signature = hex.EncodeToString(signature)
653} 670}
654 671
655func (ctx *signingCtx) buildBodyDigest() { 672func (ctx *signingCtx) buildBodyDigest() error {
656 hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") 673 hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
657 if hash == "" { 674 if hash == "" {
658 if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { 675 includeSHA256Header := ctx.unsignedPayload ||
676 ctx.ServiceName == "s3" ||
677 ctx.ServiceName == "glacier"
678
679 s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
680
681 if ctx.unsignedPayload || s3Presign {
659 hash = "UNSIGNED-PAYLOAD" 682 hash = "UNSIGNED-PAYLOAD"
683 includeSHA256Header = !s3Presign
660 } else if ctx.Body == nil { 684 } else if ctx.Body == nil {
661 hash = emptyStringSHA256 685 hash = emptyStringSHA256
662 } else { 686 } else {
687 if !aws.IsReaderSeekable(ctx.Body) {
688 return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
689 }
663 hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) 690 hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
664 } 691 }
665 if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { 692
693 if includeSHA256Header {
666 ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) 694 ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
667 } 695 }
668 } 696 }
669 ctx.bodyDigest = hash 697 ctx.bodyDigest = hash
698
699 return nil
670} 700}
671 701
672// isRequestSigned returns if the request is currently signed or presigned 702// isRequestSigned returns if the request is currently signed or presigned
@@ -706,56 +736,53 @@ func makeSha256(data []byte) []byte {
706 736
707func makeSha256Reader(reader io.ReadSeeker) []byte { 737func makeSha256Reader(reader io.ReadSeeker) []byte {
708 hash := sha256.New() 738 hash := sha256.New()
709 start, _ := reader.Seek(0, 1) 739 start, _ := reader.Seek(0, sdkio.SeekCurrent)
710 defer reader.Seek(start, 0) 740 defer reader.Seek(start, sdkio.SeekStart)
711 741
712 io.Copy(hash, reader) 742 io.Copy(hash, reader)
713 return hash.Sum(nil) 743 return hash.Sum(nil)
714} 744}
715 745
716const doubleSpaces = " " 746const doubleSpace = " "
717
718var doubleSpaceBytes = []byte(doubleSpaces)
719 747
720func stripExcessSpaces(headerVals []string) []string { 748// stripExcessSpaces will rewrite the passed in slice's string values to not
721 vals := make([]string, len(headerVals)) 749// contain multiple side-by-side spaces.
722 for i, str := range headerVals { 750func stripExcessSpaces(vals []string) {
723 // Trim leading and trailing spaces 751 var j, k, l, m, spaces int
724 trimmed := strings.TrimSpace(str) 752 for i, str := range vals {
753 // Trim trailing spaces
754 for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
755 }
725 756
726 idx := strings.Index(trimmed, doubleSpaces) 757 // Trim leading spaces
727 var buf []byte 758 for k = 0; k < j && str[k] == ' '; k++ {
728 for idx > -1 { 759 }
729 // Multiple adjacent spaces found 760 str = str[k : j+1]
730 if buf == nil {
731 // first time create the buffer
732 buf = []byte(trimmed)
733 }
734 761
735 stripToIdx := -1 762 // Strip multiple spaces.
736 for j := idx + 1; j < len(buf); j++ { 763 j = strings.Index(str, doubleSpace)
737 if buf[j] != ' ' { 764 if j < 0 {
738 buf = append(buf[:idx+1], buf[j:]...) 765 vals[i] = str
739 stripToIdx = j 766 continue
740 break 767 }
741 }
742 }
743 768
744 if stripToIdx >= 0 { 769 buf := []byte(str)
745 idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) 770 for k, m, l = j, j, len(buf); k < l; k++ {
746 if idx >= 0 { 771 if buf[k] == ' ' {
747 idx += stripToIdx 772 if spaces == 0 {
773 // First space.
774 buf[m] = buf[k]
775 m++
748 } 776 }
777 spaces++
749 } else { 778 } else {
750 idx = -1 779 // End of multiple spaces.
780 spaces = 0
781 buf[m] = buf[k]
782 m++
751 } 783 }
752 } 784 }
753 785
754 if buf != nil { 786 vals[i] = string(buf[:m])
755 vals[i] = string(buf)
756 } else {
757 vals[i] = trimmed
758 }
759 } 787 }
760 return vals
761} 788}
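A sketch (assumption, not part of the vendored change) of signing a plain *http.Request with the v4 signer; the credentials, service name, and endpoint are placeholders. A nil body is signed with the empty-payload SHA-256, matching buildBodyDigest above.

    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

    req, _ := http.NewRequest("GET", "https://example.us-east-1.amazonaws.com/", nil)
    header, err := signer.Sign(req, nil, "execute-api", "us-east-1", time.Now())
    if err != nil {
        // an unseekable non-nil body would fail here via the new error path
    }
    _ = header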
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
index 0e2d864..8b6f234 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -3,6 +3,8 @@ package aws
3import ( 3import (
4 "io" 4 "io"
5 "sync" 5 "sync"
6
7 "github.com/aws/aws-sdk-go/internal/sdkio"
6) 8)
7 9
8// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should 10// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
@@ -22,6 +24,22 @@ type ReaderSeekerCloser struct {
22 r io.Reader 24 r io.Reader
23} 25}
24 26
27// IsReaderSeekable returns if the underlying reader type can be seeked. An
28// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
29// type.
30func IsReaderSeekable(r io.Reader) bool {
31 switch v := r.(type) {
32 case ReaderSeekerCloser:
33 return v.IsSeeker()
34 case *ReaderSeekerCloser:
35 return v.IsSeeker()
36 case io.ReadSeeker:
37 return true
38 default:
39 return false
40 }
41}
42
25// Read reads from the reader up to size of p. The number of bytes read, and 43// Read reads from the reader up to size of p. The number of bytes read, and
26// error if it occurred will be returned. 44// error if it occurred will be returned.
27// 45//
@@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool {
56 return ok 74 return ok
57} 75}
58 76
77// HasLen returns the length of the underlying reader if the value implements
78// the Len() int method.
79func (r ReaderSeekerCloser) HasLen() (int, bool) {
80 type lenner interface {
81 Len() int
82 }
83
84 if lr, ok := r.r.(lenner); ok {
85 return lr.Len(), true
86 }
87
88 return 0, false
89}
90
91// GetLen returns the length of the bytes remaining in the underlying reader.
92// Checks first for Len(), then io.Seeker to determine the size of the
93// underlying reader.
94//
95// Will return -1 if the length cannot be determined.
96func (r ReaderSeekerCloser) GetLen() (int64, error) {
97 if l, ok := r.HasLen(); ok {
98 return int64(l), nil
99 }
100
101 if s, ok := r.r.(io.Seeker); ok {
102 return seekerLen(s)
103 }
104
105 return -1, nil
106}
107
108// SeekerLen attempts to get the number of bytes remaining at the seeker's
109// current position. Returns the number of bytes remaining or error.
110func SeekerLen(s io.Seeker) (int64, error) {
111 // Determine if the seeker is actually seekable. ReaderSeekerCloser
112 // hides the fact that a io.Readers might not actually be seekable.
113 switch v := s.(type) {
114 case ReaderSeekerCloser:
115 return v.GetLen()
116 case *ReaderSeekerCloser:
117 return v.GetLen()
118 }
119
120 return seekerLen(s)
121}
122
123func seekerLen(s io.Seeker) (int64, error) {
124 curOffset, err := s.Seek(0, sdkio.SeekCurrent)
125 if err != nil {
126 return 0, err
127 }
128
129 endOffset, err := s.Seek(0, sdkio.SeekEnd)
130 if err != nil {
131 return 0, err
132 }
133
134 _, err = s.Seek(curOffset, sdkio.SeekStart)
135 if err != nil {
136 return 0, err
137 }
138
139 return endOffset - curOffset, nil
140}
141
59// Close closes the ReaderSeekerCloser. 142// Close closes the ReaderSeekerCloser.
60// 143//
61// If the ReaderSeekerCloser is not an io.Closer nothing will be done. 144// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
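A sketch (assumption, not part of the vendored change) of the new SeekerLen helper measuring the bytes remaining in a wrapped reader.

    body := aws.ReadSeekCloser(strings.NewReader("hello world"))

    n, err := aws.SeekerLen(body)
    if err == nil {
        fmt.Println(n) // 11 bytes remaining in the underlying reader
    }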
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index bbf4c2f..c4d155c 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
5const SDKName = "aws-sdk-go" 5const SDKName = "aws-sdk-go"
6 6
7// SDKVersion is the version of this SDK 7// SDKVersion is the version of this SDK
8const SDKVersion = "1.8.34" 8const SDKVersion = "1.14.31"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 0000000..5aa9137
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
1// +build !go1.7
2
3package sdkio
4
5// Copy of Go 1.7 io package's Seeker constants.
6const (
7 SeekStart = 0 // seek relative to the origin of the file
8 SeekCurrent = 1 // seek relative to the current offset
9 SeekEnd = 2 // seek relative to the end
10)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 0000000..e5f0056
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
1// +build go1.7
2
3package sdkio
4
5import "io"
6
7// Alias for Go 1.7 io package Seeker constants
8const (
9 SeekStart = io.SeekStart // seek relative to the origin of the file
10 SeekCurrent = io.SeekCurrent // seek relative to the current offset
11 SeekEnd = io.SeekEnd // seek relative to the end
12)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 0000000..0c9802d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
1package sdkrand
2
3import (
4 "math/rand"
5 "sync"
6 "time"
7)
8
9// lockedSource is a thread-safe implementation of rand.Source
10type lockedSource struct {
11 lk sync.Mutex
12 src rand.Source
13}
14
15func (r *lockedSource) Int63() (n int64) {
16 r.lk.Lock()
17 n = r.src.Int63()
18 r.lk.Unlock()
19 return
20}
21
22func (r *lockedSource) Seed(seed int64) {
23 r.lk.Lock()
24 r.src.Seed(seed)
25 r.lk.Unlock()
26}
27
28// SeededRand is a new RNG using a thread safe implementation of rand.Source
29var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
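A sketch (assumption, not part of the vendored change) of why the locked source matters: SeededRand can be used from several goroutines without a data race on the shared source.

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = sdkrand.SeededRand.Int63() // the mutex in lockedSource guards the source
        }()
    }
    wg.Wait()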
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 0000000..38ea61a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
1package sdkuri
2
3import (
4 "path"
5 "strings"
6)
7
8// PathJoin will join the elements of the path delimited by the "/"
9// character. Similar to path.Join with the exception the trailing "/"
10// character is preserved if present.
11func PathJoin(elems ...string) string {
12 if len(elems) == 0 {
13 return ""
14 }
15
16 hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
17 str := path.Join(elems...)
18 if hasTrailing && str != "/" {
19 str += "/"
20 }
21
22 return str
23}
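A small sketch (assumption, not part of the vendored change) contrasting PathJoin with the standard path.Join it wraps: only the trailing "/" handling differs.

    fmt.Println(path.Join("latest", "meta-data/"))       // "latest/meta-data"
    fmt.Println(sdkuri.PathJoin("latest", "meta-data/")) // "latest/meta-data/"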
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
new file mode 100644
index 0000000..ecc7bf8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
@@ -0,0 +1,144 @@
1package eventstream
2
3import (
4 "bytes"
5 "encoding/base64"
6 "encoding/json"
7 "fmt"
8 "strconv"
9)
10
11type decodedMessage struct {
12 rawMessage
13 Headers decodedHeaders `json:"headers"`
14}
15type jsonMessage struct {
16 Length json.Number `json:"total_length"`
17 HeadersLen json.Number `json:"headers_length"`
18 PreludeCRC json.Number `json:"prelude_crc"`
19 Headers decodedHeaders `json:"headers"`
20 Payload []byte `json:"payload"`
21 CRC json.Number `json:"message_crc"`
22}
23
24func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) {
25 var jsonMsg jsonMessage
26 if err = json.Unmarshal(b, &jsonMsg); err != nil {
27 return err
28 }
29
30 d.Length, err = numAsUint32(jsonMsg.Length)
31 if err != nil {
32 return err
33 }
34 d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen)
35 if err != nil {
36 return err
37 }
38 d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC)
39 if err != nil {
40 return err
41 }
42 d.Headers = jsonMsg.Headers
43 d.Payload = jsonMsg.Payload
44 d.CRC, err = numAsUint32(jsonMsg.CRC)
45 if err != nil {
46 return err
47 }
48
49 return nil
50}
51
52func (d *decodedMessage) MarshalJSON() ([]byte, error) {
53 jsonMsg := jsonMessage{
54 Length: json.Number(strconv.Itoa(int(d.Length))),
55 HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))),
56 PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))),
57 Headers: d.Headers,
58 Payload: d.Payload,
59 CRC: json.Number(strconv.Itoa(int(d.CRC))),
60 }
61
62 return json.Marshal(jsonMsg)
63}
64
65func numAsUint32(n json.Number) (uint32, error) {
66 v, err := n.Int64()
67 if err != nil {
68 return 0, fmt.Errorf("failed to get int64 json number, %v", err)
69 }
70
71 return uint32(v), nil
72}
73
74func (d decodedMessage) Message() Message {
75 return Message{
76 Headers: Headers(d.Headers),
77 Payload: d.Payload,
78 }
79}
80
81type decodedHeaders Headers
82
83func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
84 var jsonHeaders []struct {
85 Name string `json:"name"`
86 Type valueType `json:"type"`
87 Value interface{} `json:"value"`
88 }
89
90 decoder := json.NewDecoder(bytes.NewReader(b))
91 decoder.UseNumber()
92 if err := decoder.Decode(&jsonHeaders); err != nil {
93 return err
94 }
95
96 var headers Headers
97 for _, h := range jsonHeaders {
98 value, err := valueFromType(h.Type, h.Value)
99 if err != nil {
100 return err
101 }
102 headers.Set(h.Name, value)
103 }
104 (*hs) = decodedHeaders(headers)
105
106 return nil
107}
108
109func valueFromType(typ valueType, val interface{}) (Value, error) {
110 switch typ {
111 case trueValueType:
112 return BoolValue(true), nil
113 case falseValueType:
114 return BoolValue(false), nil
115 case int8ValueType:
116 v, err := val.(json.Number).Int64()
117 return Int8Value(int8(v)), err
118 case int16ValueType:
119 v, err := val.(json.Number).Int64()
120 return Int16Value(int16(v)), err
121 case int32ValueType:
122 v, err := val.(json.Number).Int64()
123 return Int32Value(int32(v)), err
124 case int64ValueType:
125 v, err := val.(json.Number).Int64()
126 return Int64Value(v), err
127 case bytesValueType:
128 v, err := base64.StdEncoding.DecodeString(val.(string))
129 return BytesValue(v), err
130 case stringValueType:
131 v, err := base64.StdEncoding.DecodeString(val.(string))
132 return StringValue(string(v)), err
133 case timestampValueType:
134 v, err := val.(json.Number).Int64()
135 return TimestampValue(timeFromEpochMilli(v)), err
136 case uuidValueType:
137 v, err := base64.StdEncoding.DecodeString(val.(string))
138 var tv UUIDValue
139 copy(tv[:], v)
140 return tv, err
141 default:
142 panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
143 }
144}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
new file mode 100644
index 0000000..4b972b2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
@@ -0,0 +1,199 @@
1package eventstream
2
3import (
4 "bytes"
5 "encoding/binary"
6 "encoding/hex"
7 "encoding/json"
8 "fmt"
9 "hash"
10 "hash/crc32"
11 "io"
12
13 "github.com/aws/aws-sdk-go/aws"
14)
15
16// Decoder provides decoding of Event Stream messages.
17type Decoder struct {
18 r io.Reader
19 logger aws.Logger
20}
21
22// NewDecoder initializes and returns a Decoder for decoding event
23// stream messages from the reader provided.
24func NewDecoder(r io.Reader) *Decoder {
25 return &Decoder{
26 r: r,
27 }
28}
29
30// Decode attempts to decode a single message from the event stream reader.
31// Will return the event stream message, or error if Decode fails to read
32// the message from the stream.
33func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
34 reader := d.r
35 if d.logger != nil {
36 debugMsgBuf := bytes.NewBuffer(nil)
37 reader = io.TeeReader(reader, debugMsgBuf)
38 defer func() {
39 logMessageDecode(d.logger, debugMsgBuf, m, err)
40 }()
41 }
42
43 crc := crc32.New(crc32IEEETable)
44 hashReader := io.TeeReader(reader, crc)
45
46 prelude, err := decodePrelude(hashReader, crc)
47 if err != nil {
48 return Message{}, err
49 }
50
51 if prelude.HeadersLen > 0 {
52 lr := io.LimitReader(hashReader, int64(prelude.HeadersLen))
53 m.Headers, err = decodeHeaders(lr)
54 if err != nil {
55 return Message{}, err
56 }
57 }
58
59 if payloadLen := prelude.PayloadLen(); payloadLen > 0 {
60 buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen)))
61 if err != nil {
62 return Message{}, err
63 }
64 m.Payload = buf
65 }
66
67 msgCRC := crc.Sum32()
68 if err := validateCRC(reader, msgCRC); err != nil {
69 return Message{}, err
70 }
71
72 return m, nil
73}
74
75// UseLogger specifies the Logger that the decoder should use to log the
76// message decode to.
77func (d *Decoder) UseLogger(logger aws.Logger) {
78 d.logger = logger
79}
80
81func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
82 w := bytes.NewBuffer(nil)
83 defer func() { logger.Log(w.String()) }()
84
85 fmt.Fprintf(w, "Raw message:\n%s\n",
86 hex.Dump(msgBuf.Bytes()))
87
88 if decodeErr != nil {
89 fmt.Fprintf(w, "Decode error: %v\n", decodeErr)
90 return
91 }
92
93 rawMsg, err := msg.rawMessage()
94 if err != nil {
95 fmt.Fprintf(w, "failed to create raw message, %v\n", err)
96 return
97 }
98
99 decodedMsg := decodedMessage{
100 rawMessage: rawMsg,
101 Headers: decodedHeaders(msg.Headers),
102 }
103
104 fmt.Fprintf(w, "Decoded message:\n")
105 encoder := json.NewEncoder(w)
106 if err := encoder.Encode(decodedMsg); err != nil {
107 fmt.Fprintf(w, "failed to generate decoded message, %v\n", err)
108 }
109}
110
111func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) {
112 var p messagePrelude
113
114 var err error
115 p.Length, err = decodeUint32(r)
116 if err != nil {
117 return messagePrelude{}, err
118 }
119
120 p.HeadersLen, err = decodeUint32(r)
121 if err != nil {
122 return messagePrelude{}, err
123 }
124
125 if err := p.ValidateLens(); err != nil {
126 return messagePrelude{}, err
127 }
128
129 preludeCRC := crc.Sum32()
130 if err := validateCRC(r, preludeCRC); err != nil {
131 return messagePrelude{}, err
132 }
133
134 p.PreludeCRC = preludeCRC
135
136 return p, nil
137}
138
139func decodePayload(buf []byte, r io.Reader) ([]byte, error) {
140 w := bytes.NewBuffer(buf[0:0])
141
142 _, err := io.Copy(w, r)
143 return w.Bytes(), err
144}
145
146func decodeUint8(r io.Reader) (uint8, error) {
147 type byteReader interface {
148 ReadByte() (byte, error)
149 }
150
151 if br, ok := r.(byteReader); ok {
152 v, err := br.ReadByte()
153 return uint8(v), err
154 }
155
156 var b [1]byte
157 _, err := io.ReadFull(r, b[:])
158 return uint8(b[0]), err
159}
160func decodeUint16(r io.Reader) (uint16, error) {
161 var b [2]byte
162 bs := b[:]
163 _, err := io.ReadFull(r, bs)
164 if err != nil {
165 return 0, err
166 }
167 return binary.BigEndian.Uint16(bs), nil
168}
169func decodeUint32(r io.Reader) (uint32, error) {
170 var b [4]byte
171 bs := b[:]
172 _, err := io.ReadFull(r, bs)
173 if err != nil {
174 return 0, err
175 }
176 return binary.BigEndian.Uint32(bs), nil
177}
178func decodeUint64(r io.Reader) (uint64, error) {
179 var b [8]byte
180 bs := b[:]
181 _, err := io.ReadFull(r, bs)
182 if err != nil {
183 return 0, err
184 }
185 return binary.BigEndian.Uint64(bs), nil
186}
187
188func validateCRC(r io.Reader, expect uint32) error {
189 msgCRC, err := decodeUint32(r)
190 if err != nil {
191 return err
192 }
193
194 if msgCRC != expect {
195 return ChecksumError{}
196 }
197
198 return nil
199}
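A usage sketch (assumption, not part of the vendored change) of the decoder reading framed messages until EOF; conn is a placeholder io.Reader carrying the raw event stream bytes.

    decoder := eventstream.NewDecoder(conn)

    for {
        msg, err := decoder.Decode(nil) // nil lets Decode allocate the payload
        if err == io.EOF {
            break
        }
        if err != nil {
            return err // e.g. ChecksumError on a prelude or message CRC mismatch
        }
        _ = msg.Headers
        _ = msg.Payload
    }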
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
new file mode 100644
index 0000000..150a609
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
@@ -0,0 +1,114 @@
1package eventstream
2
3import (
4 "bytes"
5 "encoding/binary"
6 "hash"
7 "hash/crc32"
8 "io"
9)
10
11// Encoder provides EventStream message encoding.
12type Encoder struct {
13 w io.Writer
14
15 headersBuf *bytes.Buffer
16}
17
18// NewEncoder initializes and returns an Encoder to encode Event Stream
19// messages to an io.Writer.
20func NewEncoder(w io.Writer) *Encoder {
21 return &Encoder{
22 w: w,
23 headersBuf: bytes.NewBuffer(nil),
24 }
25}
26
27// Encode encodes a single EventStream message to the io.Writer the Encoder
28// was created with. An error is returned if writing the message fails.
29func (e *Encoder) Encode(msg Message) error {
30 e.headersBuf.Reset()
31
32 err := encodeHeaders(e.headersBuf, msg.Headers)
33 if err != nil {
34 return err
35 }
36
37 crc := crc32.New(crc32IEEETable)
38 hashWriter := io.MultiWriter(e.w, crc)
39
40 headersLen := uint32(e.headersBuf.Len())
41 payloadLen := uint32(len(msg.Payload))
42
43 if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
44 return err
45 }
46
47 if headersLen > 0 {
48 if _, err := io.Copy(hashWriter, e.headersBuf); err != nil {
49 return err
50 }
51 }
52
53 if payloadLen > 0 {
54 if _, err := hashWriter.Write(msg.Payload); err != nil {
55 return err
56 }
57 }
58
59 msgCRC := crc.Sum32()
60 return binary.Write(e.w, binary.BigEndian, msgCRC)
61}
62
63func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
64 p := messagePrelude{
65 Length: minMsgLen + headersLen + payloadLen,
66 HeadersLen: headersLen,
67 }
68 if err := p.ValidateLens(); err != nil {
69 return err
70 }
71
72 err := binaryWriteFields(w, binary.BigEndian,
73 p.Length,
74 p.HeadersLen,
75 )
76 if err != nil {
77 return err
78 }
79
80 p.PreludeCRC = crc.Sum32()
81 err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
82 if err != nil {
83 return err
84 }
85
86 return nil
87}
88
89func encodeHeaders(w io.Writer, headers Headers) error {
90 for _, h := range headers {
91 hn := headerName{
92 Len: uint8(len(h.Name)),
93 }
94 copy(hn.Name[:hn.Len], h.Name)
95 if err := hn.encode(w); err != nil {
96 return err
97 }
98
99 if err := h.Value.encode(w); err != nil {
100 return err
101 }
102 }
103
104 return nil
105}
106
107func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
108 for _, v := range vs {
109 if err := binary.Write(w, order, v); err != nil {
110 return err
111 }
112 }
113 return nil
114}
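A usage sketch (assumption, not part of the vendored change) writing one framed event to an in-memory buffer; the header name, value, and payload are placeholders.

    var buf bytes.Buffer
    encoder := eventstream.NewEncoder(&buf)

    msg := eventstream.Message{
        Headers: eventstream.Headers{
            {Name: ":event-type", Value: eventstream.StringValue("Records")},
        },
        Payload: []byte(`{"hello":"world"}`),
    }
    if err := encoder.Encode(msg); err != nil {
        // handle the encode error
    }
    // buf now holds prelude + headers + payload + message CRC.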
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
new file mode 100644
index 0000000..5481ef3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
@@ -0,0 +1,23 @@
1package eventstream
2
3import "fmt"
4
5// LengthError provides the error for items being larger than a maximum length.
6type LengthError struct {
7 Part string
8 Want int
9 Have int
10 Value interface{}
11}
12
13func (e LengthError) Error() string {
14 return fmt.Sprintf("%s length invalid, %d/%d, %v",
15 e.Part, e.Want, e.Have, e.Value)
16}
17
18// ChecksumError provides the error for message checksum invalidation errors.
19type ChecksumError struct{}
20
21func (e ChecksumError) Error() string {
22 return "message checksum mismatch"
23}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
new file mode 100644
index 0000000..97937c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
@@ -0,0 +1,196 @@
1package eventstreamapi
2
3import (
4 "fmt"
5 "io"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/private/protocol"
9 "github.com/aws/aws-sdk-go/private/protocol/eventstream"
10)
11
12// Unmarshaler provides the interface for unmarshaling a EventStream
13// message into a SDK type.
14type Unmarshaler interface {
15 UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
16}
17
18// EventStream headers with specific meaning to async API functionality.
19const (
20 MessageTypeHeader = `:message-type` // Identifies type of message.
21 EventMessageType = `event`
22 ErrorMessageType = `error`
23 ExceptionMessageType = `exception`
24
25 // Message Events
26 EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
27
28 // Message Error
29 ErrorCodeHeader = `:error-code`
30 ErrorMessageHeader = `:error-message`
31
32 // Message Exception
33 ExceptionTypeHeader = `:exception-type`
34)
35
36// EventReader provides reading from the EventStream of a reader.
37type EventReader struct {
38 reader io.ReadCloser
39 decoder *eventstream.Decoder
40
41 unmarshalerForEventType func(string) (Unmarshaler, error)
42 payloadUnmarshaler protocol.PayloadUnmarshaler
43
44 payloadBuf []byte
45}
46
47// NewEventReader returns a EventReader built from the reader and unmarshaler
48// provided. Use ReadStream method to start reading from the EventStream.
49func NewEventReader(
50 reader io.ReadCloser,
51 payloadUnmarshaler protocol.PayloadUnmarshaler,
52 unmarshalerForEventType func(string) (Unmarshaler, error),
53) *EventReader {
54 return &EventReader{
55 reader: reader,
56 decoder: eventstream.NewDecoder(reader),
57 payloadUnmarshaler: payloadUnmarshaler,
58 unmarshalerForEventType: unmarshalerForEventType,
59 payloadBuf: make([]byte, 10*1024),
60 }
61}
62
63// UseLogger instructs the EventReader to use the logger and log level
64// specified.
65func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
66 if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
67 r.decoder.UseLogger(logger)
68 }
69}
70
71// ReadEvent attempts to read a message from the EventStream and return the
72// unmarshaled event value that the message is for.
73//
74// For EventStream API errors check if the returned error satisfies the
75// awserr.Error interface to get the error's Code and Message components.
76//
77// EventUnmarshalers called with EventStream messages must take copies of the
78// message's Payload. The payload buffer is reused between event reads.
79func (r *EventReader) ReadEvent() (event interface{}, err error) {
80 msg, err := r.decoder.Decode(r.payloadBuf)
81 if err != nil {
82 return nil, err
83 }
84 defer func() {
85 // Reclaim payload buffer for next message read.
86 r.payloadBuf = msg.Payload[0:0]
87 }()
88
89 typ, err := GetHeaderString(msg, MessageTypeHeader)
90 if err != nil {
91 return nil, err
92 }
93
94 switch typ {
95 case EventMessageType:
96 return r.unmarshalEventMessage(msg)
97 case ExceptionMessageType:
98 err = r.unmarshalEventException(msg)
99 return nil, err
100 case ErrorMessageType:
101 return nil, r.unmarshalErrorMessage(msg)
102 default:
103 return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
104 }
105}
106
107func (r *EventReader) unmarshalEventMessage(
108 msg eventstream.Message,
109) (event interface{}, err error) {
110 eventType, err := GetHeaderString(msg, EventTypeHeader)
111 if err != nil {
112 return nil, err
113 }
114
115 ev, err := r.unmarshalerForEventType(eventType)
116 if err != nil {
117 return nil, err
118 }
119
120 err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
121 if err != nil {
122 return nil, err
123 }
124
125 return ev, nil
126}
127
128func (r *EventReader) unmarshalEventException(
129 msg eventstream.Message,
130) (err error) {
131 eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
132 if err != nil {
133 return err
134 }
135
136 ev, err := r.unmarshalerForEventType(eventType)
137 if err != nil {
138 return err
139 }
140
141 err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
142 if err != nil {
143 return err
144 }
145
146 var ok bool
147 err, ok = ev.(error)
148 if !ok {
149 err = messageError{
150 code: "SerializationError",
151 msg: fmt.Sprintf(
152 "event stream exception %s mapped to non-error %T, %v",
153 eventType, ev, ev,
154 ),
155 }
156 }
157
158 return err
159}
160
161func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
162 var msgErr messageError
163
164 msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader)
165 if err != nil {
166 return err
167 }
168
169 msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader)
170 if err != nil {
171 return err
172 }
173
174 return msgErr
175}
176
177// Close closes the EventReader's EventStream reader.
178func (r *EventReader) Close() error {
179 return r.reader.Close()
180}
181
182// GetHeaderString returns the value of the header as a string. If the header
183// is not set or the value is not a string an error will be returned.
184func GetHeaderString(msg eventstream.Message, headerName string) (string, error) {
185 headerVal := msg.Headers.Get(headerName)
186 if headerVal == nil {
187 return "", fmt.Errorf("error header %s not present", headerName)
188 }
189
190 v, ok := headerVal.Get().(string)
191 if !ok {
192 return "", fmt.Errorf("error header value is not a string, %T", headerVal)
193 }
194
195 return v, nil
196}
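
A minimal sketch of how the EventReader above is typically driven (not part of the vendored diff; the package name and lookup function are illustrative assumptions — generated service code normally supplies the event-type lookup):

package eventstreamexample

import (
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

// drainEvents reads events until the stream is exhausted. The lookup maps an
// event-type header value to an Unmarshaler for that event's shape.
func drainEvents(body io.ReadCloser, pu protocol.PayloadUnmarshaler,
	lookup func(string) (eventstreamapi.Unmarshaler, error)) error {

	r := eventstreamapi.NewEventReader(body, pu, lookup)
	defer r.Close()

	for {
		event, err := r.ReadEvent()
		if err != nil {
			if err == io.EOF {
				return nil // stream drained cleanly
			}
			return err // may satisfy awserr.Error for exception/error messages
		}
		// Each event is a freshly unmarshaled value; the underlying payload
		// buffer is reused by the next ReadEvent call.
		fmt.Printf("received event of type %T\n", event)
	}
}
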
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
new file mode 100644
index 0000000..5ea5a98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
@@ -0,0 +1,24 @@
1package eventstreamapi
2
3import "fmt"
4
5type messageError struct {
6 code string
7 msg string
8}
9
10func (e messageError) Code() string {
11 return e.code
12}
13
14func (e messageError) Message() string {
15 return e.msg
16}
17
18func (e messageError) Error() string {
19 return fmt.Sprintf("%s: %s", e.code, e.msg)
20}
21
22func (e messageError) OrigErr() error {
23 return nil
24}
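
As the ReadEvent documentation above notes, exception and error messages surface as errors satisfying the awserr.Error interface, which messageError implements. A small standalone sketch (package name assumed) of inspecting such an error:

package eventstreamexample

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// classifyStreamErr distinguishes modeled event stream errors, which carry a
// Code and Message, from plain transport or decoding failures.
func classifyStreamErr(err error) {
	if err == nil {
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		log.Printf("event stream error: code=%s message=%s", aerr.Code(), aerr.Message())
		return
	}
	log.Printf("transport or decode error: %v", err)
}
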
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
new file mode 100644
index 0000000..3b44dde
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
@@ -0,0 +1,166 @@
1package eventstream
2
3import (
4 "encoding/binary"
5 "fmt"
6 "io"
7)
8
9// Headers are a collection of EventStream header values.
10type Headers []Header
11
12// Header is a single EventStream Key Value header pair.
13type Header struct {
14 Name string
15 Value Value
16}
17
18// Set associates the name with a value. If the header name already exists in
19// the Headers the value will be replaced with the new one.
20func (hs *Headers) Set(name string, value Value) {
21 var i int
22 for ; i < len(*hs); i++ {
23 if (*hs)[i].Name == name {
24 (*hs)[i].Value = value
25 return
26 }
27 }
28
29 *hs = append(*hs, Header{
30 Name: name, Value: value,
31 })
32}
33
34// Get returns the Value associated with the header. Nil is returned if the
35// value does not exist.
36func (hs Headers) Get(name string) Value {
37 for i := 0; i < len(hs); i++ {
38 if h := hs[i]; h.Name == name {
39 return h.Value
40 }
41 }
42 return nil
43}
44
45// Del deletes the value in the Headers if it exists.
46func (hs *Headers) Del(name string) {
47 for i := 0; i < len(*hs); i++ {
48 if (*hs)[i].Name == name {
49 copy((*hs)[i:], (*hs)[i+1:])
50 (*hs) = (*hs)[:len(*hs)-1]
51 }
52 }
53}
54
55func decodeHeaders(r io.Reader) (Headers, error) {
56 hs := Headers{}
57
58 for {
59 name, err := decodeHeaderName(r)
60 if err != nil {
61 if err == io.EOF {
62 // EOF while getting header name means no more headers
63 break
64 }
65 return nil, err
66 }
67
68 value, err := decodeHeaderValue(r)
69 if err != nil {
70 return nil, err
71 }
72
73 hs.Set(name, value)
74 }
75
76 return hs, nil
77}
78
79func decodeHeaderName(r io.Reader) (string, error) {
80 var n headerName
81
82 var err error
83 n.Len, err = decodeUint8(r)
84 if err != nil {
85 return "", err
86 }
87
88 name := n.Name[:n.Len]
89 if _, err := io.ReadFull(r, name); err != nil {
90 return "", err
91 }
92
93 return string(name), nil
94}
95
96func decodeHeaderValue(r io.Reader) (Value, error) {
97 var raw rawValue
98
99 typ, err := decodeUint8(r)
100 if err != nil {
101 return nil, err
102 }
103 raw.Type = valueType(typ)
104
105 var v Value
106
107 switch raw.Type {
108 case trueValueType:
109 v = BoolValue(true)
110 case falseValueType:
111 v = BoolValue(false)
112 case int8ValueType:
113 var tv Int8Value
114 err = tv.decode(r)
115 v = tv
116 case int16ValueType:
117 var tv Int16Value
118 err = tv.decode(r)
119 v = tv
120 case int32ValueType:
121 var tv Int32Value
122 err = tv.decode(r)
123 v = tv
124 case int64ValueType:
125 var tv Int64Value
126 err = tv.decode(r)
127 v = tv
128 case bytesValueType:
129 var tv BytesValue
130 err = tv.decode(r)
131 v = tv
132 case stringValueType:
133 var tv StringValue
134 err = tv.decode(r)
135 v = tv
136 case timestampValueType:
137 var tv TimestampValue
138 err = tv.decode(r)
139 v = tv
140 case uuidValueType:
141 var tv UUIDValue
142 err = tv.decode(r)
143 v = tv
144 default:
145 panic(fmt.Sprintf("unknown value type %d", raw.Type))
146 }
147
148 // Error could be EOF, let caller deal with it
149 return v, err
150}
151
152const maxHeaderNameLen = 255
153
154type headerName struct {
155 Len uint8
156 Name [maxHeaderNameLen]byte
157}
158
159func (v headerName) encode(w io.Writer) error {
160 if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
161 return err
162 }
163
164 _, err := w.Write(v.Name[:v.Len])
165 return err
166}
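
A short sketch (package and header values are illustrative) of the Headers helpers defined above: Set replaces an existing value with the same name, Get is a linear lookup returning nil on a miss, and Del removes the pair:

package eventstreamexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func headerRoundTrip() {
	var hs eventstream.Headers

	hs.Set(":message-type", eventstream.StringValue("event"))
	hs.Set(":event-type", eventstream.StringValue("Records"))
	hs.Set(":event-type", eventstream.StringValue("Stats")) // replaces "Records"

	if v := hs.Get(":event-type"); v != nil {
		fmt.Println(v.String()) // Stats
	}

	hs.Del(":message-type")
	fmt.Println(hs.Get(":message-type") == nil) // true
}
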
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
new file mode 100644
index 0000000..e3fc076
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
@@ -0,0 +1,501 @@
1package eventstream
2
3import (
4 "encoding/base64"
5 "encoding/binary"
6 "fmt"
7 "io"
8 "strconv"
9 "time"
10)
11
12const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
13
14// valueType is the EventStream header value type.
15type valueType uint8
16
17// Header value types
18const (
19 trueValueType valueType = iota
20 falseValueType
21 int8ValueType // Byte
22 int16ValueType // Short
23 int32ValueType // Integer
24 int64ValueType // Long
25 bytesValueType
26 stringValueType
27 timestampValueType
28 uuidValueType
29)
30
31func (t valueType) String() string {
32 switch t {
33 case trueValueType:
34 return "bool"
35 case falseValueType:
36 return "bool"
37 case int8ValueType:
38 return "int8"
39 case int16ValueType:
40 return "int16"
41 case int32ValueType:
42 return "int32"
43 case int64ValueType:
44 return "int64"
45 case bytesValueType:
46 return "byte_array"
47 case stringValueType:
48 return "string"
49 case timestampValueType:
50 return "timestamp"
51 case uuidValueType:
52 return "uuid"
53 default:
54 return fmt.Sprintf("unknown value type %d", uint8(t))
55 }
56}
57
58type rawValue struct {
59 Type valueType
60 Len uint16 // Only set for variable length slices
61 Value []byte // byte representation of value, BigEndian encoding.
62}
63
64func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
65 return binaryWriteFields(w, binary.BigEndian,
66 r.Type,
67 v,
68 )
69}
70
71func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
72 binary.Write(w, binary.BigEndian, r.Type)
73
74 _, err := w.Write(v)
75 return err
76}
77
78func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
79 if len(v) > maxHeaderValueLen {
80 return LengthError{
81 Part: "header value",
82 Want: maxHeaderValueLen, Have: len(v),
83 Value: v,
84 }
85 }
86 r.Len = uint16(len(v))
87
88 err := binaryWriteFields(w, binary.BigEndian,
89 r.Type,
90 r.Len,
91 )
92 if err != nil {
93 return err
94 }
95
96 _, err = w.Write(v)
97 return err
98}
99
100func (r rawValue) encodeString(w io.Writer, v string) error {
101 if len(v) > maxHeaderValueLen {
102 return LengthError{
103 Part: "header value",
104 Want: maxHeaderValueLen, Have: len(v),
105 Value: v,
106 }
107 }
108 r.Len = uint16(len(v))
109
110 type stringWriter interface {
111 WriteString(string) (int, error)
112 }
113
114 err := binaryWriteFields(w, binary.BigEndian,
115 r.Type,
116 r.Len,
117 )
118 if err != nil {
119 return err
120 }
121
122 if sw, ok := w.(stringWriter); ok {
123 _, err = sw.WriteString(v)
124 } else {
125 _, err = w.Write([]byte(v))
126 }
127
128 return err
129}
130
131func decodeFixedBytesValue(r io.Reader, buf []byte) error {
132 _, err := io.ReadFull(r, buf)
133 return err
134}
135
136func decodeBytesValue(r io.Reader) ([]byte, error) {
137 var raw rawValue
138 var err error
139 raw.Len, err = decodeUint16(r)
140 if err != nil {
141 return nil, err
142 }
143
144 buf := make([]byte, raw.Len)
145 _, err = io.ReadFull(r, buf)
146 if err != nil {
147 return nil, err
148 }
149
150 return buf, nil
151}
152
153func decodeStringValue(r io.Reader) (string, error) {
154 v, err := decodeBytesValue(r)
155 return string(v), err
156}
157
158// Value represents the abstract header value.
159type Value interface {
160 Get() interface{}
161 String() string
162 valueType() valueType
163 encode(io.Writer) error
164}
165
166// A BoolValue provides eventstream encoding and representation
167// of a Go bool value.
168type BoolValue bool
169
170// Get returns the underlying value.
171func (v BoolValue) Get() interface{} {
172 return bool(v)
173}
174
175// valueType returns the EventStream header value type value.
176func (v BoolValue) valueType() valueType {
177 if v {
178 return trueValueType
179 }
180 return falseValueType
181}
182
183func (v BoolValue) String() string {
184 return strconv.FormatBool(bool(v))
185}
186
187// encode encodes the BoolValue into an eventstream binary value
188// representation.
189func (v BoolValue) encode(w io.Writer) error {
190 return binary.Write(w, binary.BigEndian, v.valueType())
191}
192
193// An Int8Value provides eventstream encoding, and representation of a Go
194// int8 value.
195type Int8Value int8
196
197// Get returns the underlying value.
198func (v Int8Value) Get() interface{} {
199 return int8(v)
200}
201
202// valueType returns the EventStream header value type value.
203func (Int8Value) valueType() valueType {
204 return int8ValueType
205}
206
207func (v Int8Value) String() string {
208 return fmt.Sprintf("0x%02x", int8(v))
209}
210
211// encode encodes the Int8Value into an eventstream binary value
212// representation.
213func (v Int8Value) encode(w io.Writer) error {
214 raw := rawValue{
215 Type: v.valueType(),
216 }
217
218 return raw.encodeScalar(w, v)
219}
220
221func (v *Int8Value) decode(r io.Reader) error {
222 n, err := decodeUint8(r)
223 if err != nil {
224 return err
225 }
226
227 *v = Int8Value(n)
228 return nil
229}
230
231// An Int16Value provides eventstream encoding, and representation of a Go
232// int16 value.
233type Int16Value int16
234
235// Get returns the underlying value.
236func (v Int16Value) Get() interface{} {
237 return int16(v)
238}
239
240// valueType returns the EventStream header value type value.
241func (Int16Value) valueType() valueType {
242 return int16ValueType
243}
244
245func (v Int16Value) String() string {
246 return fmt.Sprintf("0x%04x", int16(v))
247}
248
249// encode encodes the Int16Value into an eventstream binary value
250// representation.
251func (v Int16Value) encode(w io.Writer) error {
252 raw := rawValue{
253 Type: v.valueType(),
254 }
255 return raw.encodeScalar(w, v)
256}
257
258func (v *Int16Value) decode(r io.Reader) error {
259 n, err := decodeUint16(r)
260 if err != nil {
261 return err
262 }
263
264 *v = Int16Value(n)
265 return nil
266}
267
268// An Int32Value provides eventstream encoding, and representation of a Go
269// int32 value.
270type Int32Value int32
271
272// Get returns the underlying value.
273func (v Int32Value) Get() interface{} {
274 return int32(v)
275}
276
277// valueType returns the EventStream header value type value.
278func (Int32Value) valueType() valueType {
279 return int32ValueType
280}
281
282func (v Int32Value) String() string {
283 return fmt.Sprintf("0x%08x", int32(v))
284}
285
286// encode encodes the Int32Value into an eventstream binary value
287// representation.
288func (v Int32Value) encode(w io.Writer) error {
289 raw := rawValue{
290 Type: v.valueType(),
291 }
292 return raw.encodeScalar(w, v)
293}
294
295func (v *Int32Value) decode(r io.Reader) error {
296 n, err := decodeUint32(r)
297 if err != nil {
298 return err
299 }
300
301 *v = Int32Value(n)
302 return nil
303}
304
305// An Int64Value provides eventstream encoding, and representation of a Go
306// int64 value.
307type Int64Value int64
308
309// Get returns the underlying value.
310func (v Int64Value) Get() interface{} {
311 return int64(v)
312}
313
314// valueType returns the EventStream header value type value.
315func (Int64Value) valueType() valueType {
316 return int64ValueType
317}
318
319func (v Int64Value) String() string {
320 return fmt.Sprintf("0x%016x", int64(v))
321}
322
323// encode encodes the Int64Value into an eventstream binary value
324// representation.
325func (v Int64Value) encode(w io.Writer) error {
326 raw := rawValue{
327 Type: v.valueType(),
328 }
329 return raw.encodeScalar(w, v)
330}
331
332func (v *Int64Value) decode(r io.Reader) error {
333 n, err := decodeUint64(r)
334 if err != nil {
335 return err
336 }
337
338 *v = Int64Value(n)
339 return nil
340}
341
342// A BytesValue provides eventstream encoding and representation of a Go
343// byte slice.
344type BytesValue []byte
345
346// Get returns the underlying value.
347func (v BytesValue) Get() interface{} {
348 return []byte(v)
349}
350
351// valueType returns the EventStream header value type value.
352func (BytesValue) valueType() valueType {
353 return bytesValueType
354}
355
356func (v BytesValue) String() string {
357 return base64.StdEncoding.EncodeToString([]byte(v))
358}
359
360// encode encodes the BytesValue into an eventstream binary value
361// representation.
362func (v BytesValue) encode(w io.Writer) error {
363 raw := rawValue{
364 Type: v.valueType(),
365 }
366
367 return raw.encodeBytes(w, []byte(v))
368}
369
370func (v *BytesValue) decode(r io.Reader) error {
371 buf, err := decodeBytesValue(r)
372 if err != nil {
373 return err
374 }
375
376 *v = BytesValue(buf)
377 return nil
378}
379
380// A StringValue provides eventstream encoding and representation of a Go
381// string.
382type StringValue string
383
384// Get returns the underlying value.
385func (v StringValue) Get() interface{} {
386 return string(v)
387}
388
389// valueType returns the EventStream header value type value.
390func (StringValue) valueType() valueType {
391 return stringValueType
392}
393
394func (v StringValue) String() string {
395 return string(v)
396}
397
398// encode encodes the StringValue into an eventstream binary value
399// representation.
400func (v StringValue) encode(w io.Writer) error {
401 raw := rawValue{
402 Type: v.valueType(),
403 }
404
405 return raw.encodeString(w, string(v))
406}
407
408func (v *StringValue) decode(r io.Reader) error {
409 s, err := decodeStringValue(r)
410 if err != nil {
411 return err
412 }
413
414 *v = StringValue(s)
415 return nil
416}
417
418// A TimestampValue provides eventstream encoding and representation of a Go
419// timestamp.
420type TimestampValue time.Time
421
422// Get returns the underlying value.
423func (v TimestampValue) Get() interface{} {
424 return time.Time(v)
425}
426
427// valueType returns the EventStream header value type value.
428func (TimestampValue) valueType() valueType {
429 return timestampValueType
430}
431
432func (v TimestampValue) epochMilli() int64 {
433 nano := time.Time(v).UnixNano()
434 msec := nano / int64(time.Millisecond)
435 return msec
436}
437
438func (v TimestampValue) String() string {
439 msec := v.epochMilli()
440 return strconv.FormatInt(msec, 10)
441}
442
443// encode encodes the TimestampValue into an eventstream binary value
444// representation.
445func (v TimestampValue) encode(w io.Writer) error {
446 raw := rawValue{
447 Type: v.valueType(),
448 }
449
450 msec := v.epochMilli()
451 return raw.encodeScalar(w, msec)
452}
453
454func (v *TimestampValue) decode(r io.Reader) error {
455 n, err := decodeUint64(r)
456 if err != nil {
457 return err
458 }
459
460 *v = TimestampValue(timeFromEpochMilli(int64(n)))
461 return nil
462}
463
464func timeFromEpochMilli(t int64) time.Time {
465 secs := t / 1e3
466 msec := t % 1e3
467 return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
468}
469
470// A UUIDValue provides eventstream encoding and representation of a UUID
471// value.
472type UUIDValue [16]byte
473
474// Get returns the underlying value.
475func (v UUIDValue) Get() interface{} {
476 return v[:]
477}
478
479// valueType returns the EventStream header value type value.
480func (UUIDValue) valueType() valueType {
481 return uuidValueType
482}
483
484func (v UUIDValue) String() string {
485 return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:])
486}
487
488// encode encodes the UUIDValue into an eventstream binary value
489// representation.
490func (v UUIDValue) encode(w io.Writer) error {
491 raw := rawValue{
492 Type: v.valueType(),
493 }
494
495 return raw.encodeFixedSlice(w, v[:])
496}
497
498func (v *UUIDValue) decode(r io.Reader) error {
499 tv := (*v)[:]
500 return decodeFixedBytesValue(r, tv)
501}
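
One detail worth calling out from the value types above: TimestampValue is carried on the wire as milliseconds since the Unix epoch, so sub-millisecond precision is dropped. A small illustrative sketch (package name assumed):

package eventstreamexample

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func timestampMillis() {
	t := time.Unix(1, 500*int64(time.Millisecond)) // 1s + 500ms after the epoch
	tv := eventstream.TimestampValue(t)

	fmt.Println(tv.String())                   // "1500" (epoch milliseconds)
	fmt.Println(tv.Get().(time.Time).Equal(t)) // true; Get returns the time.Time
}
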
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
new file mode 100644
index 0000000..2dc012a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
@@ -0,0 +1,103 @@
1package eventstream
2
3import (
4 "bytes"
5 "encoding/binary"
6 "hash/crc32"
7)
8
9const preludeLen = 8
10const preludeCRCLen = 4
11const msgCRCLen = 4
12const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
13const maxPayloadLen = 1024 * 1024 * 16 // 16MB
14const maxHeadersLen = 1024 * 128 // 128KB
15const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
16
17var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
18
19// A Message provides the eventstream message representation.
20type Message struct {
21 Headers Headers
22 Payload []byte
23}
24
25func (m *Message) rawMessage() (rawMessage, error) {
26 var raw rawMessage
27
28 if len(m.Headers) > 0 {
29 var headers bytes.Buffer
30 if err := encodeHeaders(&headers, m.Headers); err != nil {
31 return rawMessage{}, err
32 }
33 raw.Headers = headers.Bytes()
34 raw.HeadersLen = uint32(len(raw.Headers))
35 }
36
37 raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
38
39 hash := crc32.New(crc32IEEETable)
40 binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
41 raw.PreludeCRC = hash.Sum32()
42
43 binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
44
45 if raw.HeadersLen > 0 {
46 hash.Write(raw.Headers)
47 }
48
49 // Read payload bytes and update hash for it as well.
50 if len(m.Payload) > 0 {
51 raw.Payload = m.Payload
52 hash.Write(raw.Payload)
53 }
54
55 raw.CRC = hash.Sum32()
56
57 return raw, nil
58}
59
60type messagePrelude struct {
61 Length uint32
62 HeadersLen uint32
63 PreludeCRC uint32
64}
65
66func (p messagePrelude) PayloadLen() uint32 {
67 return p.Length - p.HeadersLen - minMsgLen
68}
69
70func (p messagePrelude) ValidateLens() error {
71 if p.Length == 0 || p.Length > maxMsgLen {
72 return LengthError{
73 Part: "message prelude",
74 Want: maxMsgLen,
75 Have: int(p.Length),
76 }
77 }
78 if p.HeadersLen > maxHeadersLen {
79 return LengthError{
80 Part: "message headers",
81 Want: maxHeadersLen,
82 Have: int(p.HeadersLen),
83 }
84 }
85 if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
86 return LengthError{
87 Part: "message payload",
88 Want: maxPayloadLen,
89 Have: int(payloadLen),
90 }
91 }
92
93 return nil
94}
95
96type rawMessage struct {
97 messagePrelude
98
99 Headers []byte
100 Payload []byte
101
102 CRC uint32
103}
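
The length bookkeeping in the prelude above works out to a fixed 16 bytes of framing per message (8-byte prelude, 4-byte prelude CRC, 4-byte trailing CRC) on top of the encoded headers and payload. A worked sketch of the same arithmetic, using the constants from the file:

package eventstreamexample

import "fmt"

func frameOverhead(headersLen, payloadLen uint32) uint32 {
	const preludeLen = 8
	const preludeCRCLen = 4
	const msgCRCLen = 4
	const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen // 16

	total := headersLen + payloadLen + minMsgLen
	// PayloadLen() recovers the payload size from the prelude the same way.
	fmt.Printf("total=%d recoveredPayload=%d\n", total, total-headersLen-minMsgLen)
	return total
}
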
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 0000000..776d110
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
1package protocol
2
3import (
4 "encoding/base64"
5 "encoding/json"
6 "fmt"
7 "strconv"
8
9 "github.com/aws/aws-sdk-go/aws"
10)
11
12// EscapeMode is the mode that should be used for escaping a value
13type EscapeMode uint
14
15// The modes for escaping a value before it is marshaled, and unmarshaled.
16const (
17 NoEscape EscapeMode = iota
18 Base64Escape
19 QuotedEscape
20)
21
22// EncodeJSONValue marshals the value into a JSON string, and optionally base64
23// encodes the string before returning it.
24//
25// Will panic if the escape mode is unknown.
26func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
27 b, err := json.Marshal(v)
28 if err != nil {
29 return "", err
30 }
31
32 switch escape {
33 case NoEscape:
34 return string(b), nil
35 case Base64Escape:
36 return base64.StdEncoding.EncodeToString(b), nil
37 case QuotedEscape:
38 return strconv.Quote(string(b)), nil
39 }
40
41 panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
42}
43
44// DecodeJSONValue will attempt to decode the string input as a JSONValue.
45// Optionally base64 decodes the value first before JSON unmarshaling.
46//
47// Will panic if the escape mode is unknown.
48func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
49 var b []byte
50 var err error
51
52 switch escape {
53 case NoEscape:
54 b = []byte(v)
55 case Base64Escape:
56 b, err = base64.StdEncoding.DecodeString(v)
57 case QuotedEscape:
58 var u string
59 u, err = strconv.Unquote(v)
60 b = []byte(u)
61 default:
62 panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
63 }
64
65 if err != nil {
66 return nil, err
67 }
68
69 m := aws.JSONValue{}
70 err = json.Unmarshal(b, &m)
71 if err != nil {
72 return nil, err
73 }
74
75 return m, nil
76}
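
A hedged sketch of round-tripping an aws.JSONValue through the Base64Escape mode defined above, which is the mode the REST builders in this change use for JSONValue fields bound to headers (the map contents and package name are made up for illustration):

package jsonvalueexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func roundTripJSONValue() error {
	v := aws.JSONValue{"metric": "latency", "threshold": 42}

	encoded, err := protocol.EncodeJSONValue(v, protocol.Base64Escape)
	if err != nil {
		return err
	}
	fmt.Println(encoded) // base64 of the JSON document

	decoded, err := protocol.DecodeJSONValue(encoded, protocol.Base64Escape)
	if err != nil {
		return err
	}
	fmt.Println(decoded["metric"]) // latency
	return nil
}
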
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 0000000..e21614a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
1package protocol
2
3import (
4 "io"
5 "io/ioutil"
6 "net/http"
7
8 "github.com/aws/aws-sdk-go/aws"
9 "github.com/aws/aws-sdk-go/aws/client/metadata"
10 "github.com/aws/aws-sdk-go/aws/request"
11)
12
13// PayloadUnmarshaler provides the interface for unmarshaling a payload's
14// reader into an SDK shape.
15type PayloadUnmarshaler interface {
16 UnmarshalPayload(io.Reader, interface{}) error
17}
18
19// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
20// HandlerList. This provides support for unmarshaling a payload reader to
21// a shape without needing an SDK request first.
22type HandlerPayloadUnmarshal struct {
23 Unmarshalers request.HandlerList
24}
25
26// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
27// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
28// the payload fails.
29func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
30 req := &request.Request{
31 HTTPRequest: &http.Request{},
32 HTTPResponse: &http.Response{
33 StatusCode: 200,
34 Header: http.Header{},
35 Body: ioutil.NopCloser(r),
36 },
37 Data: v,
38 }
39
40 h.Unmarshalers.Run(req)
41
42 return req.Error
43}
44
45// PayloadMarshaler provides the interface for marshaling an SDK shape into an
46// io.Writer.
47type PayloadMarshaler interface {
48 MarshalPayload(io.Writer, interface{}) error
49}
50
51// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
52// This provides support for marshaling an SDK shape into an io.Writer without
53// needing an SDK request first.
54type HandlerPayloadMarshal struct {
55 Marshalers request.HandlerList
56}
57
58// MarshalPayload marshals the SDK shape into the io.Writer using the
59// Marshalers HandlerList provided. Returns an error if marshaling
60// fails.
61func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
62 req := request.New(
63 aws.Config{},
64 metadata.ClientInfo{},
65 request.Handlers{},
66 nil,
67 &request.Operation{HTTPMethod: "GET"},
68 v,
69 nil,
70 )
71
72 h.Marshalers.Run(req)
73
74 if req.Error != nil {
75 return req.Error
76 }
77
78 io.Copy(w, req.GetBody())
79
80 return nil
81}
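
To make the indirection concrete, here is a sketch of wiring HandlerPayloadUnmarshal to a hand-rolled HandlerList. Real callers pass a protocol's generated unmarshal handlers; the handler below, which simply copies the response body into a *string, and the package name are assumptions for illustration:

package payloadexample

import (
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func newRawUnmarshaler() protocol.PayloadUnmarshaler {
	var handlers request.HandlerList
	handlers.PushBack(func(r *request.Request) {
		b, err := ioutil.ReadAll(r.HTTPResponse.Body)
		if err != nil {
			r.Error = err
			return
		}
		if out, ok := r.Data.(*string); ok {
			*out = string(b)
		}
	})
	return protocol.HandlerPayloadUnmarshal{Unmarshalers: handlers}
}

// Usage sketch:
//   var s string
//   err := newRawUnmarshaler().UnmarshalPayload(strings.NewReader("hello"), &s)
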
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 18169f0..60e5b09 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -25,7 +25,7 @@ func Build(r *request.Request) {
25 return 25 return
26 } 26 }
27 27
28 if r.ExpireTime == 0 { 28 if !r.IsPresigned() {
29 r.HTTPRequest.Method = "POST" 29 r.HTTPRequest.Method = "POST"
30 r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") 30 r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
31 r.SetBufferBody([]byte(body.Encode())) 31 r.SetBufferBody([]byte(body.Encode()))
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 524ca95..75866d0 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
121 return nil 121 return nil
122 } 122 }
123 123
124 if _, ok := value.Interface().([]byte); ok {
125 return q.parseScalar(v, value, prefix, tag)
126 }
127
124 // check for unflattened list member 128 // check for unflattened list member
125 if !q.isEC2 && tag.Get("flattened") == "" { 129 if !q.isEC2 && tag.Get("flattened") == "" {
126 if listName := tag.Get("locationNameList"); listName == "" { 130 if listName := tag.Get("locationNameList"); listName == "" {
@@ -229,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
229 v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) 233 v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
230 case time.Time: 234 case time.Time:
231 const ISO8601UTC = "2006-01-02T15:04:05Z" 235 const ISO8601UTC = "2006-01-02T15:04:05Z"
232 v.Set(name, value.UTC().Format(ISO8601UTC)) 236 format := tag.Get("timestampFormat")
237 if len(format) == 0 {
238 format = protocol.ISO8601TimeFormatName
239 }
240
241 v.Set(name, protocol.FormatTime(format, value))
233 default: 242 default:
234 return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) 243 return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
235 } 244 }
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index 7161835..b34f525 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -4,7 +4,6 @@ package rest
4import ( 4import (
5 "bytes" 5 "bytes"
6 "encoding/base64" 6 "encoding/base64"
7 "encoding/json"
8 "fmt" 7 "fmt"
9 "io" 8 "io"
10 "net/http" 9 "net/http"
@@ -18,11 +17,9 @@ import (
18 "github.com/aws/aws-sdk-go/aws" 17 "github.com/aws/aws-sdk-go/aws"
19 "github.com/aws/aws-sdk-go/aws/awserr" 18 "github.com/aws/aws-sdk-go/aws/awserr"
20 "github.com/aws/aws-sdk-go/aws/request" 19 "github.com/aws/aws-sdk-go/aws/request"
20 "github.com/aws/aws-sdk-go/private/protocol"
21) 21)
22 22
23// RFC822 returns an RFC822 formatted timestamp for AWS protocols
24const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
25
26// Whether the byte value can be sent without escaping in AWS URLs 23// Whether the byte value can be sent without escaping in AWS URLs
27var noEscape [256]bool 24var noEscape [256]bool
28 25
@@ -252,13 +249,12 @@ func EscapePath(path string, encodeSep bool) string {
252 return buf.String() 249 return buf.String()
253} 250}
254 251
255func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { 252func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
256 v = reflect.Indirect(v) 253 v = reflect.Indirect(v)
257 if !v.IsValid() { 254 if !v.IsValid() {
258 return "", errValueNotSet 255 return "", errValueNotSet
259 } 256 }
260 257
261 var str string
262 switch value := v.Interface().(type) { 258 switch value := v.Interface().(type) {
263 case string: 259 case string:
264 str = value 260 str = value
@@ -271,19 +267,28 @@ func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
271 case float64: 267 case float64:
272 str = strconv.FormatFloat(value, 'f', -1, 64) 268 str = strconv.FormatFloat(value, 'f', -1, 64)
273 case time.Time: 269 case time.Time:
274 str = value.UTC().Format(RFC822) 270 format := tag.Get("timestampFormat")
271 if len(format) == 0 {
272 format = protocol.RFC822TimeFormatName
273 if tag.Get("location") == "querystring" {
274 format = protocol.ISO8601TimeFormatName
275 }
276 }
277 str = protocol.FormatTime(format, value)
275 case aws.JSONValue: 278 case aws.JSONValue:
276 b, err := json.Marshal(value) 279 if len(value) == 0 {
277 if err != nil { 280 return "", errValueNotSet
278 return "", err
279 } 281 }
282 escaping := protocol.NoEscape
280 if tag.Get("location") == "header" { 283 if tag.Get("location") == "header" {
281 str = base64.StdEncoding.EncodeToString(b) 284 escaping = protocol.Base64Escape
282 } else { 285 }
283 str = string(b) 286 str, err = protocol.EncodeJSONValue(value, escaping)
287 if err != nil {
288 return "", fmt.Errorf("unable to encode JSONValue, %v", err)
284 } 289 }
285 default: 290 default:
286 err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) 291 err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
287 return "", err 292 return "", err
288 } 293 }
289 return str, nil 294 return str, nil
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 7a779ee..33fd53b 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -3,7 +3,6 @@ package rest
3import ( 3import (
4 "bytes" 4 "bytes"
5 "encoding/base64" 5 "encoding/base64"
6 "encoding/json"
7 "fmt" 6 "fmt"
8 "io" 7 "io"
9 "io/ioutil" 8 "io/ioutil"
@@ -16,6 +15,7 @@ import (
16 "github.com/aws/aws-sdk-go/aws" 15 "github.com/aws/aws-sdk-go/aws"
17 "github.com/aws/aws-sdk-go/aws/awserr" 16 "github.com/aws/aws-sdk-go/aws/awserr"
18 "github.com/aws/aws-sdk-go/aws/request" 17 "github.com/aws/aws-sdk-go/aws/request"
18 "github.com/aws/aws-sdk-go/private/protocol"
19) 19)
20 20
21// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests 21// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
@@ -198,23 +198,21 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
198 } 198 }
199 v.Set(reflect.ValueOf(&f)) 199 v.Set(reflect.ValueOf(&f))
200 case *time.Time: 200 case *time.Time:
201 t, err := time.Parse(RFC822, header) 201 format := tag.Get("timestampFormat")
202 if len(format) == 0 {
203 format = protocol.RFC822TimeFormatName
204 }
205 t, err := protocol.ParseTime(format, header)
202 if err != nil { 206 if err != nil {
203 return err 207 return err
204 } 208 }
205 v.Set(reflect.ValueOf(&t)) 209 v.Set(reflect.ValueOf(&t))
206 case aws.JSONValue: 210 case aws.JSONValue:
207 b := []byte(header) 211 escaping := protocol.NoEscape
208 var err error
209 if tag.Get("location") == "header" { 212 if tag.Get("location") == "header" {
210 b, err = base64.StdEncoding.DecodeString(header) 213 escaping = protocol.Base64Escape
211 if err != nil {
212 return err
213 }
214 } 214 }
215 215 m, err := protocol.DecodeJSONValue(header, escaping)
216 m := aws.JSONValue{}
217 err = json.Unmarshal(b, &m)
218 if err != nil { 216 if err != nil {
219 return err 217 return err
220 } 218 }
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 0000000..b7ed6c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,72 @@
1package protocol
2
3import (
4 "strconv"
5 "time"
6)
7
8// Names of time formats supported by the SDK
9const (
10 RFC822TimeFormatName = "rfc822"
11 ISO8601TimeFormatName = "iso8601"
12 UnixTimeFormatName = "unixTimestamp"
13)
14
15// Time formats supported by the SDK
16const (
17 // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
18 RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
19
20 // RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
21 ISO8601TimeFormat = "2006-01-02T15:04:05Z"
22)
23
24// IsKnownTimestampFormat returns whether the timestamp format name
25// is known to the SDK's protocols.
26func IsKnownTimestampFormat(name string) bool {
27 switch name {
28 case RFC822TimeFormatName:
29 fallthrough
30 case ISO8601TimeFormatName:
31 fallthrough
32 case UnixTimeFormatName:
33 return true
34 default:
35 return false
36 }
37}
38
39// FormatTime returns a string value of the time.
40func FormatTime(name string, t time.Time) string {
41 t = t.UTC()
42
43 switch name {
44 case RFC822TimeFormatName:
45 return t.Format(RFC822TimeFormat)
46 case ISO8601TimeFormatName:
47 return t.Format(ISO8601TimeFormat)
48 case UnixTimeFormatName:
49 return strconv.FormatInt(t.Unix(), 10)
50 default:
51 panic("unknown timestamp format name, " + name)
52 }
53}
54
55// ParseTime attempts to parse the time string using the given format. Returns
56// the parsed time, or an error if it could not be parsed.
57func ParseTime(formatName, value string) (time.Time, error) {
58 switch formatName {
59 case RFC822TimeFormatName:
60 return time.Parse(RFC822TimeFormat, value)
61 case ISO8601TimeFormatName:
62 return time.Parse(ISO8601TimeFormat, value)
63 case UnixTimeFormatName:
64 v, err := strconv.ParseFloat(value, 64)
65 if err != nil {
66 return time.Time{}, err
67 }
68 return time.Unix(int64(v), 0), nil
69 default:
70 panic("unknown timestamp format name, " + formatName)
71 }
72}
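
A short sketch of the three named formats applied to the instant used in the constants' doc comments above (the package name is illustrative):

package timestampexample

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func formatExamples() {
	t := time.Date(2014, time.April, 29, 18, 30, 38, 0, time.UTC)

	fmt.Println(protocol.FormatTime(protocol.RFC822TimeFormatName, t))  // Tue, 29 Apr 2014 18:30:38 GMT
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t)) // 2014-04-29T18:30:38Z
	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))    // 1398796238

	if parsed, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, "2014-04-29T18:30:38Z"); err == nil {
		fmt.Println(parsed.Equal(t)) // true
	}
}
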
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index 7091b45..07764c8 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -13,9 +13,13 @@ import (
13 "github.com/aws/aws-sdk-go/private/protocol" 13 "github.com/aws/aws-sdk-go/private/protocol"
14) 14)
15 15
16// BuildXML will serialize params into an xml.Encoder. 16// BuildXML will serialize params into an xml.Encoder. Error will be returned
17// Error will be returned if the serialization of any of the params or nested values fails. 17// if the serialization of any of the params or nested values fails.
18func BuildXML(params interface{}, e *xml.Encoder) error { 18func BuildXML(params interface{}, e *xml.Encoder) error {
19 return buildXML(params, e, false)
20}
21
22func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
19 b := xmlBuilder{encoder: e, namespaces: map[string]string{}} 23 b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
20 root := NewXMLElement(xml.Name{}) 24 root := NewXMLElement(xml.Name{})
21 if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { 25 if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
@@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error {
23 } 27 }
24 for _, c := range root.Children { 28 for _, c := range root.Children {
25 for _, v := range c { 29 for _, v := range c {
26 return StructToXML(e, v, false) 30 return StructToXML(e, v, sorted)
27 } 31 }
28 } 32 }
29 return nil 33 return nil
@@ -278,8 +282,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
278 case float32: 282 case float32:
279 str = strconv.FormatFloat(float64(converted), 'f', -1, 32) 283 str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
280 case time.Time: 284 case time.Time:
281 const ISO8601UTC = "2006-01-02T15:04:05Z" 285 format := tag.Get("timestampFormat")
282 str = converted.UTC().Format(ISO8601UTC) 286 if len(format) == 0 {
287 format = protocol.ISO8601TimeFormatName
288 }
289
290 str = protocol.FormatTime(format, converted)
283 default: 291 default:
284 return fmt.Errorf("unsupported value for param %s: %v (%s)", 292 return fmt.Errorf("unsupported value for param %s: %v (%s)",
285 tag.Get("locationName"), value.Interface(), value.Type().Name()) 293 tag.Get("locationName"), value.Interface(), value.Type().Name())
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
index 8758462..ff1ef68 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -9,6 +9,8 @@ import (
9 "strconv" 9 "strconv"
10 "strings" 10 "strings"
11 "time" 11 "time"
12
13 "github.com/aws/aws-sdk-go/private/protocol"
12) 14)
13 15
14// UnmarshalXML deserializes an xml.Decoder into the container v. V 16// UnmarshalXML deserializes an xml.Decoder into the container v. V
@@ -52,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
52 if t == "" { 54 if t == "" {
53 switch rtype.Kind() { 55 switch rtype.Kind() {
54 case reflect.Struct: 56 case reflect.Struct:
55 t = "structure" 57 // also it can't be a time object
58 if _, ok := r.Interface().(*time.Time); !ok {
59 t = "structure"
60 }
56 case reflect.Slice: 61 case reflect.Slice:
57 t = "list" 62 // also it can't be a byte slice
63 if _, ok := r.Interface().([]byte); !ok {
64 t = "list"
65 }
58 case reflect.Map: 66 case reflect.Map:
59 t = "map" 67 t = "map"
60 } 68 }
@@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
247 } 255 }
248 r.Set(reflect.ValueOf(&v)) 256 r.Set(reflect.ValueOf(&v))
249 case *time.Time: 257 case *time.Time:
250 const ISO8601UTC = "2006-01-02T15:04:05Z" 258 format := tag.Get("timestampFormat")
251 t, err := time.Parse(ISO8601UTC, node.Text) 259 if len(format) == 0 {
260 format = protocol.ISO8601TimeFormatName
261 }
262
263 t, err := protocol.ParseTime(format, node.Text)
252 if err != nil { 264 if err != nil {
253 return err 265 return err
254 } 266 }
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
index 3e970b6..515ce15 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode {
29 29
30// AddChild adds child to the XMLNode. 30// AddChild adds child to the XMLNode.
31func (n *XMLNode) AddChild(child *XMLNode) { 31func (n *XMLNode) AddChild(child *XMLNode) {
32 child.parent = n
32 if _, ok := n.Children[child.Name.Local]; !ok { 33 if _, ok := n.Children[child.Name.Local]; !ok {
33 n.Children[child.Name.Local] = []*XMLNode{} 34 n.Children[child.Name.Local] = []*XMLNode{}
34 } 35 }
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 52ac02c..0e999ca 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -3,14 +3,22 @@
3package s3 3package s3
4 4
5import ( 5import (
6 "bytes"
6 "fmt" 7 "fmt"
7 "io" 8 "io"
9 "sync"
10 "sync/atomic"
8 "time" 11 "time"
9 12
10 "github.com/aws/aws-sdk-go/aws" 13 "github.com/aws/aws-sdk-go/aws"
14 "github.com/aws/aws-sdk-go/aws/awserr"
11 "github.com/aws/aws-sdk-go/aws/awsutil" 15 "github.com/aws/aws-sdk-go/aws/awsutil"
16 "github.com/aws/aws-sdk-go/aws/client"
12 "github.com/aws/aws-sdk-go/aws/request" 17 "github.com/aws/aws-sdk-go/aws/request"
13 "github.com/aws/aws-sdk-go/private/protocol" 18 "github.com/aws/aws-sdk-go/private/protocol"
19 "github.com/aws/aws-sdk-go/private/protocol/eventstream"
20 "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
21 "github.com/aws/aws-sdk-go/private/protocol/rest"
14 "github.com/aws/aws-sdk-go/private/protocol/restxml" 22 "github.com/aws/aws-sdk-go/private/protocol/restxml"
15) 23)
16 24
@@ -18,19 +26,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
18 26
19// AbortMultipartUploadRequest generates a "aws/request.Request" representing the 27// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
20// client's request for the AbortMultipartUpload operation. The "output" return 28// client's request for the AbortMultipartUpload operation. The "output" return
21// value can be used to capture response data after the request's "Send" method 29// value will be populated with the request's response once the request completes
22// is called. 30// successfully.
23// 31//
24// See AbortMultipartUpload for usage and error information. 32// Use "Send" method on the returned Request to send the API call to the service.
33// The "output" return value is not valid until after Send returns without error.
25// 34//
26// Creating a request object using this method should be used when you want to inject 35// See AbortMultipartUpload for more information on using the AbortMultipartUpload
27// custom logic into the request's lifecycle using a custom handler, or if you want to 36// API call, and error handling.
28// access properties on the request object before or after sending the request. If 37//
29// you just want the service response, call the AbortMultipartUpload method directly 38// This method is useful when you want to inject custom logic or configuration
30// instead. 39// into the SDK's request lifecycle. Such as custom headers, or retry logic.
31// 40//
32// Note: You must call the "Send" method on the returned request object in order
33// to execute the request.
34// 41//
35// // Example sending a request using the AbortMultipartUploadRequest method. 42// // Example sending a request using the AbortMultipartUploadRequest method.
36// req, resp := client.AbortMultipartUploadRequest(params) 43// req, resp := client.AbortMultipartUploadRequest(params)
@@ -40,7 +47,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
40// fmt.Println(resp) 47// fmt.Println(resp)
41// } 48// }
42// 49//
43// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload 50// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
44func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { 51func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
45 op := &request.Operation{ 52 op := &request.Operation{
46 Name: opAbortMultipartUpload, 53 Name: opAbortMultipartUpload,
@@ -76,7 +83,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
76// * ErrCodeNoSuchUpload "NoSuchUpload" 83// * ErrCodeNoSuchUpload "NoSuchUpload"
77// The specified multipart upload does not exist. 84// The specified multipart upload does not exist.
78// 85//
79// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload 86// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
80func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { 87func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
81 req, out := c.AbortMultipartUploadRequest(input) 88 req, out := c.AbortMultipartUploadRequest(input)
82 return out, req.Send() 89 return out, req.Send()
@@ -102,19 +109,18 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
102 109
103// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the 110// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
104// client's request for the CompleteMultipartUpload operation. The "output" return 111// client's request for the CompleteMultipartUpload operation. The "output" return
105// value can be used to capture response data after the request's "Send" method 112// value will be populated with the request's response once the request completes
106// is called. 113// successfully.
114//
115// Use "Send" method on the returned Request to send the API call to the service.
116// The "output" return value is not valid until after Send returns without error.
107// 117//
108// See CompleteMultipartUpload for usage and error information. 118// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload
119// API call, and error handling.
109// 120//
110// Creating a request object using this method should be used when you want to inject 121// This method is useful when you want to inject custom logic or configuration
111// custom logic into the request's lifecycle using a custom handler, or if you want to 122// into the SDK's request lifecycle. Such as custom headers, or retry logic.
112// access properties on the request object before or after sending the request. If
113// you just want the service response, call the CompleteMultipartUpload method directly
114// instead.
115// 123//
116// Note: You must call the "Send" method on the returned request object in order
117// to execute the request.
118// 124//
119// // Example sending a request using the CompleteMultipartUploadRequest method. 125// // Example sending a request using the CompleteMultipartUploadRequest method.
120// req, resp := client.CompleteMultipartUploadRequest(params) 126// req, resp := client.CompleteMultipartUploadRequest(params)
@@ -124,7 +130,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
124// fmt.Println(resp) 130// fmt.Println(resp)
125// } 131// }
126// 132//
127// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload 133// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
128func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { 134func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
129 op := &request.Operation{ 135 op := &request.Operation{
130 Name: opCompleteMultipartUpload, 136 Name: opCompleteMultipartUpload,
@@ -151,7 +157,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
151// 157//
152// See the AWS API reference guide for Amazon Simple Storage Service's 158// See the AWS API reference guide for Amazon Simple Storage Service's
153// API operation CompleteMultipartUpload for usage and error information. 159// API operation CompleteMultipartUpload for usage and error information.
154// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload 160// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
155func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { 161func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
156 req, out := c.CompleteMultipartUploadRequest(input) 162 req, out := c.CompleteMultipartUploadRequest(input)
157 return out, req.Send() 163 return out, req.Send()
@@ -177,19 +183,18 @@ const opCopyObject = "CopyObject"
177 183
178// CopyObjectRequest generates a "aws/request.Request" representing the 184// CopyObjectRequest generates a "aws/request.Request" representing the
179// client's request for the CopyObject operation. The "output" return 185// client's request for the CopyObject operation. The "output" return
180// value can be used to capture response data after the request's "Send" method 186// value will be populated with the request's response once the request completes
181// is called. 187// successfully.
182// 188//
183// See CopyObject for usage and error information. 189// Use "Send" method on the returned Request to send the API call to the service.
190// The "output" return value is not valid until after Send returns without error.
184// 191//
185// Creating a request object using this method should be used when you want to inject 192// See CopyObject for more information on using the CopyObject
186// custom logic into the request's lifecycle using a custom handler, or if you want to 193// API call, and error handling.
187// access properties on the request object before or after sending the request. If 194//
188// you just want the service response, call the CopyObject method directly 195// This method is useful when you want to inject custom logic or configuration
189// instead. 196// into the SDK's request lifecycle. Such as custom headers, or retry logic.
190// 197//
191// Note: You must call the "Send" method on the returned request object in order
192// to execute the request.
193// 198//
194// // Example sending a request using the CopyObjectRequest method. 199// // Example sending a request using the CopyObjectRequest method.
195// req, resp := client.CopyObjectRequest(params) 200// req, resp := client.CopyObjectRequest(params)
@@ -199,7 +204,7 @@ const opCopyObject = "CopyObject"
199// fmt.Println(resp) 204// fmt.Println(resp)
200// } 205// }
201// 206//
202// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject 207// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
203func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { 208func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
204 op := &request.Operation{ 209 op := &request.Operation{
205 Name: opCopyObject, 210 Name: opCopyObject,
@@ -232,7 +237,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
232// The source object of the COPY operation is not in the active tier and is 237// The source object of the COPY operation is not in the active tier and is
233// only stored in Amazon Glacier. 238// only stored in Amazon Glacier.
234// 239//
235// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject 240// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
236func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { 241func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
237 req, out := c.CopyObjectRequest(input) 242 req, out := c.CopyObjectRequest(input)
238 return out, req.Send() 243 return out, req.Send()
@@ -258,19 +263,18 @@ const opCreateBucket = "CreateBucket"
258 263
259// CreateBucketRequest generates a "aws/request.Request" representing the 264// CreateBucketRequest generates a "aws/request.Request" representing the
260// client's request for the CreateBucket operation. The "output" return 265// client's request for the CreateBucket operation. The "output" return
261// value can be used to capture response data after the request's "Send" method 266// value will be populated with the request's response once the request completes
262// is called. 267// successfully.
268//
269// Use "Send" method on the returned Request to send the API call to the service.
270// The "output" return value is not valid until after Send returns without error.
263// 271//
264// See CreateBucket for usage and error information. 272// See CreateBucket for more information on using the CreateBucket
273// API call, and error handling.
265// 274//
266// Creating a request object using this method should be used when you want to inject 275// This method is useful when you want to inject custom logic or configuration
267// custom logic into the request's lifecycle using a custom handler, or if you want to 276// into the SDK's request lifecycle. Such as custom headers, or retry logic.
268// access properties on the request object before or after sending the request. If
269// you just want the service response, call the CreateBucket method directly
270// instead.
271// 277//
272// Note: You must call the "Send" method on the returned request object in order
273// to execute the request.
274// 278//
275// // Example sending a request using the CreateBucketRequest method. 279// // Example sending a request using the CreateBucketRequest method.
276// req, resp := client.CreateBucketRequest(params) 280// req, resp := client.CreateBucketRequest(params)
@@ -280,7 +284,7 @@ const opCreateBucket = "CreateBucket"
280// fmt.Println(resp) 284// fmt.Println(resp)
281// } 285// }
282// 286//
283// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket 287// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
284func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { 288func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
285 op := &request.Operation{ 289 op := &request.Operation{
286 Name: opCreateBucket, 290 Name: opCreateBucket,
@@ -315,7 +319,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
315// 319//
316// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" 320// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
317// 321//
318// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket 322// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
319func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { 323func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
320 req, out := c.CreateBucketRequest(input) 324 req, out := c.CreateBucketRequest(input)
321 return out, req.Send() 325 return out, req.Send()
@@ -341,19 +345,18 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
341 345
342// CreateMultipartUploadRequest generates a "aws/request.Request" representing the 346// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
343// client's request for the CreateMultipartUpload operation. The "output" return 347// client's request for the CreateMultipartUpload operation. The "output" return
344// value can be used to capture response data after the request's "Send" method 348// value will be populated with the request's response once the request completes
345// is called. 349// successfully.
346// 350//
347// See CreateMultipartUpload for usage and error information. 351// Use "Send" method on the returned Request to send the API call to the service.
352// The "output" return value is not valid until after Send returns without error.
348// 353//
349// Creating a request object using this method should be used when you want to inject 354// See CreateMultipartUpload for more information on using the CreateMultipartUpload
350// custom logic into the request's lifecycle using a custom handler, or if you want to 355// API call, and error handling.
351// access properties on the request object before or after sending the request. If 356//
352// you just want the service response, call the CreateMultipartUpload method directly 357// This method is useful when you want to inject custom logic or configuration
353// instead. 358// into the SDK's request lifecycle. Such as custom headers, or retry logic.
354// 359//
355// Note: You must call the "Send" method on the returned request object in order
356// to execute the request.
357// 360//
358// // Example sending a request using the CreateMultipartUploadRequest method. 361// // Example sending a request using the CreateMultipartUploadRequest method.
359// req, resp := client.CreateMultipartUploadRequest(params) 362// req, resp := client.CreateMultipartUploadRequest(params)
@@ -363,7 +366,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
363// fmt.Println(resp) 366// fmt.Println(resp)
364// } 367// }
365// 368//
366// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload 369// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
367func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { 370func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
368 op := &request.Operation{ 371 op := &request.Operation{
369 Name: opCreateMultipartUpload, 372 Name: opCreateMultipartUpload,
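The rewritten comments above describe the Request/Send pattern. Below is a minimal usage sketch of that pattern, assuming a configured session and credentials from the environment; the bucket, key, and header values are illustrative only and do not come from this change:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Assumes region and credentials are picked up from the environment.
        svc := s3.New(session.Must(session.NewSession()))

        // Build the request without sending it yet.
        req, out := svc.CreateMultipartUploadRequest(&s3.CreateMultipartUploadInput{
            Bucket: aws.String("example-bucket"), // illustrative name
            Key:    aws.String("example-key"),    // illustrative name
        })

        // Custom configuration injected before the call goes out,
        // e.g. an extra header (hypothetical header name).
        req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")

        // "out" is only populated after Send returns without error.
        if err := req.Send(); err != nil {
            fmt.Println("CreateMultipartUpload failed:", err)
            return
        }
        fmt.Println("upload ID:", aws.StringValue(out.UploadId))
    }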
@@ -396,7 +399,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
396// 399//
397// See the AWS API reference guide for Amazon Simple Storage Service's 400// See the AWS API reference guide for Amazon Simple Storage Service's
398// API operation CreateMultipartUpload for usage and error information. 401// API operation CreateMultipartUpload for usage and error information.
399// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload 402// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
400func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { 403func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
401 req, out := c.CreateMultipartUploadRequest(input) 404 req, out := c.CreateMultipartUploadRequest(input)
402 return out, req.Send() 405 return out, req.Send()
@@ -422,19 +425,18 @@ const opDeleteBucket = "DeleteBucket"
422 425
423// DeleteBucketRequest generates a "aws/request.Request" representing the 426// DeleteBucketRequest generates a "aws/request.Request" representing the
424// client's request for the DeleteBucket operation. The "output" return 427// client's request for the DeleteBucket operation. The "output" return
425// value can be used to capture response data after the request's "Send" method 428// value will be populated with the request's response once the request completes
426// is called. 429// successfully.
430//
431// Use the "Send" method on the returned Request to send the API call to the service.
432// The "output" return value is not valid until after Send returns without error.
427// 433//
428// See DeleteBucket for usage and error information. 434// See DeleteBucket for more information on using the DeleteBucket
435// API call, and error handling.
429// 436//
430// Creating a request object using this method should be used when you want to inject 437// This method is useful when you want to inject custom logic or configuration
431// custom logic into the request's lifecycle using a custom handler, or if you want to 438// into the SDK's request lifecycle, such as custom headers or retry logic.
432// access properties on the request object before or after sending the request. If
433// you just want the service response, call the DeleteBucket method directly
434// instead.
435// 439//
436// Note: You must call the "Send" method on the returned request object in order
437// to execute the request.
438// 440//
439// // Example sending a request using the DeleteBucketRequest method. 441// // Example sending a request using the DeleteBucketRequest method.
440// req, resp := client.DeleteBucketRequest(params) 442// req, resp := client.DeleteBucketRequest(params)
@@ -444,7 +446,7 @@ const opDeleteBucket = "DeleteBucket"
444// fmt.Println(resp) 446// fmt.Println(resp)
445// } 447// }
446// 448//
447// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket 449// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
448func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { 450func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
449 op := &request.Operation{ 451 op := &request.Operation{
450 Name: opDeleteBucket, 452 Name: opDeleteBucket,
@@ -474,7 +476,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
474// 476//
475// See the AWS API reference guide for Amazon Simple Storage Service's 477// See the AWS API reference guide for Amazon Simple Storage Service's
476// API operation DeleteBucket for usage and error information. 478// API operation DeleteBucket for usage and error information.
477// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket 479// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
478func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { 480func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
479 req, out := c.DeleteBucketRequest(input) 481 req, out := c.DeleteBucketRequest(input)
480 return out, req.Send() 482 return out, req.Send()
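When only the service response is needed, the comments point to calling the operation method directly. A brief sketch, assuming the same svc client and imports as the earlier CreateMultipartUpload sketch; the bucket name is illustrative:

    _, err := svc.DeleteBucket(&s3.DeleteBucketInput{
        Bucket: aws.String("example-bucket"), // illustrative name
    })
    if err != nil {
        fmt.Println("DeleteBucket failed:", err)
    }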
@@ -500,19 +502,18 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
500 502
501// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the 503// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
502// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return 504// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
503// value can be used to capture response data after the request's "Send" method 505// value will be populated with the request's response once the request completes
504// is called. 506// successfully.
505// 507//
506// See DeleteBucketAnalyticsConfiguration for usage and error information. 508// Use the "Send" method on the returned Request to send the API call to the service.
509// The "output" return value is not valid until after Send returns without error.
507// 510//
508// Creating a request object using this method should be used when you want to inject 511// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration
509// custom logic into the request's lifecycle using a custom handler, or if you want to 512// API call, and error handling.
510// access properties on the request object before or after sending the request. If 513//
511// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly 514// This method is useful when you want to inject custom logic or configuration
512// instead. 515// into the SDK's request lifecycle, such as custom headers or retry logic.
513// 516//
514// Note: You must call the "Send" method on the returned request object in order
515// to execute the request.
516// 517//
517// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 518// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
518// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) 519// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
@@ -522,7 +523,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
522// fmt.Println(resp) 523// fmt.Println(resp)
523// } 524// }
524// 525//
525// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration 526// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
526func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { 527func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
527 op := &request.Operation{ 528 op := &request.Operation{
528 Name: opDeleteBucketAnalyticsConfiguration, 529 Name: opDeleteBucketAnalyticsConfiguration,
@@ -552,7 +553,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
552// 553//
553// See the AWS API reference guide for Amazon Simple Storage Service's 554// See the AWS API reference guide for Amazon Simple Storage Service's
554// API operation DeleteBucketAnalyticsConfiguration for usage and error information. 555// API operation DeleteBucketAnalyticsConfiguration for usage and error information.
555// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration 556// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
556func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { 557func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
557 req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) 558 req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
558 return out, req.Send() 559 return out, req.Send()
@@ -578,19 +579,18 @@ const opDeleteBucketCors = "DeleteBucketCors"
578 579
579// DeleteBucketCorsRequest generates a "aws/request.Request" representing the 580// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
580// client's request for the DeleteBucketCors operation. The "output" return 581// client's request for the DeleteBucketCors operation. The "output" return
581// value can be used to capture response data after the request's "Send" method 582// value will be populated with the request's response once the request completes
582// is called. 583// successfully.
584//
585// Use the "Send" method on the returned Request to send the API call to the service.
586// The "output" return value is not valid until after Send returns without error.
583// 587//
584// See DeleteBucketCors for usage and error information. 588// See DeleteBucketCors for more information on using the DeleteBucketCors
589// API call, and error handling.
585// 590//
586// Creating a request object using this method should be used when you want to inject 591// This method is useful when you want to inject custom logic or configuration
587// custom logic into the request's lifecycle using a custom handler, or if you want to 592// into the SDK's request lifecycle, such as custom headers or retry logic.
588// access properties on the request object before or after sending the request. If
589// you just want the service response, call the DeleteBucketCors method directly
590// instead.
591// 593//
592// Note: You must call the "Send" method on the returned request object in order
593// to execute the request.
594// 594//
595// // Example sending a request using the DeleteBucketCorsRequest method. 595// // Example sending a request using the DeleteBucketCorsRequest method.
596// req, resp := client.DeleteBucketCorsRequest(params) 596// req, resp := client.DeleteBucketCorsRequest(params)
@@ -600,7 +600,7 @@ const opDeleteBucketCors = "DeleteBucketCors"
600// fmt.Println(resp) 600// fmt.Println(resp)
601// } 601// }
602// 602//
603// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors 603// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
604func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { 604func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
605 op := &request.Operation{ 605 op := &request.Operation{
606 Name: opDeleteBucketCors, 606 Name: opDeleteBucketCors,
@@ -629,7 +629,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
629// 629//
630// See the AWS API reference guide for Amazon Simple Storage Service's 630// See the AWS API reference guide for Amazon Simple Storage Service's
631// API operation DeleteBucketCors for usage and error information. 631// API operation DeleteBucketCors for usage and error information.
632// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors 632// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
633func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { 633func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
634 req, out := c.DeleteBucketCorsRequest(input) 634 req, out := c.DeleteBucketCorsRequest(input)
635 return out, req.Send() 635 return out, req.Send()
@@ -651,23 +651,98 @@ func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCor
651 return out, req.Send() 651 return out, req.Send()
652} 652}
653 653
654const opDeleteBucketEncryption = "DeleteBucketEncryption"
655
656// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the
657// client's request for the DeleteBucketEncryption operation. The "output" return
658// value will be populated with the request's response once the request completes
659// successfully.
660//
661// Use the "Send" method on the returned Request to send the API call to the service.
662// The "output" return value is not valid until after Send returns without error.
663//
664// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption
665// API call, and error handling.
666//
667// This method is useful when you want to inject custom logic or configuration
668// into the SDK's request lifecycle, such as custom headers or retry logic.
669//
670//
671// // Example sending a request using the DeleteBucketEncryptionRequest method.
672// req, resp := client.DeleteBucketEncryptionRequest(params)
673//
674// err := req.Send()
675// if err == nil { // resp is now filled
676// fmt.Println(resp)
677// }
678//
679// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption
680func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) {
681 op := &request.Operation{
682 Name: opDeleteBucketEncryption,
683 HTTPMethod: "DELETE",
684 HTTPPath: "/{Bucket}?encryption",
685 }
686
687 if input == nil {
688 input = &DeleteBucketEncryptionInput{}
689 }
690
691 output = &DeleteBucketEncryptionOutput{}
692 req = c.newRequest(op, input, output)
693 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
694 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
695 return
696}
697
698// DeleteBucketEncryption API operation for Amazon Simple Storage Service.
699//
700// Deletes the server-side encryption configuration from the bucket.
701//
702// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
703// with awserr.Error's Code and Message methods to get detailed information about
704// the error.
705//
706// See the AWS API reference guide for Amazon Simple Storage Service's
707// API operation DeleteBucketEncryption for usage and error information.
708// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption
709func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) {
710 req, out := c.DeleteBucketEncryptionRequest(input)
711 return out, req.Send()
712}
713
714// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of
715// the ability to pass a context and additional request options.
716//
717// See DeleteBucketEncryption for details on how to use this API operation.
718//
719// The context must be non-nil and will be used for request cancellation. If
720// the context is nil a panic will occur. In the future the SDK may create
721// sub-contexts for http.Requests. See https://golang.org/pkg/context/
722// for more information on using Contexts.
723func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) {
724 req, out := c.DeleteBucketEncryptionRequest(input)
725 req.SetContext(ctx)
726 req.ApplyOptions(opts...)
727 return out, req.Send()
728}
729
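A hedged usage sketch for the newly added DeleteBucketEncryption operation via its WithContext variant, assuming the same svc client as the earlier sketch plus the "context", "time", and "github.com/aws/aws-sdk-go/aws/awserr" imports; the bucket name and timeout are illustrative:

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := svc.DeleteBucketEncryptionWithContext(ctx, &s3.DeleteBucketEncryptionInput{
        Bucket: aws.String("example-bucket"), // illustrative name
    })
    if err != nil {
        // Service and SDK errors can be inspected via awserr.Error.
        if aerr, ok := err.(awserr.Error); ok {
            fmt.Println(aerr.Code(), aerr.Message())
        } else {
            fmt.Println(err)
        }
    }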
654const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" 730const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"
655 731
656// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the 732// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
657// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return 733// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
658// value can be used to capture response data after the request's "Send" method 734// value will be populated with the request's response once the request completes
659// is called. 735// successfully.
736//
737// Use the "Send" method on the returned Request to send the API call to the service.
738// The "output" return value is not valid until after Send returns without error.
660// 739//
661// See DeleteBucketInventoryConfiguration for usage and error information. 740// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration
741// API call, and error handling.
662// 742//
663// Creating a request object using this method should be used when you want to inject 743// This method is useful when you want to inject custom logic or configuration
664// custom logic into the request's lifecycle using a custom handler, or if you want to 744// into the SDK's request lifecycle, such as custom headers or retry logic.
665// access properties on the request object before or after sending the request. If
666// you just want the service response, call the DeleteBucketInventoryConfiguration method directly
667// instead.
668// 745//
669// Note: You must call the "Send" method on the returned request object in order
670// to execute the request.
671// 746//
672// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 747// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
673// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) 748// req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
@@ -677,7 +752,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration
677// fmt.Println(resp) 752// fmt.Println(resp)
678// } 753// }
679// 754//
680// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration 755// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
681func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { 756func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
682 op := &request.Operation{ 757 op := &request.Operation{
683 Name: opDeleteBucketInventoryConfiguration, 758 Name: opDeleteBucketInventoryConfiguration,
@@ -707,7 +782,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
707// 782//
708// See the AWS API reference guide for Amazon Simple Storage Service's 783// See the AWS API reference guide for Amazon Simple Storage Service's
709// API operation DeleteBucketInventoryConfiguration for usage and error information. 784// API operation DeleteBucketInventoryConfiguration for usage and error information.
710// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration 785// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
711func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { 786func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
712 req, out := c.DeleteBucketInventoryConfigurationRequest(input) 787 req, out := c.DeleteBucketInventoryConfigurationRequest(input)
713 return out, req.Send() 788 return out, req.Send()
@@ -733,19 +808,18 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
733 808
734// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the 809// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
735// client's request for the DeleteBucketLifecycle operation. The "output" return 810// client's request for the DeleteBucketLifecycle operation. The "output" return
736// value can be used to capture response data after the request's "Send" method 811// value will be populated with the request's response once the request completes
737// is called. 812// successfully.
813//
814// Use the "Send" method on the returned Request to send the API call to the service.
815// The "output" return value is not valid until after Send returns without error.
738// 816//
739// See DeleteBucketLifecycle for usage and error information. 817// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle
818// API call, and error handling.
740// 819//
741// Creating a request object using this method should be used when you want to inject 820// This method is useful when you want to inject custom logic or configuration
742// custom logic into the request's lifecycle using a custom handler, or if you want to 821// into the SDK's request lifecycle, such as custom headers or retry logic.
743// access properties on the request object before or after sending the request. If
744// you just want the service response, call the DeleteBucketLifecycle method directly
745// instead.
746// 822//
747// Note: You must call the "Send" method on the returned request object in order
748// to execute the request.
749// 823//
750// // Example sending a request using the DeleteBucketLifecycleRequest method. 824// // Example sending a request using the DeleteBucketLifecycleRequest method.
751// req, resp := client.DeleteBucketLifecycleRequest(params) 825// req, resp := client.DeleteBucketLifecycleRequest(params)
@@ -755,7 +829,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
755// fmt.Println(resp) 829// fmt.Println(resp)
756// } 830// }
757// 831//
758// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle 832// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
759func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { 833func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
760 op := &request.Operation{ 834 op := &request.Operation{
761 Name: opDeleteBucketLifecycle, 835 Name: opDeleteBucketLifecycle,
@@ -784,7 +858,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
784// 858//
785// See the AWS API reference guide for Amazon Simple Storage Service's 859// See the AWS API reference guide for Amazon Simple Storage Service's
786// API operation DeleteBucketLifecycle for usage and error information. 860// API operation DeleteBucketLifecycle for usage and error information.
787// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle 861// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
788func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { 862func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
789 req, out := c.DeleteBucketLifecycleRequest(input) 863 req, out := c.DeleteBucketLifecycleRequest(input)
790 return out, req.Send() 864 return out, req.Send()
@@ -810,19 +884,18 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
810 884
811// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the 885// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
812// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return 886// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
813// value can be used to capture response data after the request's "Send" method 887// value will be populated with the request's response once the request completes
814// is called. 888// successfully.
815// 889//
816// See DeleteBucketMetricsConfiguration for usage and error information. 890// Use the "Send" method on the returned Request to send the API call to the service.
891// The "output" return value is not valid until after Send returns without error.
817// 892//
818// Creating a request object using this method should be used when you want to inject 893// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration
819// custom logic into the request's lifecycle using a custom handler, or if you want to 894// API call, and error handling.
820// access properties on the request object before or after sending the request. If 895//
821// you just want the service response, call the DeleteBucketMetricsConfiguration method directly 896// This method is useful when you want to inject custom logic or configuration
822// instead. 897// into the SDK's request lifecycle, such as custom headers or retry logic.
823// 898//
824// Note: You must call the "Send" method on the returned request object in order
825// to execute the request.
826// 899//
827// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. 900// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
828// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) 901// req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
@@ -832,7 +905,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
832// fmt.Println(resp) 905// fmt.Println(resp)
833// } 906// }
834// 907//
835// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration 908// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
836func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { 909func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
837 op := &request.Operation{ 910 op := &request.Operation{
838 Name: opDeleteBucketMetricsConfiguration, 911 Name: opDeleteBucketMetricsConfiguration,
@@ -862,7 +935,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
862// 935//
863// See the AWS API reference guide for Amazon Simple Storage Service's 936// See the AWS API reference guide for Amazon Simple Storage Service's
864// API operation DeleteBucketMetricsConfiguration for usage and error information. 937// API operation DeleteBucketMetricsConfiguration for usage and error information.
865// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration 938// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
866func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { 939func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
867 req, out := c.DeleteBucketMetricsConfigurationRequest(input) 940 req, out := c.DeleteBucketMetricsConfigurationRequest(input)
868 return out, req.Send() 941 return out, req.Send()
@@ -888,19 +961,18 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
888 961
889// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the 962// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
890// client's request for the DeleteBucketPolicy operation. The "output" return 963// client's request for the DeleteBucketPolicy operation. The "output" return
891// value can be used to capture response data after the request's "Send" method 964// value will be populated with the request's response once the request completes
892// is called. 965// successfully.
966//
967// Use the "Send" method on the returned Request to send the API call to the service.
968// The "output" return value is not valid until after Send returns without error.
893// 969//
894// See DeleteBucketPolicy for usage and error information. 970// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy
971// API call, and error handling.
895// 972//
896// Creating a request object using this method should be used when you want to inject 973// This method is useful when you want to inject custom logic or configuration
897// custom logic into the request's lifecycle using a custom handler, or if you want to 974// into the SDK's request lifecycle, such as custom headers or retry logic.
898// access properties on the request object before or after sending the request. If
899// you just want the service response, call the DeleteBucketPolicy method directly
900// instead.
901// 975//
902// Note: You must call the "Send" method on the returned request object in order
903// to execute the request.
904// 976//
905// // Example sending a request using the DeleteBucketPolicyRequest method. 977// // Example sending a request using the DeleteBucketPolicyRequest method.
906// req, resp := client.DeleteBucketPolicyRequest(params) 978// req, resp := client.DeleteBucketPolicyRequest(params)
@@ -910,7 +982,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
910// fmt.Println(resp) 982// fmt.Println(resp)
911// } 983// }
912// 984//
913// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy 985// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
914func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { 986func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
915 op := &request.Operation{ 987 op := &request.Operation{
916 Name: opDeleteBucketPolicy, 988 Name: opDeleteBucketPolicy,
@@ -939,7 +1011,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
939// 1011//
940// See the AWS API reference guide for Amazon Simple Storage Service's 1012// See the AWS API reference guide for Amazon Simple Storage Service's
941// API operation DeleteBucketPolicy for usage and error information. 1013// API operation DeleteBucketPolicy for usage and error information.
942// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy 1014// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
943func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { 1015func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
944 req, out := c.DeleteBucketPolicyRequest(input) 1016 req, out := c.DeleteBucketPolicyRequest(input)
945 return out, req.Send() 1017 return out, req.Send()
@@ -965,19 +1037,18 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
965 1037
966// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the 1038// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
967// client's request for the DeleteBucketReplication operation. The "output" return 1039// client's request for the DeleteBucketReplication operation. The "output" return
968// value can be used to capture response data after the request's "Send" method 1040// value will be populated with the request's response once the request completes
969// is called. 1041// successfully.
970// 1042//
971// See DeleteBucketReplication for usage and error information. 1043// Use the "Send" method on the returned Request to send the API call to the service.
1044// The "output" return value is not valid until after Send returns without error.
972// 1045//
973// Creating a request object using this method should be used when you want to inject 1046// See DeleteBucketReplication for more information on using the DeleteBucketReplication
974// custom logic into the request's lifecycle using a custom handler, or if you want to 1047// API call, and error handling.
975// access properties on the request object before or after sending the request. If 1048//
976// you just want the service response, call the DeleteBucketReplication method directly 1049// This method is useful when you want to inject custom logic or configuration
977// instead. 1050// into the SDK's request lifecycle, such as custom headers or retry logic.
978// 1051//
979// Note: You must call the "Send" method on the returned request object in order
980// to execute the request.
981// 1052//
982// // Example sending a request using the DeleteBucketReplicationRequest method. 1053// // Example sending a request using the DeleteBucketReplicationRequest method.
983// req, resp := client.DeleteBucketReplicationRequest(params) 1054// req, resp := client.DeleteBucketReplicationRequest(params)
@@ -987,7 +1058,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
987// fmt.Println(resp) 1058// fmt.Println(resp)
988// } 1059// }
989// 1060//
990// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication 1061// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
991func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { 1062func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
992 op := &request.Operation{ 1063 op := &request.Operation{
993 Name: opDeleteBucketReplication, 1064 Name: opDeleteBucketReplication,
@@ -1016,7 +1087,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
1016// 1087//
1017// See the AWS API reference guide for Amazon Simple Storage Service's 1088// See the AWS API reference guide for Amazon Simple Storage Service's
1018// API operation DeleteBucketReplication for usage and error information. 1089// API operation DeleteBucketReplication for usage and error information.
1019// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication 1090// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
1020func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { 1091func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
1021 req, out := c.DeleteBucketReplicationRequest(input) 1092 req, out := c.DeleteBucketReplicationRequest(input)
1022 return out, req.Send() 1093 return out, req.Send()
@@ -1042,19 +1113,18 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
1042 1113
1043// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the 1114// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
1044// client's request for the DeleteBucketTagging operation. The "output" return 1115// client's request for the DeleteBucketTagging operation. The "output" return
1045// value can be used to capture response data after the request's "Send" method 1116// value will be populated with the request's response once the request completes
1046// is called. 1117// successfully.
1118//
1119// Use the "Send" method on the returned Request to send the API call to the service.
1120// The "output" return value is not valid until after Send returns without error.
1047// 1121//
1048// See DeleteBucketTagging for usage and error information. 1122// See DeleteBucketTagging for more information on using the DeleteBucketTagging
1123// API call, and error handling.
1049// 1124//
1050// Creating a request object using this method should be used when you want to inject 1125// This method is useful when you want to inject custom logic or configuration
1051// custom logic into the request's lifecycle using a custom handler, or if you want to 1126// into the SDK's request lifecycle, such as custom headers or retry logic.
1052// access properties on the request object before or after sending the request. If
1053// you just want the service response, call the DeleteBucketTagging method directly
1054// instead.
1055// 1127//
1056// Note: You must call the "Send" method on the returned request object in order
1057// to execute the request.
1058// 1128//
1059// // Example sending a request using the DeleteBucketTaggingRequest method. 1129// // Example sending a request using the DeleteBucketTaggingRequest method.
1060// req, resp := client.DeleteBucketTaggingRequest(params) 1130// req, resp := client.DeleteBucketTaggingRequest(params)
@@ -1064,7 +1134,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
1064// fmt.Println(resp) 1134// fmt.Println(resp)
1065// } 1135// }
1066// 1136//
1067// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging 1137// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
1068func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { 1138func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
1069 op := &request.Operation{ 1139 op := &request.Operation{
1070 Name: opDeleteBucketTagging, 1140 Name: opDeleteBucketTagging,
@@ -1093,7 +1163,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
1093// 1163//
1094// See the AWS API reference guide for Amazon Simple Storage Service's 1164// See the AWS API reference guide for Amazon Simple Storage Service's
1095// API operation DeleteBucketTagging for usage and error information. 1165// API operation DeleteBucketTagging for usage and error information.
1096// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging 1166// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
1097func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { 1167func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
1098 req, out := c.DeleteBucketTaggingRequest(input) 1168 req, out := c.DeleteBucketTaggingRequest(input)
1099 return out, req.Send() 1169 return out, req.Send()
@@ -1119,19 +1189,18 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
1119 1189
1120// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the 1190// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
1121// client's request for the DeleteBucketWebsite operation. The "output" return 1191// client's request for the DeleteBucketWebsite operation. The "output" return
1122// value can be used to capture response data after the request's "Send" method 1192// value will be populated with the request's response once the request completes
1123// is called. 1193// successfully.
1124// 1194//
1125// See DeleteBucketWebsite for usage and error information. 1195// Use the "Send" method on the returned Request to send the API call to the service.
1196// The "output" return value is not valid until after Send returns without error.
1126// 1197//
1127// Creating a request object using this method should be used when you want to inject 1198// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite
1128// custom logic into the request's lifecycle using a custom handler, or if you want to 1199// API call, and error handling.
1129// access properties on the request object before or after sending the request. If 1200//
1130// you just want the service response, call the DeleteBucketWebsite method directly 1201// This method is useful when you want to inject custom logic or configuration
1131// instead. 1202// into the SDK's request lifecycle, such as custom headers or retry logic.
1132// 1203//
1133// Note: You must call the "Send" method on the returned request object in order
1134// to execute the request.
1135// 1204//
1136// // Example sending a request using the DeleteBucketWebsiteRequest method. 1205// // Example sending a request using the DeleteBucketWebsiteRequest method.
1137// req, resp := client.DeleteBucketWebsiteRequest(params) 1206// req, resp := client.DeleteBucketWebsiteRequest(params)
@@ -1141,7 +1210,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
1141// fmt.Println(resp) 1210// fmt.Println(resp)
1142// } 1211// }
1143// 1212//
1144// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite 1213// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
1145func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { 1214func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
1146 op := &request.Operation{ 1215 op := &request.Operation{
1147 Name: opDeleteBucketWebsite, 1216 Name: opDeleteBucketWebsite,
@@ -1170,7 +1239,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
1170// 1239//
1171// See the AWS API reference guide for Amazon Simple Storage Service's 1240// See the AWS API reference guide for Amazon Simple Storage Service's
1172// API operation DeleteBucketWebsite for usage and error information. 1241// API operation DeleteBucketWebsite for usage and error information.
1173// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite 1242// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
1174func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { 1243func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
1175 req, out := c.DeleteBucketWebsiteRequest(input) 1244 req, out := c.DeleteBucketWebsiteRequest(input)
1176 return out, req.Send() 1245 return out, req.Send()
@@ -1196,19 +1265,18 @@ const opDeleteObject = "DeleteObject"
1196 1265
1197// DeleteObjectRequest generates a "aws/request.Request" representing the 1266// DeleteObjectRequest generates a "aws/request.Request" representing the
1198// client's request for the DeleteObject operation. The "output" return 1267// client's request for the DeleteObject operation. The "output" return
1199// value can be used to capture response data after the request's "Send" method 1268// value will be populated with the request's response once the request completes
1200// is called. 1269// successfully.
1270//
1271// Use the "Send" method on the returned Request to send the API call to the service.
1272// The "output" return value is not valid until after Send returns without error.
1201// 1273//
1202// See DeleteObject for usage and error information. 1274// See DeleteObject for more information on using the DeleteObject
1275// API call, and error handling.
1203// 1276//
1204// Creating a request object using this method should be used when you want to inject 1277// This method is useful when you want to inject custom logic or configuration
1205// custom logic into the request's lifecycle using a custom handler, or if you want to 1278// into the SDK's request lifecycle, such as custom headers or retry logic.
1206// access properties on the request object before or after sending the request. If
1207// you just want the service response, call the DeleteObject method directly
1208// instead.
1209// 1279//
1210// Note: You must call the "Send" method on the returned request object in order
1211// to execute the request.
1212// 1280//
1213// // Example sending a request using the DeleteObjectRequest method. 1281// // Example sending a request using the DeleteObjectRequest method.
1214// req, resp := client.DeleteObjectRequest(params) 1282// req, resp := client.DeleteObjectRequest(params)
@@ -1218,7 +1286,7 @@ const opDeleteObject = "DeleteObject"
1218// fmt.Println(resp) 1286// fmt.Println(resp)
1219// } 1287// }
1220// 1288//
1221// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject 1289// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
1222func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { 1290func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
1223 op := &request.Operation{ 1291 op := &request.Operation{
1224 Name: opDeleteObject, 1292 Name: opDeleteObject,
@@ -1247,7 +1315,7 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
1247// 1315//
1248// See the AWS API reference guide for Amazon Simple Storage Service's 1316// See the AWS API reference guide for Amazon Simple Storage Service's
1249// API operation DeleteObject for usage and error information. 1317// API operation DeleteObject for usage and error information.
1250// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject 1318// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
1251func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { 1319func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
1252 req, out := c.DeleteObjectRequest(input) 1320 req, out := c.DeleteObjectRequest(input)
1253 return out, req.Send() 1321 return out, req.Send()
@@ -1273,19 +1341,18 @@ const opDeleteObjectTagging = "DeleteObjectTagging"
1273 1341
1274// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the 1342// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
1275// client's request for the DeleteObjectTagging operation. The "output" return 1343// client's request for the DeleteObjectTagging operation. The "output" return
1276// value can be used to capture response data after the request's "Send" method 1344// value will be populated with the request's response once the request completes
1277// is called. 1345// successfully.
1278// 1346//
1279// See DeleteObjectTagging for usage and error information. 1347// Use the "Send" method on the returned Request to send the API call to the service.
1348// The "output" return value is not valid until after Send returns without error.
1280// 1349//
1281// Creating a request object using this method should be used when you want to inject 1350// See DeleteObjectTagging for more information on using the DeleteObjectTagging
1282// custom logic into the request's lifecycle using a custom handler, or if you want to 1351// API call, and error handling.
1283// access properties on the request object before or after sending the request. If 1352//
1284// you just want the service response, call the DeleteObjectTagging method directly 1353// This method is useful when you want to inject custom logic or configuration
1285// instead. 1354// into the SDK's request lifecycle, such as custom headers or retry logic.
1286// 1355//
1287// Note: You must call the "Send" method on the returned request object in order
1288// to execute the request.
1289// 1356//
1290// // Example sending a request using the DeleteObjectTaggingRequest method. 1357// // Example sending a request using the DeleteObjectTaggingRequest method.
1291// req, resp := client.DeleteObjectTaggingRequest(params) 1358// req, resp := client.DeleteObjectTaggingRequest(params)
@@ -1295,7 +1362,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging"
1295// fmt.Println(resp) 1362// fmt.Println(resp)
1296// } 1363// }
1297// 1364//
1298// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging 1365// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
1299func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { 1366func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
1300 op := &request.Operation{ 1367 op := &request.Operation{
1301 Name: opDeleteObjectTagging, 1368 Name: opDeleteObjectTagging,
@@ -1322,7 +1389,7 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
1322// 1389//
1323// See the AWS API reference guide for Amazon Simple Storage Service's 1390// See the AWS API reference guide for Amazon Simple Storage Service's
1324// API operation DeleteObjectTagging for usage and error information. 1391// API operation DeleteObjectTagging for usage and error information.
1325// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging 1392// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
1326func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { 1393func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
1327 req, out := c.DeleteObjectTaggingRequest(input) 1394 req, out := c.DeleteObjectTaggingRequest(input)
1328 return out, req.Send() 1395 return out, req.Send()
@@ -1348,19 +1415,18 @@ const opDeleteObjects = "DeleteObjects"
1348 1415
1349// DeleteObjectsRequest generates a "aws/request.Request" representing the 1416// DeleteObjectsRequest generates a "aws/request.Request" representing the
1350// client's request for the DeleteObjects operation. The "output" return 1417// client's request for the DeleteObjects operation. The "output" return
1351// value can be used to capture response data after the request's "Send" method 1418// value will be populated with the request's response once the request completes
1352// is called. 1419// successfully.
1420//
1421// Use the "Send" method on the returned Request to send the API call to the service.
1422// The "output" return value is not valid until after Send returns without error.
1353// 1423//
1354// See DeleteObjects for usage and error information. 1424// See DeleteObjects for more information on using the DeleteObjects
1425// API call, and error handling.
1355// 1426//
1356// Creating a request object using this method should be used when you want to inject 1427// This method is useful when you want to inject custom logic or configuration
1357// custom logic into the request's lifecycle using a custom handler, or if you want to 1428// into the SDK's request lifecycle, such as custom headers or retry logic.
1358// access properties on the request object before or after sending the request. If
1359// you just want the service response, call the DeleteObjects method directly
1360// instead.
1361// 1429//
1362// Note: You must call the "Send" method on the returned request object in order
1363// to execute the request.
1364// 1430//
1365// // Example sending a request using the DeleteObjectsRequest method. 1431// // Example sending a request using the DeleteObjectsRequest method.
1366// req, resp := client.DeleteObjectsRequest(params) 1432// req, resp := client.DeleteObjectsRequest(params)
@@ -1370,7 +1436,7 @@ const opDeleteObjects = "DeleteObjects"
1370// fmt.Println(resp) 1436// fmt.Println(resp)
1371// } 1437// }
1372// 1438//
1373// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects 1439// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
1374func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { 1440func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
1375 op := &request.Operation{ 1441 op := &request.Operation{
1376 Name: opDeleteObjects, 1442 Name: opDeleteObjects,
@@ -1398,7 +1464,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
1398// 1464//
1399// See the AWS API reference guide for Amazon Simple Storage Service's 1465// See the AWS API reference guide for Amazon Simple Storage Service's
1400// API operation DeleteObjects for usage and error information. 1466// API operation DeleteObjects for usage and error information.
1401// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects 1467// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
1402func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { 1468func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
1403 req, out := c.DeleteObjectsRequest(input) 1469 req, out := c.DeleteObjectsRequest(input)
1404 return out, req.Send() 1470 return out, req.Send()
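DeleteObjects removes a batch of keys in a single call. A brief sketch, assuming the same svc client and imports as the earlier sketch; bucket and key names are illustrative:

    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
        Bucket: aws.String("example-bucket"), // illustrative name
        Delete: &s3.Delete{
            Objects: []*s3.ObjectIdentifier{
                {Key: aws.String("a.txt")}, // illustrative keys
                {Key: aws.String("b.txt")},
            },
            Quiet: aws.Bool(true), // report only failures in the response
        },
    })
    if err == nil {
        fmt.Println("failed deletions:", len(out.Errors))
    }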
@@ -1424,19 +1490,18 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
1424 1490
1425// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the 1491// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
1426// client's request for the GetBucketAccelerateConfiguration operation. The "output" return 1492// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
1427// value can be used to capture response data after the request's "Send" method 1493// value will be populated with the request's response once the request completes
1428// is called. 1494// successfully.
1429// 1495//
1430// See GetBucketAccelerateConfiguration for usage and error information. 1496// Use the "Send" method on the returned Request to send the API call to the service.
1497// The "output" return value is not valid until after Send returns without error.
1431// 1498//
1432// Creating a request object using this method should be used when you want to inject 1499// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration
1433// custom logic into the request's lifecycle using a custom handler, or if you want to 1500// API call, and error handling.
1434// access properties on the request object before or after sending the request. If 1501//
1435// you just want the service response, call the GetBucketAccelerateConfiguration method directly 1502// This method is useful when you want to inject custom logic or configuration
1436// instead. 1503// into the SDK's request lifecycle, such as custom headers or retry logic.
1437// 1504//
1438// Note: You must call the "Send" method on the returned request object in order
1439// to execute the request.
1440// 1505//
1441// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 1506// // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
1442// req, resp := client.GetBucketAccelerateConfigurationRequest(params) 1507// req, resp := client.GetBucketAccelerateConfigurationRequest(params)
@@ -1446,7 +1511,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
1446// fmt.Println(resp) 1511// fmt.Println(resp)
1447// } 1512// }
1448// 1513//
1449// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration 1514// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
1450func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { 1515func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
1451 op := &request.Operation{ 1516 op := &request.Operation{
1452 Name: opGetBucketAccelerateConfiguration, 1517 Name: opGetBucketAccelerateConfiguration,
@@ -1473,7 +1538,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
1473// 1538//
1474// See the AWS API reference guide for Amazon Simple Storage Service's 1539// See the AWS API reference guide for Amazon Simple Storage Service's
1475// API operation GetBucketAccelerateConfiguration for usage and error information. 1540// API operation GetBucketAccelerateConfiguration for usage and error information.
1476// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration 1541// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
1477func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { 1542func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
1478 req, out := c.GetBucketAccelerateConfigurationRequest(input) 1543 req, out := c.GetBucketAccelerateConfigurationRequest(input)
1479 return out, req.Send() 1544 return out, req.Send()
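The reworded comments above describe the Request/Send pattern. A minimal sketch of what that looks like in practice, assuming the stock session constructor; the bucket name and the tracing header are hypothetical, while req.HTTPRequest and req.Send are the SDK surface being exercised.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	svc := s3.New(session.Must(session.NewSession()))

    	// Build the request without sending it yet.
    	req, out := svc.GetBucketAccelerateConfigurationRequest(&s3.GetBucketAccelerateConfigurationInput{
    		Bucket: aws.String("example-bucket"), // hypothetical bucket name
    	})

    	// Inject custom configuration before the call goes out, e.g. a header.
    	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo") // hypothetical header

    	// "out" is only valid once Send returns without error.
    	if err := req.Send(); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("accelerate status:", aws.StringValue(out.Status))
    }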
@@ -1499,19 +1564,18 @@ const opGetBucketAcl = "GetBucketAcl"
1499 1564
1500// GetBucketAclRequest generates a "aws/request.Request" representing the 1565// GetBucketAclRequest generates a "aws/request.Request" representing the
1501// client's request for the GetBucketAcl operation. The "output" return 1566// client's request for the GetBucketAcl operation. The "output" return
1502// value can be used to capture response data after the request's "Send" method 1567// value will be populated with the request's response once the request completes
1503// is called. 1568// successfully.
1569//
1570// Use the "Send" method on the returned Request to send the API call to the service.
1571// The "output" return value is not valid until after Send returns without error.
1504// 1572//
1505// See GetBucketAcl for usage and error information. 1573// See GetBucketAcl for more information on using the GetBucketAcl
1574// API call, and error handling.
1506// 1575//
1507// Creating a request object using this method should be used when you want to inject 1576// This method is useful when you want to inject custom logic or configuration
1508// custom logic into the request's lifecycle using a custom handler, or if you want to 1577// into the SDK's request lifecycle, such as custom headers or retry logic.
1509// access properties on the request object before or after sending the request. If
1510// you just want the service response, call the GetBucketAcl method directly
1511// instead.
1512// 1578//
1513// Note: You must call the "Send" method on the returned request object in order
1514// to execute the request.
1515// 1579//
1516// // Example sending a request using the GetBucketAclRequest method. 1580// // Example sending a request using the GetBucketAclRequest method.
1517// req, resp := client.GetBucketAclRequest(params) 1581// req, resp := client.GetBucketAclRequest(params)
@@ -1521,7 +1585,7 @@ const opGetBucketAcl = "GetBucketAcl"
1521// fmt.Println(resp) 1585// fmt.Println(resp)
1522// } 1586// }
1523// 1587//
1524// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl 1588// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
1525func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { 1589func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
1526 op := &request.Operation{ 1590 op := &request.Operation{
1527 Name: opGetBucketAcl, 1591 Name: opGetBucketAcl,
@@ -1548,7 +1612,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
1548// 1612//
1549// See the AWS API reference guide for Amazon Simple Storage Service's 1613// See the AWS API reference guide for Amazon Simple Storage Service's
1550// API operation GetBucketAcl for usage and error information. 1614// API operation GetBucketAcl for usage and error information.
1551// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl 1615// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
1552func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { 1616func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
1553 req, out := c.GetBucketAclRequest(input) 1617 req, out := c.GetBucketAclRequest(input)
1554 return out, req.Send() 1618 return out, req.Send()
@@ -1574,19 +1638,18 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
1574 1638
1575// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the 1639// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
1576// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return 1640// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
1577// value can be used to capture response data after the request's "Send" method 1641// value will be populated with the request's response once the request completes
1578// is called. 1642// successfully.
1579// 1643//
1580// See GetBucketAnalyticsConfiguration for usage and error information. 1644// Use the "Send" method on the returned Request to send the API call to the service.
1645// The "output" return value is not valid until after Send returns without error.
1581// 1646//
1582// Creating a request object using this method should be used when you want to inject 1647// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration
1583// custom logic into the request's lifecycle using a custom handler, or if you want to 1648// API call, and error handling.
1584// access properties on the request object before or after sending the request. If 1649//
1585// you just want the service response, call the GetBucketAnalyticsConfiguration method directly 1650// This method is useful when you want to inject custom logic or configuration
1586// instead. 1651// into the SDK's request lifecycle, such as custom headers or retry logic.
1587// 1652//
1588// Note: You must call the "Send" method on the returned request object in order
1589// to execute the request.
1590// 1653//
1591// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 1654// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
1592// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) 1655// req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
@@ -1596,7 +1659,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
1596// fmt.Println(resp) 1659// fmt.Println(resp)
1597// } 1660// }
1598// 1661//
1599// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration 1662// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
1600func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { 1663func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) {
1601 op := &request.Operation{ 1664 op := &request.Operation{
1602 Name: opGetBucketAnalyticsConfiguration, 1665 Name: opGetBucketAnalyticsConfiguration,
@@ -1624,7 +1687,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
1624// 1687//
1625// See the AWS API reference guide for Amazon Simple Storage Service's 1688// See the AWS API reference guide for Amazon Simple Storage Service's
1626// API operation GetBucketAnalyticsConfiguration for usage and error information. 1689// API operation GetBucketAnalyticsConfiguration for usage and error information.
1627// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration 1690// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
1628func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { 1691func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
1629 req, out := c.GetBucketAnalyticsConfigurationRequest(input) 1692 req, out := c.GetBucketAnalyticsConfigurationRequest(input)
1630 return out, req.Send() 1693 return out, req.Send()
@@ -1650,19 +1713,18 @@ const opGetBucketCors = "GetBucketCors"
1650 1713
1651// GetBucketCorsRequest generates a "aws/request.Request" representing the 1714// GetBucketCorsRequest generates a "aws/request.Request" representing the
1652// client's request for the GetBucketCors operation. The "output" return 1715// client's request for the GetBucketCors operation. The "output" return
1653// value can be used to capture response data after the request's "Send" method 1716// value will be populated with the request's response once the request completes
1654// is called. 1717// successfully.
1718//
1719// Use the "Send" method on the returned Request to send the API call to the service.
1720// The "output" return value is not valid until after Send returns without error.
1655// 1721//
1656// See GetBucketCors for usage and error information. 1722// See GetBucketCors for more information on using the GetBucketCors
1723// API call, and error handling.
1657// 1724//
1658// Creating a request object using this method should be used when you want to inject 1725// This method is useful when you want to inject custom logic or configuration
1659// custom logic into the request's lifecycle using a custom handler, or if you want to 1726// into the SDK's request lifecycle, such as custom headers or retry logic.
1660// access properties on the request object before or after sending the request. If
1661// you just want the service response, call the GetBucketCors method directly
1662// instead.
1663// 1727//
1664// Note: You must call the "Send" method on the returned request object in order
1665// to execute the request.
1666// 1728//
1667// // Example sending a request using the GetBucketCorsRequest method. 1729// // Example sending a request using the GetBucketCorsRequest method.
1668// req, resp := client.GetBucketCorsRequest(params) 1730// req, resp := client.GetBucketCorsRequest(params)
@@ -1672,7 +1734,7 @@ const opGetBucketCors = "GetBucketCors"
1672// fmt.Println(resp) 1734// fmt.Println(resp)
1673// } 1735// }
1674// 1736//
1675// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors 1737// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
1676func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { 1738func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
1677 op := &request.Operation{ 1739 op := &request.Operation{
1678 Name: opGetBucketCors, 1740 Name: opGetBucketCors,
@@ -1699,7 +1761,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
1699// 1761//
1700// See the AWS API reference guide for Amazon Simple Storage Service's 1762// See the AWS API reference guide for Amazon Simple Storage Service's
1701// API operation GetBucketCors for usage and error information. 1763// API operation GetBucketCors for usage and error information.
1702// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors 1764// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
1703func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { 1765func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
1704 req, out := c.GetBucketCorsRequest(input) 1766 req, out := c.GetBucketCorsRequest(input)
1705 return out, req.Send() 1767 return out, req.Send()
@@ -1721,23 +1783,96 @@ func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput
1721 return out, req.Send() 1783 return out, req.Send()
1722} 1784}
1723 1785
1786const opGetBucketEncryption = "GetBucketEncryption"
1787
1788// GetBucketEncryptionRequest generates a "aws/request.Request" representing the
1789// client's request for the GetBucketEncryption operation. The "output" return
1790// value will be populated with the request's response once the request completes
1791// successfully.
1792//
1793// Use the "Send" method on the returned Request to send the API call to the service.
1794// The "output" return value is not valid until after Send returns without error.
1795//
1796// See GetBucketEncryption for more information on using the GetBucketEncryption
1797// API call, and error handling.
1798//
1799// This method is useful when you want to inject custom logic or configuration
1800// into the SDK's request lifecycle, such as custom headers or retry logic.
1801//
1802//
1803// // Example sending a request using the GetBucketEncryptionRequest method.
1804// req, resp := client.GetBucketEncryptionRequest(params)
1805//
1806// err := req.Send()
1807// if err == nil { // resp is now filled
1808// fmt.Println(resp)
1809// }
1810//
1811// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption
1812func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) {
1813 op := &request.Operation{
1814 Name: opGetBucketEncryption,
1815 HTTPMethod: "GET",
1816 HTTPPath: "/{Bucket}?encryption",
1817 }
1818
1819 if input == nil {
1820 input = &GetBucketEncryptionInput{}
1821 }
1822
1823 output = &GetBucketEncryptionOutput{}
1824 req = c.newRequest(op, input, output)
1825 return
1826}
1827
1828// GetBucketEncryption API operation for Amazon Simple Storage Service.
1829//
1830// Returns the server-side encryption configuration of a bucket.
1831//
1832// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1833// with awserr.Error's Code and Message methods to get detailed information about
1834// the error.
1835//
1836// See the AWS API reference guide for Amazon Simple Storage Service's
1837// API operation GetBucketEncryption for usage and error information.
1838// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption
1839func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) {
1840 req, out := c.GetBucketEncryptionRequest(input)
1841 return out, req.Send()
1842}
1843
1844// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of
1845// the ability to pass a context and additional request options.
1846//
1847// See GetBucketEncryption for details on how to use this API operation.
1848//
1849// The context must be non-nil and will be used for request cancellation. If
1850// the context is nil a panic will occur. In the future the SDK may create
1851// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1852// for more information on using Contexts.
1853func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) {
1854 req, out := c.GetBucketEncryptionRequest(input)
1855 req.SetContext(ctx)
1856 req.ApplyOptions(opts...)
1857 return out, req.Send()
1858}
1859
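The newly added WithContext variant accepts any standard context.Context (aws.Context has the same method set), which is the usual way to bound the call with a timeout. A sketch with a hypothetical bucket name; the awserr type assertion mirrors the error-handling note in the comments above.

    package main

    import (
    	"context"
    	"fmt"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	svc := s3.New(session.Must(session.NewSession()))

    	// Cancel the API call if it takes longer than ten seconds.
    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()

    	out, err := svc.GetBucketEncryptionWithContext(ctx, &s3.GetBucketEncryptionInput{
    		Bucket: aws.String("example-bucket"), // hypothetical bucket name
    	})
    	if err != nil {
    		// Service and SDK failures come back as awserr.Error values.
    		if aerr, ok := err.(awserr.Error); ok {
    			log.Fatalf("GetBucketEncryption failed: %s: %s", aerr.Code(), aerr.Message())
    		}
    		log.Fatal(err)
    	}
    	fmt.Println(out)
    }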
1724const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" 1860const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
1725 1861
1726// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the 1862// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
1727// client's request for the GetBucketInventoryConfiguration operation. The "output" return 1863// client's request for the GetBucketInventoryConfiguration operation. The "output" return
1728// value can be used to capture response data after the request's "Send" method 1864// value will be populated with the request's response once the request completes
1729// is called. 1865// successfully.
1730// 1866//
1731// See GetBucketInventoryConfiguration for usage and error information. 1867// Use the "Send" method on the returned Request to send the API call to the service.
1868// The "output" return value is not valid until after Send returns without error.
1732// 1869//
1733// Creating a request object using this method should be used when you want to inject 1870// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration
1734// custom logic into the request's lifecycle using a custom handler, or if you want to 1871// API call, and error handling.
1735// access properties on the request object before or after sending the request. If 1872//
1736// you just want the service response, call the GetBucketInventoryConfiguration method directly 1873// This method is useful when you want to inject custom logic or configuration
1737// instead. 1874// into the SDK's request lifecycle, such as custom headers or retry logic.
1738// 1875//
1739// Note: You must call the "Send" method on the returned request object in order
1740// to execute the request.
1741// 1876//
1742// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 1877// // Example sending a request using the GetBucketInventoryConfigurationRequest method.
1743// req, resp := client.GetBucketInventoryConfigurationRequest(params) 1878// req, resp := client.GetBucketInventoryConfigurationRequest(params)
@@ -1747,7 +1882,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
1747// fmt.Println(resp) 1882// fmt.Println(resp)
1748// } 1883// }
1749// 1884//
1750// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration 1885// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
1751func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { 1886func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) {
1752 op := &request.Operation{ 1887 op := &request.Operation{
1753 Name: opGetBucketInventoryConfiguration, 1888 Name: opGetBucketInventoryConfiguration,
@@ -1775,7 +1910,7 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon
1775// 1910//
1776// See the AWS API reference guide for Amazon Simple Storage Service's 1911// See the AWS API reference guide for Amazon Simple Storage Service's
1777// API operation GetBucketInventoryConfiguration for usage and error information. 1912// API operation GetBucketInventoryConfiguration for usage and error information.
1778// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration 1913// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
1779func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { 1914func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
1780 req, out := c.GetBucketInventoryConfigurationRequest(input) 1915 req, out := c.GetBucketInventoryConfigurationRequest(input)
1781 return out, req.Send() 1916 return out, req.Send()
@@ -1801,19 +1936,18 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
1801 1936
1802// GetBucketLifecycleRequest generates a "aws/request.Request" representing the 1937// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
1803// client's request for the GetBucketLifecycle operation. The "output" return 1938// client's request for the GetBucketLifecycle operation. The "output" return
1804// value can be used to capture response data after the request's "Send" method 1939// value will be populated with the request's response once the request completes
1805// is called. 1940// successfully.
1941//
1942// Use the "Send" method on the returned Request to send the API call to the service.
1943// The "output" return value is not valid until after Send returns without error.
1806// 1944//
1807// See GetBucketLifecycle for usage and error information. 1945// See GetBucketLifecycle for more information on using the GetBucketLifecycle
1946// API call, and error handling.
1808// 1947//
1809// Creating a request object using this method should be used when you want to inject 1948// This method is useful when you want to inject custom logic or configuration
1810// custom logic into the request's lifecycle using a custom handler, or if you want to 1949// into the SDK's request lifecycle, such as custom headers or retry logic.
1811// access properties on the request object before or after sending the request. If
1812// you just want the service response, call the GetBucketLifecycle method directly
1813// instead.
1814// 1950//
1815// Note: You must call the "Send" method on the returned request object in order
1816// to execute the request.
1817// 1951//
1818// // Example sending a request using the GetBucketLifecycleRequest method. 1952// // Example sending a request using the GetBucketLifecycleRequest method.
1819// req, resp := client.GetBucketLifecycleRequest(params) 1953// req, resp := client.GetBucketLifecycleRequest(params)
@@ -1823,7 +1957,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
1823// fmt.Println(resp) 1957// fmt.Println(resp)
1824// } 1958// }
1825// 1959//
1826// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle 1960// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
1827func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { 1961func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
1828 if c.Client.Config.Logger != nil { 1962 if c.Client.Config.Logger != nil {
1829 c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") 1963 c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
@@ -1853,7 +1987,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
1853// 1987//
1854// See the AWS API reference guide for Amazon Simple Storage Service's 1988// See the AWS API reference guide for Amazon Simple Storage Service's
1855// API operation GetBucketLifecycle for usage and error information. 1989// API operation GetBucketLifecycle for usage and error information.
1856// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle 1990// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
1857func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { 1991func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
1858 req, out := c.GetBucketLifecycleRequest(input) 1992 req, out := c.GetBucketLifecycleRequest(input)
1859 return out, req.Send() 1993 return out, req.Send()
@@ -1879,19 +2013,18 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
1879 2013
1880// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the 2014// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
1881// client's request for the GetBucketLifecycleConfiguration operation. The "output" return 2015// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
1882// value can be used to capture response data after the request's "Send" method 2016// value will be populated with the request's response once the request completes
1883// is called. 2017// successfully.
1884// 2018//
1885// See GetBucketLifecycleConfiguration for usage and error information. 2019// Use the "Send" method on the returned Request to send the API call to the service.
2020// The "output" return value is not valid until after Send returns without error.
1886// 2021//
1887// Creating a request object using this method should be used when you want to inject 2022// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration
1888// custom logic into the request's lifecycle using a custom handler, or if you want to 2023// API call, and error handling.
1889// access properties on the request object before or after sending the request. If 2024//
1890// you just want the service response, call the GetBucketLifecycleConfiguration method directly 2025// This method is useful when you want to inject custom logic or configuration
1891// instead. 2026// into the SDK's request lifecycle, such as custom headers or retry logic.
1892// 2027//
1893// Note: You must call the "Send" method on the returned request object in order
1894// to execute the request.
1895// 2028//
1896// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 2029// // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
1897// req, resp := client.GetBucketLifecycleConfigurationRequest(params) 2030// req, resp := client.GetBucketLifecycleConfigurationRequest(params)
@@ -1901,7 +2034,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
1901// fmt.Println(resp) 2034// fmt.Println(resp)
1902// } 2035// }
1903// 2036//
1904// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration 2037// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
1905func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { 2038func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
1906 op := &request.Operation{ 2039 op := &request.Operation{
1907 Name: opGetBucketLifecycleConfiguration, 2040 Name: opGetBucketLifecycleConfiguration,
@@ -1928,7 +2061,7 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
1928// 2061//
1929// See the AWS API reference guide for Amazon Simple Storage Service's 2062// See the AWS API reference guide for Amazon Simple Storage Service's
1930// API operation GetBucketLifecycleConfiguration for usage and error information. 2063// API operation GetBucketLifecycleConfiguration for usage and error information.
1931// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration 2064// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
1932func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { 2065func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
1933 req, out := c.GetBucketLifecycleConfigurationRequest(input) 2066 req, out := c.GetBucketLifecycleConfigurationRequest(input)
1934 return out, req.Send() 2067 return out, req.Send()
@@ -1954,19 +2087,18 @@ const opGetBucketLocation = "GetBucketLocation"
1954 2087
1955// GetBucketLocationRequest generates a "aws/request.Request" representing the 2088// GetBucketLocationRequest generates a "aws/request.Request" representing the
1956// client's request for the GetBucketLocation operation. The "output" return 2089// client's request for the GetBucketLocation operation. The "output" return
1957// value can be used to capture response data after the request's "Send" method 2090// value will be populated with the request's response once the request completes
1958// is called. 2091// successfully.
2092//
2093// Use the "Send" method on the returned Request to send the API call to the service.
2094// The "output" return value is not valid until after Send returns without error.
1959// 2095//
1960// See GetBucketLocation for usage and error information. 2096// See GetBucketLocation for more information on using the GetBucketLocation
2097// API call, and error handling.
1961// 2098//
1962// Creating a request object using this method should be used when you want to inject 2099// This method is useful when you want to inject custom logic or configuration
1963// custom logic into the request's lifecycle using a custom handler, or if you want to 2100// into the SDK's request lifecycle, such as custom headers or retry logic.
1964// access properties on the request object before or after sending the request. If
1965// you just want the service response, call the GetBucketLocation method directly
1966// instead.
1967// 2101//
1968// Note: You must call the "Send" method on the returned request object in order
1969// to execute the request.
1970// 2102//
1971// // Example sending a request using the GetBucketLocationRequest method. 2103// // Example sending a request using the GetBucketLocationRequest method.
1972// req, resp := client.GetBucketLocationRequest(params) 2104// req, resp := client.GetBucketLocationRequest(params)
@@ -1976,7 +2108,7 @@ const opGetBucketLocation = "GetBucketLocation"
1976// fmt.Println(resp) 2108// fmt.Println(resp)
1977// } 2109// }
1978// 2110//
1979// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation 2111// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
1980func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { 2112func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
1981 op := &request.Operation{ 2113 op := &request.Operation{
1982 Name: opGetBucketLocation, 2114 Name: opGetBucketLocation,
@@ -2003,7 +2135,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
2003// 2135//
2004// See the AWS API reference guide for Amazon Simple Storage Service's 2136// See the AWS API reference guide for Amazon Simple Storage Service's
2005// API operation GetBucketLocation for usage and error information. 2137// API operation GetBucketLocation for usage and error information.
2006// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation 2138// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
2007func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { 2139func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
2008 req, out := c.GetBucketLocationRequest(input) 2140 req, out := c.GetBucketLocationRequest(input)
2009 return out, req.Send() 2141 return out, req.Send()
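Worth noting for GetBucketLocation: the region comes back in the LocationConstraint field, and under the long-standing S3 convention buckets in us-east-1 return an empty constraint, so callers typically normalize it. A minimal sketch, assuming that convention and a hypothetical bucket name.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	svc := s3.New(session.Must(session.NewSession()))

    	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
    		Bucket: aws.String("example-bucket"), // hypothetical bucket name
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// An empty LocationConstraint conventionally means us-east-1.
    	region := aws.StringValue(out.LocationConstraint)
    	if region == "" {
    		region = "us-east-1"
    	}
    	fmt.Println("bucket region:", region)
    }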
@@ -2029,19 +2161,18 @@ const opGetBucketLogging = "GetBucketLogging"
2029 2161
2030// GetBucketLoggingRequest generates a "aws/request.Request" representing the 2162// GetBucketLoggingRequest generates a "aws/request.Request" representing the
2031// client's request for the GetBucketLogging operation. The "output" return 2163// client's request for the GetBucketLogging operation. The "output" return
2032// value can be used to capture response data after the request's "Send" method 2164// value will be populated with the request's response once the request completes
2033// is called. 2165// successfully.
2034// 2166//
2035// See GetBucketLogging for usage and error information. 2167// Use the "Send" method on the returned Request to send the API call to the service.
2168// The "output" return value is not valid until after Send returns without error.
2036// 2169//
2037// Creating a request object using this method should be used when you want to inject 2170// See GetBucketLogging for more information on using the GetBucketLogging
2038// custom logic into the request's lifecycle using a custom handler, or if you want to 2171// API call, and error handling.
2039// access properties on the request object before or after sending the request. If 2172//
2040// you just want the service response, call the GetBucketLogging method directly 2173// This method is useful when you want to inject custom logic or configuration
2041// instead. 2174// into the SDK's request lifecycle, such as custom headers or retry logic.
2042// 2175//
2043// Note: You must call the "Send" method on the returned request object in order
2044// to execute the request.
2045// 2176//
2046// // Example sending a request using the GetBucketLoggingRequest method. 2177// // Example sending a request using the GetBucketLoggingRequest method.
2047// req, resp := client.GetBucketLoggingRequest(params) 2178// req, resp := client.GetBucketLoggingRequest(params)
@@ -2051,7 +2182,7 @@ const opGetBucketLogging = "GetBucketLogging"
2051// fmt.Println(resp) 2182// fmt.Println(resp)
2052// } 2183// }
2053// 2184//
2054// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging 2185// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
2055func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { 2186func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
2056 op := &request.Operation{ 2187 op := &request.Operation{
2057 Name: opGetBucketLogging, 2188 Name: opGetBucketLogging,
@@ -2079,7 +2210,7 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
2079// 2210//
2080// See the AWS API reference guide for Amazon Simple Storage Service's 2211// See the AWS API reference guide for Amazon Simple Storage Service's
2081// API operation GetBucketLogging for usage and error information. 2212// API operation GetBucketLogging for usage and error information.
2082// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging 2213// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
2083func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { 2214func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
2084 req, out := c.GetBucketLoggingRequest(input) 2215 req, out := c.GetBucketLoggingRequest(input)
2085 return out, req.Send() 2216 return out, req.Send()
@@ -2105,19 +2236,18 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
2105 2236
2106// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the 2237// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
2107// client's request for the GetBucketMetricsConfiguration operation. The "output" return 2238// client's request for the GetBucketMetricsConfiguration operation. The "output" return
2108// value can be used to capture response data after the request's "Send" method 2239// value will be populated with the request's response once the request completes
2109// is called. 2240// successfully.
2241//
2242// Use the "Send" method on the returned Request to send the API call to the service.
2243// The "output" return value is not valid until after Send returns without error.
2110// 2244//
2111// See GetBucketMetricsConfiguration for usage and error information. 2245// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration
2246// API call, and error handling.
2112// 2247//
2113// Creating a request object using this method should be used when you want to inject 2248// This method is useful when you want to inject custom logic or configuration
2114// custom logic into the request's lifecycle using a custom handler, or if you want to 2249// into the SDK's request lifecycle, such as custom headers or retry logic.
2115// access properties on the request object before or after sending the request. If
2116// you just want the service response, call the GetBucketMetricsConfiguration method directly
2117// instead.
2118// 2250//
2119// Note: You must call the "Send" method on the returned request object in order
2120// to execute the request.
2121// 2251//
2122// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 2252// // Example sending a request using the GetBucketMetricsConfigurationRequest method.
2123// req, resp := client.GetBucketMetricsConfigurationRequest(params) 2253// req, resp := client.GetBucketMetricsConfigurationRequest(params)
@@ -2127,7 +2257,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
2127// fmt.Println(resp) 2257// fmt.Println(resp)
2128// } 2258// }
2129// 2259//
2130// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration 2260// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
2131func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { 2261func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) {
2132 op := &request.Operation{ 2262 op := &request.Operation{
2133 Name: opGetBucketMetricsConfiguration, 2263 Name: opGetBucketMetricsConfiguration,
@@ -2155,7 +2285,7 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu
2155// 2285//
2156// See the AWS API reference guide for Amazon Simple Storage Service's 2286// See the AWS API reference guide for Amazon Simple Storage Service's
2157// API operation GetBucketMetricsConfiguration for usage and error information. 2287// API operation GetBucketMetricsConfiguration for usage and error information.
2158// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration 2288// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
2159func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { 2289func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
2160 req, out := c.GetBucketMetricsConfigurationRequest(input) 2290 req, out := c.GetBucketMetricsConfigurationRequest(input)
2161 return out, req.Send() 2291 return out, req.Send()
@@ -2181,19 +2311,18 @@ const opGetBucketNotification = "GetBucketNotification"
2181 2311
2182// GetBucketNotificationRequest generates a "aws/request.Request" representing the 2312// GetBucketNotificationRequest generates a "aws/request.Request" representing the
2183// client's request for the GetBucketNotification operation. The "output" return 2313// client's request for the GetBucketNotification operation. The "output" return
2184// value can be used to capture response data after the request's "Send" method 2314// value will be populated with the request's response once the request completes
2185// is called. 2315// successfully.
2186// 2316//
2187// See GetBucketNotification for usage and error information. 2317// Use the "Send" method on the returned Request to send the API call to the service.
2318// The "output" return value is not valid until after Send returns without error.
2188// 2319//
2189// Creating a request object using this method should be used when you want to inject 2320// See GetBucketNotification for more information on using the GetBucketNotification
2190// custom logic into the request's lifecycle using a custom handler, or if you want to 2321// API call, and error handling.
2191// access properties on the request object before or after sending the request. If 2322//
2192// you just want the service response, call the GetBucketNotification method directly 2323// This method is useful when you want to inject custom logic or configuration
2193// instead. 2324// into the SDK's request lifecycle, such as custom headers or retry logic.
2194// 2325//
2195// Note: You must call the "Send" method on the returned request object in order
2196// to execute the request.
2197// 2326//
2198// // Example sending a request using the GetBucketNotificationRequest method. 2327// // Example sending a request using the GetBucketNotificationRequest method.
2199// req, resp := client.GetBucketNotificationRequest(params) 2328// req, resp := client.GetBucketNotificationRequest(params)
@@ -2203,7 +2332,7 @@ const opGetBucketNotification = "GetBucketNotification"
2203// fmt.Println(resp) 2332// fmt.Println(resp)
2204// } 2333// }
2205// 2334//
2206// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification 2335// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
2207func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { 2336func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
2208 if c.Client.Config.Logger != nil { 2337 if c.Client.Config.Logger != nil {
2209 c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") 2338 c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
@@ -2233,7 +2362,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
2233// 2362//
2234// See the AWS API reference guide for Amazon Simple Storage Service's 2363// See the AWS API reference guide for Amazon Simple Storage Service's
2235// API operation GetBucketNotification for usage and error information. 2364// API operation GetBucketNotification for usage and error information.
2236// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification 2365// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
2237func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { 2366func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
2238 req, out := c.GetBucketNotificationRequest(input) 2367 req, out := c.GetBucketNotificationRequest(input)
2239 return out, req.Send() 2368 return out, req.Send()
@@ -2259,19 +2388,18 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
2259 2388
2260// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the 2389// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
2261// client's request for the GetBucketNotificationConfiguration operation. The "output" return 2390// client's request for the GetBucketNotificationConfiguration operation. The "output" return
2262// value can be used to capture response data after the request's "Send" method 2391// value will be populated with the request's response once the request completes
2263// is called. 2392// successfully.
2393//
2394// Use the "Send" method on the returned Request to send the API call to the service.
2395// The "output" return value is not valid until after Send returns without error.
2264// 2396//
2265// See GetBucketNotificationConfiguration for usage and error information. 2397// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration
2398// API call, and error handling.
2266// 2399//
2267// Creating a request object using this method should be used when you want to inject 2400// This method is useful when you want to inject custom logic or configuration
2268// custom logic into the request's lifecycle using a custom handler, or if you want to 2401// into the SDK's request lifecycle, such as custom headers or retry logic.
2269// access properties on the request object before or after sending the request. If
2270// you just want the service response, call the GetBucketNotificationConfiguration method directly
2271// instead.
2272// 2402//
2273// Note: You must call the "Send" method on the returned request object in order
2274// to execute the request.
2275// 2403//
2276// // Example sending a request using the GetBucketNotificationConfigurationRequest method. 2404// // Example sending a request using the GetBucketNotificationConfigurationRequest method.
2277// req, resp := client.GetBucketNotificationConfigurationRequest(params) 2405// req, resp := client.GetBucketNotificationConfigurationRequest(params)
@@ -2281,7 +2409,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
2281// fmt.Println(resp) 2409// fmt.Println(resp)
2282// } 2410// }
2283// 2411//
2284// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration 2412// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
2285func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { 2413func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
2286 op := &request.Operation{ 2414 op := &request.Operation{
2287 Name: opGetBucketNotificationConfiguration, 2415 Name: opGetBucketNotificationConfiguration,
@@ -2308,7 +2436,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
2308// 2436//
2309// See the AWS API reference guide for Amazon Simple Storage Service's 2437// See the AWS API reference guide for Amazon Simple Storage Service's
2310// API operation GetBucketNotificationConfiguration for usage and error information. 2438// API operation GetBucketNotificationConfiguration for usage and error information.
2311// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration 2439// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
2312func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { 2440func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
2313 req, out := c.GetBucketNotificationConfigurationRequest(input) 2441 req, out := c.GetBucketNotificationConfigurationRequest(input)
2314 return out, req.Send() 2442 return out, req.Send()
@@ -2334,19 +2462,18 @@ const opGetBucketPolicy = "GetBucketPolicy"
2334 2462
2335// GetBucketPolicyRequest generates a "aws/request.Request" representing the 2463// GetBucketPolicyRequest generates a "aws/request.Request" representing the
2336// client's request for the GetBucketPolicy operation. The "output" return 2464// client's request for the GetBucketPolicy operation. The "output" return
2337// value can be used to capture response data after the request's "Send" method 2465// value will be populated with the request's response once the request completes
2338// is called. 2466// successfully.
2339// 2467//
2340// See GetBucketPolicy for usage and error information. 2468// Use the "Send" method on the returned Request to send the API call to the service.
2469// The "output" return value is not valid until after Send returns without error.
2341// 2470//
2342// Creating a request object using this method should be used when you want to inject 2471// See GetBucketPolicy for more information on using the GetBucketPolicy
2343// custom logic into the request's lifecycle using a custom handler, or if you want to 2472// API call, and error handling.
2344// access properties on the request object before or after sending the request. If 2473//
2345// you just want the service response, call the GetBucketPolicy method directly 2474// This method is useful when you want to inject custom logic or configuration
2346// instead. 2475// into the SDK's request lifecycle, such as custom headers or retry logic.
2347// 2476//
2348// Note: You must call the "Send" method on the returned request object in order
2349// to execute the request.
2350// 2477//
2351// // Example sending a request using the GetBucketPolicyRequest method. 2478// // Example sending a request using the GetBucketPolicyRequest method.
2352// req, resp := client.GetBucketPolicyRequest(params) 2479// req, resp := client.GetBucketPolicyRequest(params)
@@ -2356,7 +2483,7 @@ const opGetBucketPolicy = "GetBucketPolicy"
2356// fmt.Println(resp) 2483// fmt.Println(resp)
2357// } 2484// }
2358// 2485//
2359// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy 2486// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
2360func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { 2487func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
2361 op := &request.Operation{ 2488 op := &request.Operation{
2362 Name: opGetBucketPolicy, 2489 Name: opGetBucketPolicy,
@@ -2383,7 +2510,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
2383// 2510//
2384// See the AWS API reference guide for Amazon Simple Storage Service's 2511// See the AWS API reference guide for Amazon Simple Storage Service's
2385// API operation GetBucketPolicy for usage and error information. 2512// API operation GetBucketPolicy for usage and error information.
2386// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy 2513// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
2387func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { 2514func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
2388 req, out := c.GetBucketPolicyRequest(input) 2515 req, out := c.GetBucketPolicyRequest(input)
2389 return out, req.Send() 2516 return out, req.Send()
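GetBucketPolicy returns the bucket policy document itself; in this SDK the output carries it as a string field, so fetching and printing it looks roughly like the sketch below (stock session setup, hypothetical bucket name).

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	svc := s3.New(session.Must(session.NewSession()))

    	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
    		Bucket: aws.String("example-bucket"), // hypothetical bucket name
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Policy holds the raw JSON policy document as a string.
    	fmt.Println(aws.StringValue(out.Policy))
    }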
@@ -2409,19 +2536,18 @@ const opGetBucketReplication = "GetBucketReplication"
2409 2536
2410// GetBucketReplicationRequest generates a "aws/request.Request" representing the 2537// GetBucketReplicationRequest generates a "aws/request.Request" representing the
2411// client's request for the GetBucketReplication operation. The "output" return 2538// client's request for the GetBucketReplication operation. The "output" return
2412// value can be used to capture response data after the request's "Send" method 2539// value will be populated with the request's response once the request completes
2413// is called. 2540// successfully.
2541//
2542// Use the "Send" method on the returned Request to send the API call to the service.
2543// The "output" return value is not valid until after Send returns without error.
2414// 2544//
2415// See GetBucketReplication for usage and error information. 2545// See GetBucketReplication for more information on using the GetBucketReplication
2546// API call, and error handling.
2416// 2547//
2417// Creating a request object using this method should be used when you want to inject 2548// This method is useful when you want to inject custom logic or configuration
2418// custom logic into the request's lifecycle using a custom handler, or if you want to 2549// into the SDK's request lifecycle, such as custom headers or retry logic.
2419// access properties on the request object before or after sending the request. If
2420// you just want the service response, call the GetBucketReplication method directly
2421// instead.
2422// 2550//
2423// Note: You must call the "Send" method on the returned request object in order
2424// to execute the request.
2425// 2551//
2426// // Example sending a request using the GetBucketReplicationRequest method. 2552// // Example sending a request using the GetBucketReplicationRequest method.
2427// req, resp := client.GetBucketReplicationRequest(params) 2553// req, resp := client.GetBucketReplicationRequest(params)
@@ -2431,7 +2557,7 @@ const opGetBucketReplication = "GetBucketReplication"
2431// fmt.Println(resp) 2557// fmt.Println(resp)
2432// } 2558// }
2433// 2559//
2434// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication 2560// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
2435func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { 2561func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
2436 op := &request.Operation{ 2562 op := &request.Operation{
2437 Name: opGetBucketReplication, 2563 Name: opGetBucketReplication,
@@ -2458,7 +2584,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
2458// 2584//
2459// See the AWS API reference guide for Amazon Simple Storage Service's 2585// See the AWS API reference guide for Amazon Simple Storage Service's
2460// API operation GetBucketReplication for usage and error information. 2586// API operation GetBucketReplication for usage and error information.
2461// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication 2587// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
2462func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { 2588func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
2463 req, out := c.GetBucketReplicationRequest(input) 2589 req, out := c.GetBucketReplicationRequest(input)
2464 return out, req.Send() 2590 return out, req.Send()
@@ -2484,19 +2610,18 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
2484 2610
2485// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the 2611// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
2486// client's request for the GetBucketRequestPayment operation. The "output" return 2612// client's request for the GetBucketRequestPayment operation. The "output" return
2487// value can be used to capture response data after the request's "Send" method 2613// value will be populated with the request's response once the request completes
2488// is called. 2614// successfully.
2489// 2615//
2490// See GetBucketRequestPayment for usage and error information. 2616// Use the "Send" method on the returned Request to send the API call to the service.
2617// The "output" return value is not valid until after Send returns without error.
2491// 2618//
2492// Creating a request object using this method should be used when you want to inject 2619// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment
2493// custom logic into the request's lifecycle using a custom handler, or if you want to 2620// API call, and error handling.
2494// access properties on the request object before or after sending the request. If 2621//
2495// you just want the service response, call the GetBucketRequestPayment method directly 2622// This method is useful when you want to inject custom logic or configuration
2496// instead. 2623// into the SDK's request lifecycle, such as custom headers or retry logic.
2497// 2624//
2498// Note: You must call the "Send" method on the returned request object in order
2499// to execute the request.
2500// 2625//
2501// // Example sending a request using the GetBucketRequestPaymentRequest method. 2626// // Example sending a request using the GetBucketRequestPaymentRequest method.
2502// req, resp := client.GetBucketRequestPaymentRequest(params) 2627// req, resp := client.GetBucketRequestPaymentRequest(params)
@@ -2506,7 +2631,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
2506// fmt.Println(resp) 2631// fmt.Println(resp)
2507// } 2632// }
2508// 2633//
2509// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment 2634// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
2510func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { 2635func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
2511 op := &request.Operation{ 2636 op := &request.Operation{
2512 Name: opGetBucketRequestPayment, 2637 Name: opGetBucketRequestPayment,
@@ -2533,7 +2658,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput)
2533// 2658//
2534// See the AWS API reference guide for Amazon Simple Storage Service's 2659// See the AWS API reference guide for Amazon Simple Storage Service's
2535// API operation GetBucketRequestPayment for usage and error information. 2660// API operation GetBucketRequestPayment for usage and error information.
2536// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment 2661// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
2537func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { 2662func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
2538 req, out := c.GetBucketRequestPaymentRequest(input) 2663 req, out := c.GetBucketRequestPaymentRequest(input)
2539 return out, req.Send() 2664 return out, req.Send()
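The rewritten comments above describe the two-step pattern all of these generated methods share: build a request with the *Request method, optionally customize it, then call "Send" before reading the output. A minimal sketch of that pattern, outside the diff itself, assuming the vendored aws-sdk-go v1 packages and placeholder region, bucket name, and header values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Shared session and S3 client; region and bucket are placeholders.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Build the request without sending it, so custom logic can be attached first.
	req, out := svc.GetBucketRequestPaymentRequest(&s3.GetBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
	})

	// Example of injecting configuration before the call: a custom header.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // placeholder header

	// "out" is only populated after Send returns without error.
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("request payer:", aws.StringValue(out.Payer))
}

The plain GetBucketRequestPayment wrapper shown in the same hunk performs exactly these two steps when no customization is needed.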
@@ -2559,19 +2684,18 @@ const opGetBucketTagging = "GetBucketTagging"
2559 2684
2560// GetBucketTaggingRequest generates a "aws/request.Request" representing the 2685// GetBucketTaggingRequest generates a "aws/request.Request" representing the
2561// client's request for the GetBucketTagging operation. The "output" return 2686// client's request for the GetBucketTagging operation. The "output" return
2562// value can be used to capture response data after the request's "Send" method 2687// value will be populated with the request's response once the request completes
2563// is called. 2688// successfuly.
2689//
2690// Use "Send" method on the returned Request to send the API call to the service.
2691// the "output" return value is not valid until after Send returns without error.
2564// 2692//
2565// See GetBucketTagging for usage and error information. 2693// See GetBucketTagging for more information on using the GetBucketTagging
2694// API call, and error handling.
2566// 2695//
2567// Creating a request object using this method should be used when you want to inject 2696// This method is useful when you want to inject custom logic or configuration
2568// custom logic into the request's lifecycle using a custom handler, or if you want to 2697// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2569// access properties on the request object before or after sending the request. If
2570// you just want the service response, call the GetBucketTagging method directly
2571// instead.
2572// 2698//
2573// Note: You must call the "Send" method on the returned request object in order
2574// to execute the request.
2575// 2699//
2576// // Example sending a request using the GetBucketTaggingRequest method. 2700// // Example sending a request using the GetBucketTaggingRequest method.
2577// req, resp := client.GetBucketTaggingRequest(params) 2701// req, resp := client.GetBucketTaggingRequest(params)
@@ -2581,7 +2705,7 @@ const opGetBucketTagging = "GetBucketTagging"
2581// fmt.Println(resp) 2705// fmt.Println(resp)
2582// } 2706// }
2583// 2707//
2584// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging 2708// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
2585func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { 2709func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
2586 op := &request.Operation{ 2710 op := &request.Operation{
2587 Name: opGetBucketTagging, 2711 Name: opGetBucketTagging,
@@ -2608,7 +2732,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
2608// 2732//
2609// See the AWS API reference guide for Amazon Simple Storage Service's 2733// See the AWS API reference guide for Amazon Simple Storage Service's
2610// API operation GetBucketTagging for usage and error information. 2734// API operation GetBucketTagging for usage and error information.
2611// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging 2735// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
2612func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { 2736func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
2613 req, out := c.GetBucketTaggingRequest(input) 2737 req, out := c.GetBucketTaggingRequest(input)
2614 return out, req.Send() 2738 return out, req.Send()
@@ -2634,19 +2758,18 @@ const opGetBucketVersioning = "GetBucketVersioning"
2634 2758
2635// GetBucketVersioningRequest generates a "aws/request.Request" representing the 2759// GetBucketVersioningRequest generates a "aws/request.Request" representing the
2636// client's request for the GetBucketVersioning operation. The "output" return 2760// client's request for the GetBucketVersioning operation. The "output" return
2637// value can be used to capture response data after the request's "Send" method 2761// value will be populated with the request's response once the request completes
2638// is called. 2762// successfuly.
2639// 2763//
2640// See GetBucketVersioning for usage and error information. 2764// Use "Send" method on the returned Request to send the API call to the service.
2765// the "output" return value is not valid until after Send returns without error.
2641// 2766//
2642// Creating a request object using this method should be used when you want to inject 2767// See GetBucketVersioning for more information on using the GetBucketVersioning
2643// custom logic into the request's lifecycle using a custom handler, or if you want to 2768// API call, and error handling.
2644// access properties on the request object before or after sending the request. If 2769//
2645// you just want the service response, call the GetBucketVersioning method directly 2770// This method is useful when you want to inject custom logic or configuration
2646// instead. 2771// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2647// 2772//
2648// Note: You must call the "Send" method on the returned request object in order
2649// to execute the request.
2650// 2773//
2651// // Example sending a request using the GetBucketVersioningRequest method. 2774// // Example sending a request using the GetBucketVersioningRequest method.
2652// req, resp := client.GetBucketVersioningRequest(params) 2775// req, resp := client.GetBucketVersioningRequest(params)
@@ -2656,7 +2779,7 @@ const opGetBucketVersioning = "GetBucketVersioning"
2656// fmt.Println(resp) 2779// fmt.Println(resp)
2657// } 2780// }
2658// 2781//
2659// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning 2782// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
2660func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { 2783func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
2661 op := &request.Operation{ 2784 op := &request.Operation{
2662 Name: opGetBucketVersioning, 2785 Name: opGetBucketVersioning,
@@ -2683,7 +2806,7 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
2683// 2806//
2684// See the AWS API reference guide for Amazon Simple Storage Service's 2807// See the AWS API reference guide for Amazon Simple Storage Service's
2685// API operation GetBucketVersioning for usage and error information. 2808// API operation GetBucketVersioning for usage and error information.
2686// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning 2809// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
2687func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { 2810func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
2688 req, out := c.GetBucketVersioningRequest(input) 2811 req, out := c.GetBucketVersioningRequest(input)
2689 return out, req.Send() 2812 return out, req.Send()
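For comparison with the request-object form above, the one-shot wrapper documented in this hunk builds the request and calls Send internally. A short sketch of that direct call, again with a hypothetical bucket name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// One-shot call: the SDK builds the request and calls Send internally.
	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("versioning status:", aws.StringValue(out.Status))
}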
@@ -2709,19 +2832,18 @@ const opGetBucketWebsite = "GetBucketWebsite"
2709 2832
2710// GetBucketWebsiteRequest generates a "aws/request.Request" representing the 2833// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
2711// client's request for the GetBucketWebsite operation. The "output" return 2834// client's request for the GetBucketWebsite operation. The "output" return
2712// value can be used to capture response data after the request's "Send" method 2835// value will be populated with the request's response once the request completes
2713// is called. 2836// successfuly.
2837//
2838// Use "Send" method on the returned Request to send the API call to the service.
2839// the "output" return value is not valid until after Send returns without error.
2714// 2840//
2715// See GetBucketWebsite for usage and error information. 2841// See GetBucketWebsite for more information on using the GetBucketWebsite
2842// API call, and error handling.
2716// 2843//
2717// Creating a request object using this method should be used when you want to inject 2844// This method is useful when you want to inject custom logic or configuration
2718// custom logic into the request's lifecycle using a custom handler, or if you want to 2845// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2719// access properties on the request object before or after sending the request. If
2720// you just want the service response, call the GetBucketWebsite method directly
2721// instead.
2722// 2846//
2723// Note: You must call the "Send" method on the returned request object in order
2724// to execute the request.
2725// 2847//
2726// // Example sending a request using the GetBucketWebsiteRequest method. 2848// // Example sending a request using the GetBucketWebsiteRequest method.
2727// req, resp := client.GetBucketWebsiteRequest(params) 2849// req, resp := client.GetBucketWebsiteRequest(params)
@@ -2731,7 +2853,7 @@ const opGetBucketWebsite = "GetBucketWebsite"
2731// fmt.Println(resp) 2853// fmt.Println(resp)
2732// } 2854// }
2733// 2855//
2734// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite 2856// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
2735func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { 2857func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
2736 op := &request.Operation{ 2858 op := &request.Operation{
2737 Name: opGetBucketWebsite, 2859 Name: opGetBucketWebsite,
@@ -2758,7 +2880,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
2758// 2880//
2759// See the AWS API reference guide for Amazon Simple Storage Service's 2881// See the AWS API reference guide for Amazon Simple Storage Service's
2760// API operation GetBucketWebsite for usage and error information. 2882// API operation GetBucketWebsite for usage and error information.
2761// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite 2883// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
2762func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { 2884func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
2763 req, out := c.GetBucketWebsiteRequest(input) 2885 req, out := c.GetBucketWebsiteRequest(input)
2764 return out, req.Send() 2886 return out, req.Send()
@@ -2784,19 +2906,18 @@ const opGetObject = "GetObject"
2784 2906
2785// GetObjectRequest generates a "aws/request.Request" representing the 2907// GetObjectRequest generates a "aws/request.Request" representing the
2786// client's request for the GetObject operation. The "output" return 2908// client's request for the GetObject operation. The "output" return
2787// value can be used to capture response data after the request's "Send" method 2909// value will be populated with the request's response once the request completes
2788// is called. 2910// successfuly.
2911//
2912// Use "Send" method on the returned Request to send the API call to the service.
2913// the "output" return value is not valid until after Send returns without error.
2789// 2914//
2790// See GetObject for usage and error information. 2915// See GetObject for more information on using the GetObject
2916// API call, and error handling.
2791// 2917//
2792// Creating a request object using this method should be used when you want to inject 2918// This method is useful when you want to inject custom logic or configuration
2793// custom logic into the request's lifecycle using a custom handler, or if you want to 2919// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2794// access properties on the request object before or after sending the request. If
2795// you just want the service response, call the GetObject method directly
2796// instead.
2797// 2920//
2798// Note: You must call the "Send" method on the returned request object in order
2799// to execute the request.
2800// 2921//
2801// // Example sending a request using the GetObjectRequest method. 2922// // Example sending a request using the GetObjectRequest method.
2802// req, resp := client.GetObjectRequest(params) 2923// req, resp := client.GetObjectRequest(params)
@@ -2806,7 +2927,7 @@ const opGetObject = "GetObject"
2806// fmt.Println(resp) 2927// fmt.Println(resp)
2807// } 2928// }
2808// 2929//
2809// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject 2930// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
2810func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { 2931func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
2811 op := &request.Operation{ 2932 op := &request.Operation{
2812 Name: opGetObject, 2933 Name: opGetObject,
@@ -2838,7 +2959,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
2838// * ErrCodeNoSuchKey "NoSuchKey" 2959// * ErrCodeNoSuchKey "NoSuchKey"
2839// The specified key does not exist. 2960// The specified key does not exist.
2840// 2961//
2841// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject 2962// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
2842func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { 2963func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
2843 req, out := c.GetObjectRequest(input) 2964 req, out := c.GetObjectRequest(input)
2844 return out, req.Send() 2965 return out, req.Send()
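The GetObject hunk above documents ErrCodeNoSuchKey as the error code returned when the key does not exist. A hedged sketch of calling GetObject, distinguishing that error with awserr, and draining the response body; bucket and key names are placeholders:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),  // placeholder
		Key:    aws.String("example/key.txt"), // placeholder
	})
	if err != nil {
		// ErrCodeNoSuchKey is the error code listed for GetObject above.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			log.Fatal("object does not exist")
		}
		log.Fatal(err)
	}
	defer out.Body.Close()

	body, err := ioutil.ReadAll(out.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(body))
}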
@@ -2864,19 +2985,18 @@ const opGetObjectAcl = "GetObjectAcl"
2864 2985
2865// GetObjectAclRequest generates a "aws/request.Request" representing the 2986// GetObjectAclRequest generates a "aws/request.Request" representing the
2866// client's request for the GetObjectAcl operation. The "output" return 2987// client's request for the GetObjectAcl operation. The "output" return
2867// value can be used to capture response data after the request's "Send" method 2988// value will be populated with the request's response once the request completes
2868// is called. 2989// successfuly.
2869// 2990//
2870// See GetObjectAcl for usage and error information. 2991// Use "Send" method on the returned Request to send the API call to the service.
2992// the "output" return value is not valid until after Send returns without error.
2871// 2993//
2872// Creating a request object using this method should be used when you want to inject 2994// See GetObjectAcl for more information on using the GetObjectAcl
2873// custom logic into the request's lifecycle using a custom handler, or if you want to 2995// API call, and error handling.
2874// access properties on the request object before or after sending the request. If 2996//
2875// you just want the service response, call the GetObjectAcl method directly 2997// This method is useful when you want to inject custom logic or configuration
2876// instead. 2998// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2877// 2999//
2878// Note: You must call the "Send" method on the returned request object in order
2879// to execute the request.
2880// 3000//
2881// // Example sending a request using the GetObjectAclRequest method. 3001// // Example sending a request using the GetObjectAclRequest method.
2882// req, resp := client.GetObjectAclRequest(params) 3002// req, resp := client.GetObjectAclRequest(params)
@@ -2886,7 +3006,7 @@ const opGetObjectAcl = "GetObjectAcl"
2886// fmt.Println(resp) 3006// fmt.Println(resp)
2887// } 3007// }
2888// 3008//
2889// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl 3009// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
2890func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { 3010func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
2891 op := &request.Operation{ 3011 op := &request.Operation{
2892 Name: opGetObjectAcl, 3012 Name: opGetObjectAcl,
@@ -2918,7 +3038,7 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
2918// * ErrCodeNoSuchKey "NoSuchKey" 3038// * ErrCodeNoSuchKey "NoSuchKey"
2919// The specified key does not exist. 3039// The specified key does not exist.
2920// 3040//
2921// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl 3041// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
2922func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { 3042func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
2923 req, out := c.GetObjectAclRequest(input) 3043 req, out := c.GetObjectAclRequest(input)
2924 return out, req.Send() 3044 return out, req.Send()
@@ -2944,19 +3064,18 @@ const opGetObjectTagging = "GetObjectTagging"
2944 3064
2945// GetObjectTaggingRequest generates a "aws/request.Request" representing the 3065// GetObjectTaggingRequest generates a "aws/request.Request" representing the
2946// client's request for the GetObjectTagging operation. The "output" return 3066// client's request for the GetObjectTagging operation. The "output" return
2947// value can be used to capture response data after the request's "Send" method 3067// value will be populated with the request's response once the request completes
2948// is called. 3068// successfuly.
3069//
3070// Use "Send" method on the returned Request to send the API call to the service.
3071// the "output" return value is not valid until after Send returns without error.
2949// 3072//
2950// See GetObjectTagging for usage and error information. 3073// See GetObjectTagging for more information on using the GetObjectTagging
3074// API call, and error handling.
2951// 3075//
2952// Creating a request object using this method should be used when you want to inject 3076// This method is useful when you want to inject custom logic or configuration
2953// custom logic into the request's lifecycle using a custom handler, or if you want to 3077// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2954// access properties on the request object before or after sending the request. If
2955// you just want the service response, call the GetObjectTagging method directly
2956// instead.
2957// 3078//
2958// Note: You must call the "Send" method on the returned request object in order
2959// to execute the request.
2960// 3079//
2961// // Example sending a request using the GetObjectTaggingRequest method. 3080// // Example sending a request using the GetObjectTaggingRequest method.
2962// req, resp := client.GetObjectTaggingRequest(params) 3081// req, resp := client.GetObjectTaggingRequest(params)
@@ -2966,7 +3085,7 @@ const opGetObjectTagging = "GetObjectTagging"
2966// fmt.Println(resp) 3085// fmt.Println(resp)
2967// } 3086// }
2968// 3087//
2969// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging 3088// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
2970func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { 3089func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
2971 op := &request.Operation{ 3090 op := &request.Operation{
2972 Name: opGetObjectTagging, 3091 Name: opGetObjectTagging,
@@ -2993,7 +3112,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request
2993// 3112//
2994// See the AWS API reference guide for Amazon Simple Storage Service's 3113// See the AWS API reference guide for Amazon Simple Storage Service's
2995// API operation GetObjectTagging for usage and error information. 3114// API operation GetObjectTagging for usage and error information.
2996// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging 3115// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
2997func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { 3116func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
2998 req, out := c.GetObjectTaggingRequest(input) 3117 req, out := c.GetObjectTaggingRequest(input)
2999 return out, req.Send() 3118 return out, req.Send()
@@ -3019,19 +3138,18 @@ const opGetObjectTorrent = "GetObjectTorrent"
3019 3138
3020// GetObjectTorrentRequest generates a "aws/request.Request" representing the 3139// GetObjectTorrentRequest generates a "aws/request.Request" representing the
3021// client's request for the GetObjectTorrent operation. The "output" return 3140// client's request for the GetObjectTorrent operation. The "output" return
3022// value can be used to capture response data after the request's "Send" method 3141// value will be populated with the request's response once the request completes
3023// is called. 3142// successfuly.
3024// 3143//
3025// See GetObjectTorrent for usage and error information. 3144// Use "Send" method on the returned Request to send the API call to the service.
3145// the "output" return value is not valid until after Send returns without error.
3026// 3146//
3027// Creating a request object using this method should be used when you want to inject 3147// See GetObjectTorrent for more information on using the GetObjectTorrent
3028// custom logic into the request's lifecycle using a custom handler, or if you want to 3148// API call, and error handling.
3029// access properties on the request object before or after sending the request. If 3149//
3030// you just want the service response, call the GetObjectTorrent method directly 3150// This method is useful when you want to inject custom logic or configuration
3031// instead. 3151// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3032// 3152//
3033// Note: You must call the "Send" method on the returned request object in order
3034// to execute the request.
3035// 3153//
3036// // Example sending a request using the GetObjectTorrentRequest method. 3154// // Example sending a request using the GetObjectTorrentRequest method.
3037// req, resp := client.GetObjectTorrentRequest(params) 3155// req, resp := client.GetObjectTorrentRequest(params)
@@ -3041,7 +3159,7 @@ const opGetObjectTorrent = "GetObjectTorrent"
3041// fmt.Println(resp) 3159// fmt.Println(resp)
3042// } 3160// }
3043// 3161//
3044// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent 3162// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
3045func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { 3163func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
3046 op := &request.Operation{ 3164 op := &request.Operation{
3047 Name: opGetObjectTorrent, 3165 Name: opGetObjectTorrent,
@@ -3068,7 +3186,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
3068// 3186//
3069// See the AWS API reference guide for Amazon Simple Storage Service's 3187// See the AWS API reference guide for Amazon Simple Storage Service's
3070// API operation GetObjectTorrent for usage and error information. 3188// API operation GetObjectTorrent for usage and error information.
3071// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent 3189// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
3072func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { 3190func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
3073 req, out := c.GetObjectTorrentRequest(input) 3191 req, out := c.GetObjectTorrentRequest(input)
3074 return out, req.Send() 3192 return out, req.Send()
@@ -3094,19 +3212,18 @@ const opHeadBucket = "HeadBucket"
3094 3212
3095// HeadBucketRequest generates a "aws/request.Request" representing the 3213// HeadBucketRequest generates a "aws/request.Request" representing the
3096// client's request for the HeadBucket operation. The "output" return 3214// client's request for the HeadBucket operation. The "output" return
3097// value can be used to capture response data after the request's "Send" method 3215// value will be populated with the request's response once the request completes
3098// is called. 3216// successfuly.
3217//
3218// Use "Send" method on the returned Request to send the API call to the service.
3219// the "output" return value is not valid until after Send returns without error.
3099// 3220//
3100// See HeadBucket for usage and error information. 3221// See HeadBucket for more information on using the HeadBucket
3222// API call, and error handling.
3101// 3223//
3102// Creating a request object using this method should be used when you want to inject 3224// This method is useful when you want to inject custom logic or configuration
3103// custom logic into the request's lifecycle using a custom handler, or if you want to 3225// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3104// access properties on the request object before or after sending the request. If
3105// you just want the service response, call the HeadBucket method directly
3106// instead.
3107// 3226//
3108// Note: You must call the "Send" method on the returned request object in order
3109// to execute the request.
3110// 3227//
3111// // Example sending a request using the HeadBucketRequest method. 3228// // Example sending a request using the HeadBucketRequest method.
3112// req, resp := client.HeadBucketRequest(params) 3229// req, resp := client.HeadBucketRequest(params)
@@ -3116,7 +3233,7 @@ const opHeadBucket = "HeadBucket"
3116// fmt.Println(resp) 3233// fmt.Println(resp)
3117// } 3234// }
3118// 3235//
3119// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket 3236// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
3120func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { 3237func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
3121 op := &request.Operation{ 3238 op := &request.Operation{
3122 Name: opHeadBucket, 3239 Name: opHeadBucket,
@@ -3151,7 +3268,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
3151// * ErrCodeNoSuchBucket "NoSuchBucket" 3268// * ErrCodeNoSuchBucket "NoSuchBucket"
3152// The specified bucket does not exist. 3269// The specified bucket does not exist.
3153// 3270//
3154// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket 3271// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
3155func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { 3272func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
3156 req, out := c.HeadBucketRequest(input) 3273 req, out := c.HeadBucketRequest(input)
3157 return out, req.Send() 3274 return out, req.Send()
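HeadBucket is documented above with ErrCodeNoSuchBucket, which makes it a natural existence check. An illustrative sketch under that assumption; in practice the service can also surface a bare "NotFound" code for HEAD requests, so real code may need to match both. The bucket name is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
			fmt.Println("bucket does not exist")
			return
		}
		log.Fatal(err) // e.g. access denied or a transport error
	}
	fmt.Println("bucket exists and is accessible")
}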
@@ -3177,19 +3294,18 @@ const opHeadObject = "HeadObject"
3177 3294
3178// HeadObjectRequest generates a "aws/request.Request" representing the 3295// HeadObjectRequest generates a "aws/request.Request" representing the
3179// client's request for the HeadObject operation. The "output" return 3296// client's request for the HeadObject operation. The "output" return
3180// value can be used to capture response data after the request's "Send" method 3297// value will be populated with the request's response once the request completes
3181// is called. 3298// successfuly.
3182// 3299//
3183// See HeadObject for usage and error information. 3300// Use "Send" method on the returned Request to send the API call to the service.
3301// the "output" return value is not valid until after Send returns without error.
3184// 3302//
3185// Creating a request object using this method should be used when you want to inject 3303// See HeadObject for more information on using the HeadObject
3186// custom logic into the request's lifecycle using a custom handler, or if you want to 3304// API call, and error handling.
3187// access properties on the request object before or after sending the request. If 3305//
3188// you just want the service response, call the HeadObject method directly 3306// This method is useful when you want to inject custom logic or configuration
3189// instead. 3307// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3190// 3308//
3191// Note: You must call the "Send" method on the returned request object in order
3192// to execute the request.
3193// 3309//
3194// // Example sending a request using the HeadObjectRequest method. 3310// // Example sending a request using the HeadObjectRequest method.
3195// req, resp := client.HeadObjectRequest(params) 3311// req, resp := client.HeadObjectRequest(params)
@@ -3199,7 +3315,7 @@ const opHeadObject = "HeadObject"
3199// fmt.Println(resp) 3315// fmt.Println(resp)
3200// } 3316// }
3201// 3317//
3202// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject 3318// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
3203func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { 3319func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
3204 op := &request.Operation{ 3320 op := &request.Operation{
3205 Name: opHeadObject, 3321 Name: opHeadObject,
@@ -3231,7 +3347,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
3231// 3347//
3232// See the AWS API reference guide for Amazon Simple Storage Service's 3348// See the AWS API reference guide for Amazon Simple Storage Service's
3233// API operation HeadObject for usage and error information. 3349// API operation HeadObject for usage and error information.
3234// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject 3350// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
3235func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { 3351func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
3236 req, out := c.HeadObjectRequest(input) 3352 req, out := c.HeadObjectRequest(input)
3237 return out, req.Send() 3353 return out, req.Send()
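HeadObject returns an object's metadata without transferring the payload, which is useful for deciding whether a download is needed at all. A small sketch with placeholder names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// HEAD retrieves metadata only; no object body is returned.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),  // placeholder
		Key:    aws.String("example/key.txt"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("size=%d bytes, etag=%s, last-modified=%v\n",
		aws.Int64Value(out.ContentLength),
		aws.StringValue(out.ETag),
		aws.TimeValue(out.LastModified))
}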
@@ -3257,19 +3373,18 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
3257 3373
3258// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the 3374// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
3259// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return 3375// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
3260// value can be used to capture response data after the request's "Send" method 3376// value will be populated with the request's response once the request completes
3261// is called. 3377// successfuly.
3378//
3379// Use "Send" method on the returned Request to send the API call to the service.
3380// the "output" return value is not valid until after Send returns without error.
3262// 3381//
3263// See ListBucketAnalyticsConfigurations for usage and error information. 3382// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations
3383// API call, and error handling.
3264// 3384//
3265// Creating a request object using this method should be used when you want to inject 3385// This method is useful when you want to inject custom logic or configuration
3266// custom logic into the request's lifecycle using a custom handler, or if you want to 3386// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3267// access properties on the request object before or after sending the request. If
3268// you just want the service response, call the ListBucketAnalyticsConfigurations method directly
3269// instead.
3270// 3387//
3271// Note: You must call the "Send" method on the returned request object in order
3272// to execute the request.
3273// 3388//
3274// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 3389// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
3275// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) 3390// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
@@ -3279,7 +3394,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
3279// fmt.Println(resp) 3394// fmt.Println(resp)
3280// } 3395// }
3281// 3396//
3282// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations 3397// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
3283func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { 3398func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
3284 op := &request.Operation{ 3399 op := &request.Operation{
3285 Name: opListBucketAnalyticsConfigurations, 3400 Name: opListBucketAnalyticsConfigurations,
@@ -3306,7 +3421,7 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics
3306// 3421//
3307// See the AWS API reference guide for Amazon Simple Storage Service's 3422// See the AWS API reference guide for Amazon Simple Storage Service's
3308// API operation ListBucketAnalyticsConfigurations for usage and error information. 3423// API operation ListBucketAnalyticsConfigurations for usage and error information.
3309// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations 3424// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
3310func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { 3425func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
3311 req, out := c.ListBucketAnalyticsConfigurationsRequest(input) 3426 req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
3312 return out, req.Send() 3427 return out, req.Send()
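The analytics-configuration listing is paginated with a continuation token rather than a generated Pages helper. A sketch of the manual loop, assuming the IsTruncated and NextContinuationToken fields of the output struct (field names recalled from the v1 SDK, not shown in this excerpt) and a placeholder bucket:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	input := &s3.ListBucketAnalyticsConfigurationsInput{
		Bucket: aws.String("example-bucket"), // placeholder
	}
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, cfg := range out.AnalyticsConfigurationList {
			fmt.Println("analytics configuration:", aws.StringValue(cfg.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Continue from where the previous page stopped.
		input.ContinuationToken = out.NextContinuationToken
	}
}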
@@ -3332,19 +3447,18 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
3332 3447
3333// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the 3448// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
3334// client's request for the ListBucketInventoryConfigurations operation. The "output" return 3449// client's request for the ListBucketInventoryConfigurations operation. The "output" return
3335// value can be used to capture response data after the request's "Send" method 3450// value will be populated with the request's response once the request completes
3336// is called. 3451// successfuly.
3452//
3453// Use "Send" method on the returned Request to send the API call to the service.
3454// the "output" return value is not valid until after Send returns without error.
3337// 3455//
3338// See ListBucketInventoryConfigurations for usage and error information. 3456// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
3457// API call, and error handling.
3339// 3458//
3340// Creating a request object using this method should be used when you want to inject 3459// This method is useful when you want to inject custom logic or configuration
3341// custom logic into the request's lifecycle using a custom handler, or if you want to 3460// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3342// access properties on the request object before or after sending the request. If
3343// you just want the service response, call the ListBucketInventoryConfigurations method directly
3344// instead.
3345// 3461//
3346// Note: You must call the "Send" method on the returned request object in order
3347// to execute the request.
3348// 3462//
3349// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 3463// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
3350// req, resp := client.ListBucketInventoryConfigurationsRequest(params) 3464// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
@@ -3354,7 +3468,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
3354// fmt.Println(resp) 3468// fmt.Println(resp)
3355// } 3469// }
3356// 3470//
3357// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations 3471// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
3358func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { 3472func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
3359 op := &request.Operation{ 3473 op := &request.Operation{
3360 Name: opListBucketInventoryConfigurations, 3474 Name: opListBucketInventoryConfigurations,
@@ -3381,7 +3495,7 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory
3381// 3495//
3382// See the AWS API reference guide for Amazon Simple Storage Service's 3496// See the AWS API reference guide for Amazon Simple Storage Service's
3383// API operation ListBucketInventoryConfigurations for usage and error information. 3497// API operation ListBucketInventoryConfigurations for usage and error information.
3384// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations 3498// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
3385func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { 3499func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
3386 req, out := c.ListBucketInventoryConfigurationsRequest(input) 3500 req, out := c.ListBucketInventoryConfigurationsRequest(input)
3387 return out, req.Send() 3501 return out, req.Send()
@@ -3407,19 +3521,18 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
3407 3521
3408// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the 3522// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
3409// client's request for the ListBucketMetricsConfigurations operation. The "output" return 3523// client's request for the ListBucketMetricsConfigurations operation. The "output" return
3410// value can be used to capture response data after the request's "Send" method 3524// value will be populated with the request's response once the request completes
3411// is called. 3525// successfuly.
3412// 3526//
3413// See ListBucketMetricsConfigurations for usage and error information. 3527// Use "Send" method on the returned Request to send the API call to the service.
3528// the "output" return value is not valid until after Send returns without error.
3414// 3529//
3415// Creating a request object using this method should be used when you want to inject 3530// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations
3416// custom logic into the request's lifecycle using a custom handler, or if you want to 3531// API call, and error handling.
3417// access properties on the request object before or after sending the request. If 3532//
3418// you just want the service response, call the ListBucketMetricsConfigurations method directly 3533// This method is useful when you want to inject custom logic or configuration
3419// instead. 3534// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3420// 3535//
3421// Note: You must call the "Send" method on the returned request object in order
3422// to execute the request.
3423// 3536//
3424// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 3537// // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
3425// req, resp := client.ListBucketMetricsConfigurationsRequest(params) 3538// req, resp := client.ListBucketMetricsConfigurationsRequest(params)
@@ -3429,7 +3542,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
3429// fmt.Println(resp) 3542// fmt.Println(resp)
3430// } 3543// }
3431// 3544//
3432// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations 3545// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
3433func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { 3546func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
3434 op := &request.Operation{ 3547 op := &request.Operation{
3435 Name: opListBucketMetricsConfigurations, 3548 Name: opListBucketMetricsConfigurations,
@@ -3456,7 +3569,7 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf
3456// 3569//
3457// See the AWS API reference guide for Amazon Simple Storage Service's 3570// See the AWS API reference guide for Amazon Simple Storage Service's
3458// API operation ListBucketMetricsConfigurations for usage and error information. 3571// API operation ListBucketMetricsConfigurations for usage and error information.
3459// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations 3572// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
3460func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { 3573func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
3461 req, out := c.ListBucketMetricsConfigurationsRequest(input) 3574 req, out := c.ListBucketMetricsConfigurationsRequest(input)
3462 return out, req.Send() 3575 return out, req.Send()
@@ -3482,19 +3595,18 @@ const opListBuckets = "ListBuckets"
3482 3595
3483// ListBucketsRequest generates a "aws/request.Request" representing the 3596// ListBucketsRequest generates a "aws/request.Request" representing the
3484// client's request for the ListBuckets operation. The "output" return 3597// client's request for the ListBuckets operation. The "output" return
3485// value can be used to capture response data after the request's "Send" method 3598// value will be populated with the request's response once the request completes
3486// is called. 3599// successfuly.
3600//
3601// Use "Send" method on the returned Request to send the API call to the service.
3602// the "output" return value is not valid until after Send returns without error.
3487// 3603//
3488// See ListBuckets for usage and error information. 3604// See ListBuckets for more information on using the ListBuckets
3605// API call, and error handling.
3489// 3606//
3490// Creating a request object using this method should be used when you want to inject 3607// This method is useful when you want to inject custom logic or configuration
3491// custom logic into the request's lifecycle using a custom handler, or if you want to 3608// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3492// access properties on the request object before or after sending the request. If
3493// you just want the service response, call the ListBuckets method directly
3494// instead.
3495// 3609//
3496// Note: You must call the "Send" method on the returned request object in order
3497// to execute the request.
3498// 3610//
3499// // Example sending a request using the ListBucketsRequest method. 3611// // Example sending a request using the ListBucketsRequest method.
3500// req, resp := client.ListBucketsRequest(params) 3612// req, resp := client.ListBucketsRequest(params)
@@ -3504,7 +3616,7 @@ const opListBuckets = "ListBuckets"
3504// fmt.Println(resp) 3616// fmt.Println(resp)
3505// } 3617// }
3506// 3618//
3507// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets 3619// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
3508func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { 3620func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
3509 op := &request.Operation{ 3621 op := &request.Operation{
3510 Name: opListBuckets, 3622 Name: opListBuckets,
@@ -3531,7 +3643,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request,
3531// 3643//
3532// See the AWS API reference guide for Amazon Simple Storage Service's 3644// See the AWS API reference guide for Amazon Simple Storage Service's
3533// API operation ListBuckets for usage and error information. 3645// API operation ListBuckets for usage and error information.
3534// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets 3646// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
3535func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { 3647func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
3536 req, out := c.ListBucketsRequest(input) 3648 req, out := c.ListBucketsRequest(input)
3537 return out, req.Send() 3649 return out, req.Send()
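ListBuckets takes an essentially empty input and returns every bucket owned by the caller, so it is the simplest of these operations to exercise. A brief sketch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// The input struct has no required fields for ListBuckets.
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Printf("%s (created %v)\n", aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
	}
}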
@@ -3557,19 +3669,18 @@ const opListMultipartUploads = "ListMultipartUploads"
3557 3669
3558// ListMultipartUploadsRequest generates a "aws/request.Request" representing the 3670// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
3559// client's request for the ListMultipartUploads operation. The "output" return 3671// client's request for the ListMultipartUploads operation. The "output" return
3560// value can be used to capture response data after the request's "Send" method 3672// value will be populated with the request's response once the request completes
3561// is called. 3673// successfuly.
3562// 3674//
3563// See ListMultipartUploads for usage and error information. 3675// Use "Send" method on the returned Request to send the API call to the service.
3676// the "output" return value is not valid until after Send returns without error.
3564// 3677//
3565// Creating a request object using this method should be used when you want to inject 3678// See ListMultipartUploads for more information on using the ListMultipartUploads
3566// custom logic into the request's lifecycle using a custom handler, or if you want to 3679// API call, and error handling.
3567// access properties on the request object before or after sending the request. If 3680//
3568// you just want the service response, call the ListMultipartUploads method directly 3681// This method is useful when you want to inject custom logic or configuration
3569// instead. 3682// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3570// 3683//
3571// Note: You must call the "Send" method on the returned request object in order
3572// to execute the request.
3573// 3684//
3574// // Example sending a request using the ListMultipartUploadsRequest method. 3685// // Example sending a request using the ListMultipartUploadsRequest method.
3575// req, resp := client.ListMultipartUploadsRequest(params) 3686// req, resp := client.ListMultipartUploadsRequest(params)
@@ -3579,7 +3690,7 @@ const opListMultipartUploads = "ListMultipartUploads"
3579// fmt.Println(resp) 3690// fmt.Println(resp)
3580// } 3691// }
3581// 3692//
3582// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads 3693// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
3583func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { 3694func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
3584 op := &request.Operation{ 3695 op := &request.Operation{
3585 Name: opListMultipartUploads, 3696 Name: opListMultipartUploads,
@@ -3612,7 +3723,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req
3612// 3723//
3613// See the AWS API reference guide for Amazon Simple Storage Service's 3724// See the AWS API reference guide for Amazon Simple Storage Service's
3614// API operation ListMultipartUploads for usage and error information. 3725// API operation ListMultipartUploads for usage and error information.
3615// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads 3726// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
3616func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { 3727func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
3617 req, out := c.ListMultipartUploadsRequest(input) 3728 req, out := c.ListMultipartUploadsRequest(input)
3618 return out, req.Send() 3729 return out, req.Send()
@@ -3688,19 +3799,18 @@ const opListObjectVersions = "ListObjectVersions"
3688 3799
3689// ListObjectVersionsRequest generates a "aws/request.Request" representing the 3800// ListObjectVersionsRequest generates a "aws/request.Request" representing the
3690// client's request for the ListObjectVersions operation. The "output" return 3801// client's request for the ListObjectVersions operation. The "output" return
3691// value can be used to capture response data after the request's "Send" method 3802// value will be populated with the request's response once the request completes
3692// is called. 3803// successfuly.
3804//
3805// Use "Send" method on the returned Request to send the API call to the service.
3806// the "output" return value is not valid until after Send returns without error.
3693// 3807//
3694// See ListObjectVersions for usage and error information. 3808// See ListObjectVersions for more information on using the ListObjectVersions
3809// API call, and error handling.
3695// 3810//
3696// Creating a request object using this method should be used when you want to inject 3811// This method is useful when you want to inject custom logic or configuration
3697// custom logic into the request's lifecycle using a custom handler, or if you want to 3812// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3698// access properties on the request object before or after sending the request. If
3699// you just want the service response, call the ListObjectVersions method directly
3700// instead.
3701// 3813//
3702// Note: You must call the "Send" method on the returned request object in order
3703// to execute the request.
3704// 3814//
3705// // Example sending a request using the ListObjectVersionsRequest method. 3815// // Example sending a request using the ListObjectVersionsRequest method.
3706// req, resp := client.ListObjectVersionsRequest(params) 3816// req, resp := client.ListObjectVersionsRequest(params)
@@ -3710,7 +3820,7 @@ const opListObjectVersions = "ListObjectVersions"
3710// fmt.Println(resp) 3820// fmt.Println(resp)
3711// } 3821// }
3712// 3822//
3713// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions 3823// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
3714func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { 3824func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
3715 op := &request.Operation{ 3825 op := &request.Operation{
3716 Name: opListObjectVersions, 3826 Name: opListObjectVersions,
@@ -3743,7 +3853,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
3743// 3853//
3744// See the AWS API reference guide for Amazon Simple Storage Service's 3854// See the AWS API reference guide for Amazon Simple Storage Service's
3745// API operation ListObjectVersions for usage and error information. 3855// API operation ListObjectVersions for usage and error information.
3746// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions 3856// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
3747func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { 3857func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
3748 req, out := c.ListObjectVersionsRequest(input) 3858 req, out := c.ListObjectVersionsRequest(input)
3749 return out, req.Send() 3859 return out, req.Send()
@@ -3819,19 +3929,18 @@ const opListObjects = "ListObjects"
3819 3929
3820// ListObjectsRequest generates a "aws/request.Request" representing the 3930// ListObjectsRequest generates a "aws/request.Request" representing the
3821// client's request for the ListObjects operation. The "output" return 3931// client's request for the ListObjects operation. The "output" return
3822// value can be used to capture response data after the request's "Send" method 3932// value will be populated with the request's response once the request completes
3823// is called. 3933// successfully.
3824// 3934//
3825// See ListObjects for usage and error information. 3935// Use the "Send" method on the returned Request to send the API call to the service.
3936// The "output" return value is not valid until after Send returns without error.
3826// 3937//
3827// Creating a request object using this method should be used when you want to inject 3938// See ListObjects for more information on using the ListObjects
3828// custom logic into the request's lifecycle using a custom handler, or if you want to 3939// API call, and error handling.
3829// access properties on the request object before or after sending the request. If 3940//
3830// you just want the service response, call the ListObjects method directly 3941// This method is useful when you want to inject custom logic or configuration
3831// instead. 3942// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3832// 3943//
3833// Note: You must call the "Send" method on the returned request object in order
3834// to execute the request.
3835// 3944//
3836// // Example sending a request using the ListObjectsRequest method. 3945// // Example sending a request using the ListObjectsRequest method.
3837// req, resp := client.ListObjectsRequest(params) 3946// req, resp := client.ListObjectsRequest(params)
@@ -3841,7 +3950,7 @@ const opListObjects = "ListObjects"
3841// fmt.Println(resp) 3950// fmt.Println(resp)
3842// } 3951// }
3843// 3952//
3844// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects 3953// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
3845func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { 3954func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
3846 op := &request.Operation{ 3955 op := &request.Operation{
3847 Name: opListObjects, 3956 Name: opListObjects,
@@ -3881,7 +3990,7 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request,
3881// * ErrCodeNoSuchBucket "NoSuchBucket" 3990// * ErrCodeNoSuchBucket "NoSuchBucket"
3882// The specified bucket does not exist. 3991// The specified bucket does not exist.
3883// 3992//
3884// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects 3993// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
3885func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { 3994func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
3886 req, out := c.ListObjectsRequest(input) 3995 req, out := c.ListObjectsRequest(input)
3887 return out, req.Send() 3996 return out, req.Send()
@@ -3957,19 +4066,18 @@ const opListObjectsV2 = "ListObjectsV2"
3957 4066
3958// ListObjectsV2Request generates a "aws/request.Request" representing the 4067// ListObjectsV2Request generates a "aws/request.Request" representing the
3959// client's request for the ListObjectsV2 operation. The "output" return 4068// client's request for the ListObjectsV2 operation. The "output" return
3960// value can be used to capture response data after the request's "Send" method 4069// value will be populated with the request's response once the request completes
3961// is called. 4070// successfully.
4071//
4072// Use the "Send" method on the returned Request to send the API call to the service.
4073// The "output" return value is not valid until after Send returns without error.
3962// 4074//
3963// See ListObjectsV2 for usage and error information. 4075// See ListObjectsV2 for more information on using the ListObjectsV2
4076// API call, and error handling.
3964// 4077//
3965// Creating a request object using this method should be used when you want to inject 4078// This method is useful when you want to inject custom logic or configuration
3966// custom logic into the request's lifecycle using a custom handler, or if you want to 4079// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3967// access properties on the request object before or after sending the request. If
3968// you just want the service response, call the ListObjectsV2 method directly
3969// instead.
3970// 4080//
3971// Note: You must call the "Send" method on the returned request object in order
3972// to execute the request.
3973// 4081//
3974// // Example sending a request using the ListObjectsV2Request method. 4082// // Example sending a request using the ListObjectsV2Request method.
3975// req, resp := client.ListObjectsV2Request(params) 4083// req, resp := client.ListObjectsV2Request(params)
@@ -3979,7 +4087,7 @@ const opListObjectsV2 = "ListObjectsV2"
3979// fmt.Println(resp) 4087// fmt.Println(resp)
3980// } 4088// }
3981// 4089//
3982// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 4090// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
3983func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { 4091func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
3984 op := &request.Operation{ 4092 op := &request.Operation{
3985 Name: opListObjectsV2, 4093 Name: opListObjectsV2,
@@ -4020,7 +4128,7 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
4020// * ErrCodeNoSuchBucket "NoSuchBucket" 4128// * ErrCodeNoSuchBucket "NoSuchBucket"
4021// The specified bucket does not exist. 4129// The specified bucket does not exist.
4022// 4130//
4023// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 4131// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
4024func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { 4132func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
4025 req, out := c.ListObjectsV2Request(input) 4133 req, out := c.ListObjectsV2Request(input)
4026 return out, req.Send() 4134 return out, req.Send()
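Where the request object is not needed, the plain method shown in this hunk can be called directly. A short sketch, reusing the client and imports from the earlier example; the bucket, prefix, and page size are illustrative:

    // listPrefix prints keys under a prefix using the direct call path.
    func listPrefix(client *s3.S3) error {
        out, err := client.ListObjectsV2(&s3.ListObjectsV2Input{
            Bucket:  aws.String("example-bucket"),
            Prefix:  aws.String("logs/"),
            MaxKeys: aws.Int64(100),
        })
        if err != nil {
            return err
        }
        for _, obj := range out.Contents {
            fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
        }
        return nil
    }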
@@ -4096,19 +4204,18 @@ const opListParts = "ListParts"
4096 4204
4097// ListPartsRequest generates a "aws/request.Request" representing the 4205// ListPartsRequest generates a "aws/request.Request" representing the
4098// client's request for the ListParts operation. The "output" return 4206// client's request for the ListParts operation. The "output" return
4099// value can be used to capture response data after the request's "Send" method 4207// value will be populated with the request's response once the request completes
4100// is called. 4208// successfully.
4209//
4210// Use the "Send" method on the returned Request to send the API call to the service.
4211// The "output" return value is not valid until after Send returns without error.
4101// 4212//
4102// See ListParts for usage and error information. 4213// See ListParts for more information on using the ListParts
4214// API call, and error handling.
4103// 4215//
4104// Creating a request object using this method should be used when you want to inject 4216// This method is useful when you want to inject custom logic or configuration
4105// custom logic into the request's lifecycle using a custom handler, or if you want to 4217// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4106// access properties on the request object before or after sending the request. If
4107// you just want the service response, call the ListParts method directly
4108// instead.
4109// 4218//
4110// Note: You must call the "Send" method on the returned request object in order
4111// to execute the request.
4112// 4219//
4113// // Example sending a request using the ListPartsRequest method. 4220// // Example sending a request using the ListPartsRequest method.
4114// req, resp := client.ListPartsRequest(params) 4221// req, resp := client.ListPartsRequest(params)
@@ -4118,7 +4225,7 @@ const opListParts = "ListParts"
4118// fmt.Println(resp) 4225// fmt.Println(resp)
4119// } 4226// }
4120// 4227//
4121// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts 4228// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
4122func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { 4229func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
4123 op := &request.Operation{ 4230 op := &request.Operation{
4124 Name: opListParts, 4231 Name: opListParts,
@@ -4151,7 +4258,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
4151// 4258//
4152// See the AWS API reference guide for Amazon Simple Storage Service's 4259// See the AWS API reference guide for Amazon Simple Storage Service's
4153// API operation ListParts for usage and error information. 4260// API operation ListParts for usage and error information.
4154// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts 4261// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
4155func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { 4262func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
4156 req, out := c.ListPartsRequest(input) 4263 req, out := c.ListPartsRequest(input)
4157 return out, req.Send() 4264 return out, req.Send()
@@ -4227,19 +4334,18 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
4227 4334
4228// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the 4335// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
4229// client's request for the PutBucketAccelerateConfiguration operation. The "output" return 4336// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
4230// value can be used to capture response data after the request's "Send" method 4337// value will be populated with the request's response once the request completes
4231// is called. 4338// successfully.
4232// 4339//
4233// See PutBucketAccelerateConfiguration for usage and error information. 4340// Use the "Send" method on the returned Request to send the API call to the service.
4341// The "output" return value is not valid until after Send returns without error.
4234// 4342//
4235// Creating a request object using this method should be used when you want to inject 4343// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration
4236// custom logic into the request's lifecycle using a custom handler, or if you want to 4344// API call, and error handling.
4237// access properties on the request object before or after sending the request. If 4345//
4238// you just want the service response, call the PutBucketAccelerateConfiguration method directly 4346// This method is useful when you want to inject custom logic or configuration
4239// instead. 4347// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4240// 4348//
4241// Note: You must call the "Send" method on the returned request object in order
4242// to execute the request.
4243// 4349//
4244// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. 4350// // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
4245// req, resp := client.PutBucketAccelerateConfigurationRequest(params) 4351// req, resp := client.PutBucketAccelerateConfigurationRequest(params)
@@ -4249,7 +4355,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
4249// fmt.Println(resp) 4355// fmt.Println(resp)
4250// } 4356// }
4251// 4357//
4252// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration 4358// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
4253func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { 4359func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
4254 op := &request.Operation{ 4360 op := &request.Operation{
4255 Name: opPutBucketAccelerateConfiguration, 4361 Name: opPutBucketAccelerateConfiguration,
@@ -4278,7 +4384,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
4278// 4384//
4279// See the AWS API reference guide for Amazon Simple Storage Service's 4385// See the AWS API reference guide for Amazon Simple Storage Service's
4280// API operation PutBucketAccelerateConfiguration for usage and error information. 4386// API operation PutBucketAccelerateConfiguration for usage and error information.
4281// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration 4387// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
4282func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { 4388func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
4283 req, out := c.PutBucketAccelerateConfigurationRequest(input) 4389 req, out := c.PutBucketAccelerateConfigurationRequest(input)
4284 return out, req.Send() 4390 return out, req.Send()
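A sketch of calling this operation to turn on Transfer Acceleration, assuming a client set up as in the earlier example; the bucket name is illustrative:

    // enableAcceleration enables S3 Transfer Acceleration for a bucket.
    func enableAcceleration(client *s3.S3) error {
        _, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
            Bucket: aws.String("example-bucket"),
            AccelerateConfiguration: &s3.AccelerateConfiguration{
                Status: aws.String(s3.BucketAccelerateStatusEnabled),
            },
        })
        return err
    }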
@@ -4304,19 +4410,18 @@ const opPutBucketAcl = "PutBucketAcl"
4304 4410
4305// PutBucketAclRequest generates a "aws/request.Request" representing the 4411// PutBucketAclRequest generates a "aws/request.Request" representing the
4306// client's request for the PutBucketAcl operation. The "output" return 4412// client's request for the PutBucketAcl operation. The "output" return
4307// value can be used to capture response data after the request's "Send" method 4413// value will be populated with the request's response once the request completes
4308// is called. 4414// successfully.
4415//
4416// Use the "Send" method on the returned Request to send the API call to the service.
4417// The "output" return value is not valid until after Send returns without error.
4309// 4418//
4310// See PutBucketAcl for usage and error information. 4419// See PutBucketAcl for more information on using the PutBucketAcl
4420// API call, and error handling.
4311// 4421//
4312// Creating a request object using this method should be used when you want to inject 4422// This method is useful when you want to inject custom logic or configuration
4313// custom logic into the request's lifecycle using a custom handler, or if you want to 4423// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4314// access properties on the request object before or after sending the request. If
4315// you just want the service response, call the PutBucketAcl method directly
4316// instead.
4317// 4424//
4318// Note: You must call the "Send" method on the returned request object in order
4319// to execute the request.
4320// 4425//
4321// // Example sending a request using the PutBucketAclRequest method. 4426// // Example sending a request using the PutBucketAclRequest method.
4322// req, resp := client.PutBucketAclRequest(params) 4427// req, resp := client.PutBucketAclRequest(params)
@@ -4326,7 +4431,7 @@ const opPutBucketAcl = "PutBucketAcl"
4326// fmt.Println(resp) 4431// fmt.Println(resp)
4327// } 4432// }
4328// 4433//
4329// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl 4434// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
4330func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { 4435func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
4331 op := &request.Operation{ 4436 op := &request.Operation{
4332 Name: opPutBucketAcl, 4437 Name: opPutBucketAcl,
@@ -4355,7 +4460,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
4355// 4460//
4356// See the AWS API reference guide for Amazon Simple Storage Service's 4461// See the AWS API reference guide for Amazon Simple Storage Service's
4357// API operation PutBucketAcl for usage and error information. 4462// API operation PutBucketAcl for usage and error information.
4358// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl 4463// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
4359func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { 4464func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
4360 req, out := c.PutBucketAclRequest(input) 4465 req, out := c.PutBucketAclRequest(input)
4361 return out, req.Send() 4466 return out, req.Send()
@@ -4381,19 +4486,18 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
4381 4486
4382// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the 4487// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
4383// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return 4488// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
4384// value can be used to capture response data after the request's "Send" method 4489// value will be populated with the request's response once the request completes
4385// is called. 4490// successfully.
4386// 4491//
4387// See PutBucketAnalyticsConfiguration for usage and error information. 4492// Use the "Send" method on the returned Request to send the API call to the service.
4493// The "output" return value is not valid until after Send returns without error.
4388// 4494//
4389// Creating a request object using this method should be used when you want to inject 4495// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration
4390// custom logic into the request's lifecycle using a custom handler, or if you want to 4496// API call, and error handling.
4391// access properties on the request object before or after sending the request. If 4497//
4392// you just want the service response, call the PutBucketAnalyticsConfiguration method directly 4498// This method is useful when you want to inject custom logic or configuration
4393// instead. 4499// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4394// 4500//
4395// Note: You must call the "Send" method on the returned request object in order
4396// to execute the request.
4397// 4501//
4398// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. 4502// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
4399// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) 4503// req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
@@ -4403,7 +4507,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
4403// fmt.Println(resp) 4507// fmt.Println(resp)
4404// } 4508// }
4405// 4509//
4406// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration 4510// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
4407func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { 4511func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) {
4408 op := &request.Operation{ 4512 op := &request.Operation{
4409 Name: opPutBucketAnalyticsConfiguration, 4513 Name: opPutBucketAnalyticsConfiguration,
@@ -4433,7 +4537,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
4433// 4537//
4434// See the AWS API reference guide for Amazon Simple Storage Service's 4538// See the AWS API reference guide for Amazon Simple Storage Service's
4435// API operation PutBucketAnalyticsConfiguration for usage and error information. 4539// API operation PutBucketAnalyticsConfiguration for usage and error information.
4436// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration 4540// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
4437func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { 4541func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
4438 req, out := c.PutBucketAnalyticsConfigurationRequest(input) 4542 req, out := c.PutBucketAnalyticsConfigurationRequest(input)
4439 return out, req.Send() 4543 return out, req.Send()
@@ -4459,19 +4563,18 @@ const opPutBucketCors = "PutBucketCors"
4459 4563
4460// PutBucketCorsRequest generates a "aws/request.Request" representing the 4564// PutBucketCorsRequest generates a "aws/request.Request" representing the
4461// client's request for the PutBucketCors operation. The "output" return 4565// client's request for the PutBucketCors operation. The "output" return
4462// value can be used to capture response data after the request's "Send" method 4566// value will be populated with the request's response once the request completes
4463// is called. 4567// successfully.
4568//
4569// Use the "Send" method on the returned Request to send the API call to the service.
4570// The "output" return value is not valid until after Send returns without error.
4464// 4571//
4465// See PutBucketCors for usage and error information. 4572// See PutBucketCors for more information on using the PutBucketCors
4573// API call, and error handling.
4466// 4574//
4467// Creating a request object using this method should be used when you want to inject 4575// This method is useful when you want to inject custom logic or configuration
4468// custom logic into the request's lifecycle using a custom handler, or if you want to 4576// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4469// access properties on the request object before or after sending the request. If
4470// you just want the service response, call the PutBucketCors method directly
4471// instead.
4472// 4577//
4473// Note: You must call the "Send" method on the returned request object in order
4474// to execute the request.
4475// 4578//
4476// // Example sending a request using the PutBucketCorsRequest method. 4579// // Example sending a request using the PutBucketCorsRequest method.
4477// req, resp := client.PutBucketCorsRequest(params) 4580// req, resp := client.PutBucketCorsRequest(params)
@@ -4481,7 +4584,7 @@ const opPutBucketCors = "PutBucketCors"
4481// fmt.Println(resp) 4584// fmt.Println(resp)
4482// } 4585// }
4483// 4586//
4484// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors 4587// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
4485func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { 4588func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
4486 op := &request.Operation{ 4589 op := &request.Operation{
4487 Name: opPutBucketCors, 4590 Name: opPutBucketCors,
@@ -4510,7 +4613,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
4510// 4613//
4511// See the AWS API reference guide for Amazon Simple Storage Service's 4614// See the AWS API reference guide for Amazon Simple Storage Service's
4512// API operation PutBucketCors for usage and error information. 4615// API operation PutBucketCors for usage and error information.
4513// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors 4616// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
4514func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { 4617func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
4515 req, out := c.PutBucketCorsRequest(input) 4618 req, out := c.PutBucketCorsRequest(input)
4516 return out, req.Send() 4619 return out, req.Send()
@@ -4532,23 +4635,99 @@ func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput
4532 return out, req.Send() 4635 return out, req.Send()
4533} 4636}
4534 4637
4638const opPutBucketEncryption = "PutBucketEncryption"
4639
4640// PutBucketEncryptionRequest generates a "aws/request.Request" representing the
4641// client's request for the PutBucketEncryption operation. The "output" return
4642// value will be populated with the request's response once the request completes
4643// successfully.
4644//
4645// Use the "Send" method on the returned Request to send the API call to the service.
4646// The "output" return value is not valid until after Send returns without error.
4647//
4648// See PutBucketEncryption for more information on using the PutBucketEncryption
4649// API call, and error handling.
4650//
4651// This method is useful when you want to inject custom logic or configuration
4652// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4653//
4654//
4655// // Example sending a request using the PutBucketEncryptionRequest method.
4656// req, resp := client.PutBucketEncryptionRequest(params)
4657//
4658// err := req.Send()
4659// if err == nil { // resp is now filled
4660// fmt.Println(resp)
4661// }
4662//
4663// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
4664func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) {
4665 op := &request.Operation{
4666 Name: opPutBucketEncryption,
4667 HTTPMethod: "PUT",
4668 HTTPPath: "/{Bucket}?encryption",
4669 }
4670
4671 if input == nil {
4672 input = &PutBucketEncryptionInput{}
4673 }
4674
4675 output = &PutBucketEncryptionOutput{}
4676 req = c.newRequest(op, input, output)
4677 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
4678 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
4679 return
4680}
4681
4682// PutBucketEncryption API operation for Amazon Simple Storage Service.
4683//
4684// Creates a new server-side encryption configuration (or replaces an existing
4685// one, if present).
4686//
4687// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4688// with awserr.Error's Code and Message methods to get detailed information about
4689// the error.
4690//
4691// See the AWS API reference guide for Amazon Simple Storage Service's
4692// API operation PutBucketEncryption for usage and error information.
4693// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
4694func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) {
4695 req, out := c.PutBucketEncryptionRequest(input)
4696 return out, req.Send()
4697}
4698
4699// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of
4700// the ability to pass a context and additional request options.
4701//
4702// See PutBucketEncryption for details on how to use this API operation.
4703//
4704// The context must be non-nil and will be used for request cancellation. If
4705// the context is nil a panic will occur. In the future the SDK may create
4706// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4707// for more information on using Contexts.
4708func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) {
4709 req, out := c.PutBucketEncryptionRequest(input)
4710 req.SetContext(ctx)
4711 req.ApplyOptions(opts...)
4712 return out, req.Send()
4713}
4714
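Since PutBucketEncryption is new in this revision, a sketch of using it through its WithContext variant may help; it needs "context" and "time" imports in addition to the earlier ones, the bucket name and timeout are illustrative, and the encryption input types are assumed to be the ones defined further down in this file:

    // enableDefaultEncryption applies a default AES-256 server-side encryption
    // rule, cancelling the call if it takes longer than the timeout.
    func enableDefaultEncryption(client *s3.S3) error {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        _, err := client.PutBucketEncryptionWithContext(ctx, &s3.PutBucketEncryptionInput{
            Bucket: aws.String("example-bucket"),
            ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
                Rules: []*s3.ServerSideEncryptionRule{{
                    ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
                        SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
                    },
                }},
            },
        })
        return err
    }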
4535const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" 4715const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
4536 4716
4537// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the 4717// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
4538// client's request for the PutBucketInventoryConfiguration operation. The "output" return 4718// client's request for the PutBucketInventoryConfiguration operation. The "output" return
4539// value can be used to capture response data after the request's "Send" method 4719// value will be populated with the request's response once the request completes
4540// is called. 4720// successfully.
4541// 4721//
4542// See PutBucketInventoryConfiguration for usage and error information. 4722// Use the "Send" method on the returned Request to send the API call to the service.
4723// The "output" return value is not valid until after Send returns without error.
4543// 4724//
4544// Creating a request object using this method should be used when you want to inject 4725// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration
4545// custom logic into the request's lifecycle using a custom handler, or if you want to 4726// API call, and error handling.
4546// access properties on the request object before or after sending the request. If 4727//
4547// you just want the service response, call the PutBucketInventoryConfiguration method directly 4728// This method is useful when you want to inject custom logic or configuration
4548// instead. 4729// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4549// 4730//
4550// Note: You must call the "Send" method on the returned request object in order
4551// to execute the request.
4552// 4731//
4553// // Example sending a request using the PutBucketInventoryConfigurationRequest method. 4732// // Example sending a request using the PutBucketInventoryConfigurationRequest method.
4554// req, resp := client.PutBucketInventoryConfigurationRequest(params) 4733// req, resp := client.PutBucketInventoryConfigurationRequest(params)
@@ -4558,7 +4737,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
4558// fmt.Println(resp) 4737// fmt.Println(resp)
4559// } 4738// }
4560// 4739//
4561// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration 4740// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
4562func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { 4741func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
4563 op := &request.Operation{ 4742 op := &request.Operation{
4564 Name: opPutBucketInventoryConfiguration, 4743 Name: opPutBucketInventoryConfiguration,
@@ -4588,7 +4767,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
4588// 4767//
4589// See the AWS API reference guide for Amazon Simple Storage Service's 4768// See the AWS API reference guide for Amazon Simple Storage Service's
4590// API operation PutBucketInventoryConfiguration for usage and error information. 4769// API operation PutBucketInventoryConfiguration for usage and error information.
4591// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration 4770// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
4592func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { 4771func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
4593 req, out := c.PutBucketInventoryConfigurationRequest(input) 4772 req, out := c.PutBucketInventoryConfigurationRequest(input)
4594 return out, req.Send() 4773 return out, req.Send()
@@ -4614,19 +4793,18 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
4614 4793
4615// PutBucketLifecycleRequest generates a "aws/request.Request" representing the 4794// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
4616// client's request for the PutBucketLifecycle operation. The "output" return 4795// client's request for the PutBucketLifecycle operation. The "output" return
4617// value can be used to capture response data after the request's "Send" method 4796// value will be populated with the request's response once the request completes
4618// is called. 4797// successfully.
4798//
4799// Use the "Send" method on the returned Request to send the API call to the service.
4800// The "output" return value is not valid until after Send returns without error.
4619// 4801//
4620// See PutBucketLifecycle for usage and error information. 4802// See PutBucketLifecycle for more information on using the PutBucketLifecycle
4803// API call, and error handling.
4621// 4804//
4622// Creating a request object using this method should be used when you want to inject 4805// This method is useful when you want to inject custom logic or configuration
4623// custom logic into the request's lifecycle using a custom handler, or if you want to 4806// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4624// access properties on the request object before or after sending the request. If
4625// you just want the service response, call the PutBucketLifecycle method directly
4626// instead.
4627// 4807//
4628// Note: You must call the "Send" method on the returned request object in order
4629// to execute the request.
4630// 4808//
4631// // Example sending a request using the PutBucketLifecycleRequest method. 4809// // Example sending a request using the PutBucketLifecycleRequest method.
4632// req, resp := client.PutBucketLifecycleRequest(params) 4810// req, resp := client.PutBucketLifecycleRequest(params)
@@ -4636,7 +4814,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
4636// fmt.Println(resp) 4814// fmt.Println(resp)
4637// } 4815// }
4638// 4816//
4639// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle 4817// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
4640func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { 4818func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
4641 if c.Client.Config.Logger != nil { 4819 if c.Client.Config.Logger != nil {
4642 c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") 4820 c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
@@ -4668,7 +4846,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
4668// 4846//
4669// See the AWS API reference guide for Amazon Simple Storage Service's 4847// See the AWS API reference guide for Amazon Simple Storage Service's
4670// API operation PutBucketLifecycle for usage and error information. 4848// API operation PutBucketLifecycle for usage and error information.
4671// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle 4849// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
4672func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { 4850func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
4673 req, out := c.PutBucketLifecycleRequest(input) 4851 req, out := c.PutBucketLifecycleRequest(input)
4674 return out, req.Send() 4852 return out, req.Send()
@@ -4694,19 +4872,18 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
4694 4872
4695// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the 4873// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
4696// client's request for the PutBucketLifecycleConfiguration operation. The "output" return 4874// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
4697// value can be used to capture response data after the request's "Send" method 4875// value will be populated with the request's response once the request completes
4698// is called. 4876// successfully.
4699// 4877//
4700// See PutBucketLifecycleConfiguration for usage and error information. 4878// Use the "Send" method on the returned Request to send the API call to the service.
4879// The "output" return value is not valid until after Send returns without error.
4701// 4880//
4702// Creating a request object using this method should be used when you want to inject 4881// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration
4703// custom logic into the request's lifecycle using a custom handler, or if you want to 4882// API call, and error handling.
4704// access properties on the request object before or after sending the request. If 4883//
4705// you just want the service response, call the PutBucketLifecycleConfiguration method directly 4884// This method is useful when you want to inject custom logic or configuration
4706// instead. 4885// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4707// 4886//
4708// Note: You must call the "Send" method on the returned request object in order
4709// to execute the request.
4710// 4887//
4711// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 4888// // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
4712// req, resp := client.PutBucketLifecycleConfigurationRequest(params) 4889// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
@@ -4716,7 +4893,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
4716// fmt.Println(resp) 4893// fmt.Println(resp)
4717// } 4894// }
4718// 4895//
4719// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration 4896// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
4720func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { 4897func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
4721 op := &request.Operation{ 4898 op := &request.Operation{
4722 Name: opPutBucketLifecycleConfiguration, 4899 Name: opPutBucketLifecycleConfiguration,
@@ -4746,7 +4923,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
4746// 4923//
4747// See the AWS API reference guide for Amazon Simple Storage Service's 4924// See the AWS API reference guide for Amazon Simple Storage Service's
4748// API operation PutBucketLifecycleConfiguration for usage and error information. 4925// API operation PutBucketLifecycleConfiguration for usage and error information.
4749// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration 4926// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
4750func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { 4927func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
4751 req, out := c.PutBucketLifecycleConfigurationRequest(input) 4928 req, out := c.PutBucketLifecycleConfigurationRequest(input)
4752 return out, req.Send() 4929 return out, req.Send()
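Because PutBucketLifecycle now logs a deprecation notice (see the hunk above), new code would normally go through PutBucketLifecycleConfiguration instead. A sketch with a single expiration rule; the rule ID, prefix, and retention period are illustrative, and the lifecycle types are assumed from elsewhere in this file:

    // setExpiryRule expires objects under logs/ after 30 days.
    func setExpiryRule(client *s3.S3) error {
        _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
            Bucket: aws.String("example-bucket"),
            LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
                Rules: []*s3.LifecycleRule{{
                    ID:         aws.String("expire-logs"),
                    Status:     aws.String(s3.ExpirationStatusEnabled),
                    Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
                    Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
                }},
            },
        })
        return err
    }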
@@ -4772,19 +4949,18 @@ const opPutBucketLogging = "PutBucketLogging"
4772 4949
4773// PutBucketLoggingRequest generates a "aws/request.Request" representing the 4950// PutBucketLoggingRequest generates a "aws/request.Request" representing the
4774// client's request for the PutBucketLogging operation. The "output" return 4951// client's request for the PutBucketLogging operation. The "output" return
4775// value can be used to capture response data after the request's "Send" method 4952// value will be populated with the request's response once the request completes
4776// is called. 4953// successfully.
4954//
4955// Use the "Send" method on the returned Request to send the API call to the service.
4956// The "output" return value is not valid until after Send returns without error.
4777// 4957//
4778// See PutBucketLogging for usage and error information. 4958// See PutBucketLogging for more information on using the PutBucketLogging
4959// API call, and error handling.
4779// 4960//
4780// Creating a request object using this method should be used when you want to inject 4961// This method is useful when you want to inject custom logic or configuration
4781// custom logic into the request's lifecycle using a custom handler, or if you want to 4962// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4782// access properties on the request object before or after sending the request. If
4783// you just want the service response, call the PutBucketLogging method directly
4784// instead.
4785// 4963//
4786// Note: You must call the "Send" method on the returned request object in order
4787// to execute the request.
4788// 4964//
4789// // Example sending a request using the PutBucketLoggingRequest method. 4965// // Example sending a request using the PutBucketLoggingRequest method.
4790// req, resp := client.PutBucketLoggingRequest(params) 4966// req, resp := client.PutBucketLoggingRequest(params)
@@ -4794,7 +4970,7 @@ const opPutBucketLogging = "PutBucketLogging"
4794// fmt.Println(resp) 4970// fmt.Println(resp)
4795// } 4971// }
4796// 4972//
4797// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging 4973// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
4798func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { 4974func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
4799 op := &request.Operation{ 4975 op := &request.Operation{
4800 Name: opPutBucketLogging, 4976 Name: opPutBucketLogging,
@@ -4825,7 +5001,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
4825// 5001//
4826// See the AWS API reference guide for Amazon Simple Storage Service's 5002// See the AWS API reference guide for Amazon Simple Storage Service's
4827// API operation PutBucketLogging for usage and error information. 5003// API operation PutBucketLogging for usage and error information.
4828// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging 5004// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
4829func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { 5005func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
4830 req, out := c.PutBucketLoggingRequest(input) 5006 req, out := c.PutBucketLoggingRequest(input)
4831 return out, req.Send() 5007 return out, req.Send()
@@ -4851,19 +5027,18 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
4851 5027
4852// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the 5028// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
4853// client's request for the PutBucketMetricsConfiguration operation. The "output" return 5029// client's request for the PutBucketMetricsConfiguration operation. The "output" return
4854// value can be used to capture response data after the request's "Send" method 5030// value will be populated with the request's response once the request completes
4855// is called. 5031// successfully.
5032//
5033// Use the "Send" method on the returned Request to send the API call to the service.
5034// The "output" return value is not valid until after Send returns without error.
4856// 5035//
4857// See PutBucketMetricsConfiguration for usage and error information. 5036// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration
5037// API call, and error handling.
4858// 5038//
4859// Creating a request object using this method should be used when you want to inject 5039// This method is useful when you want to inject custom logic or configuration
4860// custom logic into the request's lifecycle using a custom handler, or if you want to 5040// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4861// access properties on the request object before or after sending the request. If
4862// you just want the service response, call the PutBucketMetricsConfiguration method directly
4863// instead.
4864// 5041//
4865// Note: You must call the "Send" method on the returned request object in order
4866// to execute the request.
4867// 5042//
4868// // Example sending a request using the PutBucketMetricsConfigurationRequest method. 5043// // Example sending a request using the PutBucketMetricsConfigurationRequest method.
4869// req, resp := client.PutBucketMetricsConfigurationRequest(params) 5044// req, resp := client.PutBucketMetricsConfigurationRequest(params)
@@ -4873,7 +5048,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
4873// fmt.Println(resp) 5048// fmt.Println(resp)
4874// } 5049// }
4875// 5050//
4876// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration 5051// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
4877func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { 5052func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
4878 op := &request.Operation{ 5053 op := &request.Operation{
4879 Name: opPutBucketMetricsConfiguration, 5054 Name: opPutBucketMetricsConfiguration,
@@ -4903,7 +5078,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
4903// 5078//
4904// See the AWS API reference guide for Amazon Simple Storage Service's 5079// See the AWS API reference guide for Amazon Simple Storage Service's
4905// API operation PutBucketMetricsConfiguration for usage and error information. 5080// API operation PutBucketMetricsConfiguration for usage and error information.
4906// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration 5081// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
4907func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { 5082func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
4908 req, out := c.PutBucketMetricsConfigurationRequest(input) 5083 req, out := c.PutBucketMetricsConfigurationRequest(input)
4909 return out, req.Send() 5084 return out, req.Send()
@@ -4929,19 +5104,18 @@ const opPutBucketNotification = "PutBucketNotification"
4929 5104
4930// PutBucketNotificationRequest generates a "aws/request.Request" representing the 5105// PutBucketNotificationRequest generates a "aws/request.Request" representing the
4931// client's request for the PutBucketNotification operation. The "output" return 5106// client's request for the PutBucketNotification operation. The "output" return
4932// value can be used to capture response data after the request's "Send" method 5107// value will be populated with the request's response once the request completes
4933// is called. 5108// successfully.
4934// 5109//
4935// See PutBucketNotification for usage and error information. 5110// Use the "Send" method on the returned Request to send the API call to the service.
5111// The "output" return value is not valid until after Send returns without error.
4936// 5112//
4937// Creating a request object using this method should be used when you want to inject 5113// See PutBucketNotification for more information on using the PutBucketNotification
4938// custom logic into the request's lifecycle using a custom handler, or if you want to 5114// API call, and error handling.
4939// access properties on the request object before or after sending the request. If 5115//
4940// you just want the service response, call the PutBucketNotification method directly 5116// This method is useful when you want to inject custom logic or configuration
4941// instead. 5117// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4942// 5118//
4943// Note: You must call the "Send" method on the returned request object in order
4944// to execute the request.
4945// 5119//
4946// // Example sending a request using the PutBucketNotificationRequest method. 5120// // Example sending a request using the PutBucketNotificationRequest method.
4947// req, resp := client.PutBucketNotificationRequest(params) 5121// req, resp := client.PutBucketNotificationRequest(params)
@@ -4951,7 +5125,7 @@ const opPutBucketNotification = "PutBucketNotification"
4951// fmt.Println(resp) 5125// fmt.Println(resp)
4952// } 5126// }
4953// 5127//
4954// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification 5128// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
4955func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { 5129func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
4956 if c.Client.Config.Logger != nil { 5130 if c.Client.Config.Logger != nil {
4957 c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") 5131 c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
@@ -4983,7 +5157,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
4983// 5157//
4984// See the AWS API reference guide for Amazon Simple Storage Service's 5158// See the AWS API reference guide for Amazon Simple Storage Service's
4985// API operation PutBucketNotification for usage and error information. 5159// API operation PutBucketNotification for usage and error information.
4986// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification 5160// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
4987func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { 5161func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
4988 req, out := c.PutBucketNotificationRequest(input) 5162 req, out := c.PutBucketNotificationRequest(input)
4989 return out, req.Send() 5163 return out, req.Send()
@@ -5009,19 +5183,18 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
5009 5183
5010// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the 5184// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
5011// client's request for the PutBucketNotificationConfiguration operation. The "output" return 5185// client's request for the PutBucketNotificationConfiguration operation. The "output" return
5012// value can be used to capture response data after the request's "Send" method 5186// value will be populated with the request's response once the request completes
5013// is called. 5187// successfully.
5188//
5189// Use the "Send" method on the returned Request to send the API call to the service.
5190// The "output" return value is not valid until after Send returns without error.
5014// 5191//
5015// See PutBucketNotificationConfiguration for usage and error information. 5192// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
5193// API call, and error handling.
5016// 5194//
5017// Creating a request object using this method should be used when you want to inject 5195// This method is useful when you want to inject custom logic or configuration
5018// custom logic into the request's lifecycle using a custom handler, or if you want to 5196// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5019// access properties on the request object before or after sending the request. If
5020// you just want the service response, call the PutBucketNotificationConfiguration method directly
5021// instead.
5022// 5197//
5023// Note: You must call the "Send" method on the returned request object in order
5024// to execute the request.
5025// 5198//
5026// // Example sending a request using the PutBucketNotificationConfigurationRequest method. 5199// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
5027// req, resp := client.PutBucketNotificationConfigurationRequest(params) 5200// req, resp := client.PutBucketNotificationConfigurationRequest(params)
@@ -5031,7 +5204,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
5031// fmt.Println(resp) 5204// fmt.Println(resp)
5032// } 5205// }
5033// 5206//
5034// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration 5207// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
5035func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { 5208func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
5036 op := &request.Operation{ 5209 op := &request.Operation{
5037 Name: opPutBucketNotificationConfiguration, 5210 Name: opPutBucketNotificationConfiguration,
@@ -5060,7 +5233,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
5060// 5233//
5061// See the AWS API reference guide for Amazon Simple Storage Service's 5234// See the AWS API reference guide for Amazon Simple Storage Service's
5062// API operation PutBucketNotificationConfiguration for usage and error information. 5235// API operation PutBucketNotificationConfiguration for usage and error information.
5063// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration 5236// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
5064func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { 5237func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
5065 req, out := c.PutBucketNotificationConfigurationRequest(input) 5238 req, out := c.PutBucketNotificationConfigurationRequest(input)
5066 return out, req.Send() 5239 return out, req.Send()
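PutBucketNotification above is likewise deprecated in favor of PutBucketNotificationConfiguration. A sketch that routes object-created events to an SQS queue; the queue ARN and bucket name are illustrative, and the notification types are assumed from elsewhere in this file:

    // notifyOnCreate sends s3:ObjectCreated:* events to an SQS queue.
    func notifyOnCreate(client *s3.S3) error {
        _, err := client.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
            Bucket: aws.String("example-bucket"),
            NotificationConfiguration: &s3.NotificationConfiguration{
                QueueConfigurations: []*s3.QueueConfiguration{{
                    QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
                    Events:   []*string{aws.String("s3:ObjectCreated:*")},
                }},
            },
        })
        return err
    }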
@@ -5086,19 +5259,18 @@ const opPutBucketPolicy = "PutBucketPolicy"
5086 5259
 // PutBucketPolicyRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketPolicy for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketPolicy method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketPolicy for more information on using the PutBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutBucketPolicyRequest method.
 // req, resp := client.PutBucketPolicyRequest(params)
@@ -5108,7 +5280,7 @@ const opPutBucketPolicy = "PutBucketPolicy"
5108// fmt.Println(resp) 5280// fmt.Println(resp)
5109// } 5281// }
5110// 5282//
5111// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy 5283// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
5112func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { 5284func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
5113 op := &request.Operation{ 5285 op := &request.Operation{
5114 Name: opPutBucketPolicy, 5286 Name: opPutBucketPolicy,
@@ -5138,7 +5310,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
5138// 5310//
5139// See the AWS API reference guide for Amazon Simple Storage Service's 5311// See the AWS API reference guide for Amazon Simple Storage Service's
5140// API operation PutBucketPolicy for usage and error information. 5312// API operation PutBucketPolicy for usage and error information.
5141// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy 5313// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
5142func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { 5314func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
5143 req, out := c.PutBucketPolicyRequest(input) 5315 req, out := c.PutBucketPolicyRequest(input)
5144 return out, req.Send() 5316 return out, req.Send()
@@ -5164,19 +5336,18 @@ const opPutBucketReplication = "PutBucketReplication"
5164 5336
 // PutBucketReplicationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketReplication for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketReplication method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketReplication for more information on using the PutBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutBucketReplicationRequest method.
 // req, resp := client.PutBucketReplicationRequest(params)
@@ -5186,7 +5357,7 @@ const opPutBucketReplication = "PutBucketReplication"
5186// fmt.Println(resp) 5357// fmt.Println(resp)
5187// } 5358// }
5188// 5359//
5189// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication 5360// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
5190func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { 5361func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
5191 op := &request.Operation{ 5362 op := &request.Operation{
5192 Name: opPutBucketReplication, 5363 Name: opPutBucketReplication,
@@ -5216,7 +5387,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
5216// 5387//
5217// See the AWS API reference guide for Amazon Simple Storage Service's 5388// See the AWS API reference guide for Amazon Simple Storage Service's
5218// API operation PutBucketReplication for usage and error information. 5389// API operation PutBucketReplication for usage and error information.
5219// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication 5390// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
5220func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { 5391func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
5221 req, out := c.PutBucketReplicationRequest(input) 5392 req, out := c.PutBucketReplicationRequest(input)
5222 return out, req.Send() 5393 return out, req.Send()
@@ -5242,19 +5413,18 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
5242 5413
 // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketRequestPayment operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketRequestPayment for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketRequestPayment method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutBucketRequestPaymentRequest method.
 // req, resp := client.PutBucketRequestPaymentRequest(params)
@@ -5264,7 +5434,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
5264// fmt.Println(resp) 5434// fmt.Println(resp)
5265// } 5435// }
5266// 5436//
5267// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment 5437// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
5268func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { 5438func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
5269 op := &request.Operation{ 5439 op := &request.Operation{
5270 Name: opPutBucketRequestPayment, 5440 Name: opPutBucketRequestPayment,
@@ -5297,7 +5467,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
5297// 5467//
5298// See the AWS API reference guide for Amazon Simple Storage Service's 5468// See the AWS API reference guide for Amazon Simple Storage Service's
5299// API operation PutBucketRequestPayment for usage and error information. 5469// API operation PutBucketRequestPayment for usage and error information.
5300// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment 5470// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
5301func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { 5471func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
5302 req, out := c.PutBucketRequestPaymentRequest(input) 5472 req, out := c.PutBucketRequestPaymentRequest(input)
5303 return out, req.Send() 5473 return out, req.Send()
@@ -5323,19 +5493,18 @@ const opPutBucketTagging = "PutBucketTagging"
5323 5493
 // PutBucketTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketTagging for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketTagging method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketTagging for more information on using the PutBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutBucketTaggingRequest method.
 // req, resp := client.PutBucketTaggingRequest(params)
@@ -5345,7 +5514,7 @@ const opPutBucketTagging = "PutBucketTagging"
5345// fmt.Println(resp) 5514// fmt.Println(resp)
5346// } 5515// }
5347// 5516//
5348// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging 5517// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
5349func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { 5518func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
5350 op := &request.Operation{ 5519 op := &request.Operation{
5351 Name: opPutBucketTagging, 5520 Name: opPutBucketTagging,
@@ -5374,7 +5543,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
5374// 5543//
5375// See the AWS API reference guide for Amazon Simple Storage Service's 5544// See the AWS API reference guide for Amazon Simple Storage Service's
5376// API operation PutBucketTagging for usage and error information. 5545// API operation PutBucketTagging for usage and error information.
5377// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging 5546// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
5378func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { 5547func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
5379 req, out := c.PutBucketTaggingRequest(input) 5548 req, out := c.PutBucketTaggingRequest(input)
5380 return out, req.Send() 5549 return out, req.Send()
@@ -5400,19 +5569,18 @@ const opPutBucketVersioning = "PutBucketVersioning"
5400 5569
 // PutBucketVersioningRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketVersioning operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketVersioning for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketVersioning method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutBucketVersioningRequest method.
 // req, resp := client.PutBucketVersioningRequest(params)
@@ -5422,7 +5590,7 @@ const opPutBucketVersioning = "PutBucketVersioning"
5422// fmt.Println(resp) 5590// fmt.Println(resp)
5423// } 5591// }
5424// 5592//
5425// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning 5593// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
5426func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { 5594func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
5427 op := &request.Operation{ 5595 op := &request.Operation{
5428 Name: opPutBucketVersioning, 5596 Name: opPutBucketVersioning,
@@ -5452,7 +5620,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
5452// 5620//
5453// See the AWS API reference guide for Amazon Simple Storage Service's 5621// See the AWS API reference guide for Amazon Simple Storage Service's
5454// API operation PutBucketVersioning for usage and error information. 5622// API operation PutBucketVersioning for usage and error information.
5455// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning 5623// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
5456func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { 5624func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
5457 req, out := c.PutBucketVersioningRequest(input) 5625 req, out := c.PutBucketVersioningRequest(input)
5458 return out, req.Send() 5626 return out, req.Send()
@@ -5478,19 +5646,18 @@ const opPutBucketWebsite = "PutBucketWebsite"
5478 5646
 // PutBucketWebsiteRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutBucketWebsite for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketWebsite method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketWebsite for more information on using the PutBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutBucketWebsiteRequest method.
 // req, resp := client.PutBucketWebsiteRequest(params)
@@ -5500,7 +5667,7 @@ const opPutBucketWebsite = "PutBucketWebsite"
5500// fmt.Println(resp) 5667// fmt.Println(resp)
5501// } 5668// }
5502// 5669//
5503// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite 5670// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
5504func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { 5671func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
5505 op := &request.Operation{ 5672 op := &request.Operation{
5506 Name: opPutBucketWebsite, 5673 Name: opPutBucketWebsite,
@@ -5529,7 +5696,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
5529// 5696//
5530// See the AWS API reference guide for Amazon Simple Storage Service's 5697// See the AWS API reference guide for Amazon Simple Storage Service's
5531// API operation PutBucketWebsite for usage and error information. 5698// API operation PutBucketWebsite for usage and error information.
5532// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite 5699// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
5533func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { 5700func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
5534 req, out := c.PutBucketWebsiteRequest(input) 5701 req, out := c.PutBucketWebsiteRequest(input)
5535 return out, req.Send() 5702 return out, req.Send()
@@ -5555,19 +5722,18 @@ const opPutObject = "PutObject"
5555 5722
 // PutObjectRequest generates a "aws/request.Request" representing the
 // client's request for the PutObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutObject for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObject method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObject for more information on using the PutObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutObjectRequest method.
 // req, resp := client.PutObjectRequest(params)
@@ -5577,7 +5743,7 @@ const opPutObject = "PutObject"
5577// fmt.Println(resp) 5743// fmt.Println(resp)
5578// } 5744// }
5579// 5745//
5580// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject 5746// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
5581func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { 5747func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
5582 op := &request.Operation{ 5748 op := &request.Operation{
5583 Name: opPutObject, 5749 Name: opPutObject,
@@ -5604,7 +5770,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
5604// 5770//
5605// See the AWS API reference guide for Amazon Simple Storage Service's 5771// See the AWS API reference guide for Amazon Simple Storage Service's
5606// API operation PutObject for usage and error information. 5772// API operation PutObject for usage and error information.
5607// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject 5773// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
5608func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { 5774func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
5609 req, out := c.PutObjectRequest(input) 5775 req, out := c.PutObjectRequest(input)
5610 return out, req.Send() 5776 return out, req.Send()
@@ -5630,19 +5796,18 @@ const opPutObjectAcl = "PutObjectAcl"
5630 5796
 // PutObjectAclRequest generates a "aws/request.Request" representing the
 // client's request for the PutObjectAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutObjectAcl for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectAcl method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectAcl for more information on using the PutObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutObjectAclRequest method.
 // req, resp := client.PutObjectAclRequest(params)
@@ -5652,7 +5817,7 @@ const opPutObjectAcl = "PutObjectAcl"
5652// fmt.Println(resp) 5817// fmt.Println(resp)
5653// } 5818// }
5654// 5819//
5655// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl 5820// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
5656func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { 5821func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
5657 op := &request.Operation{ 5822 op := &request.Operation{
5658 Name: opPutObjectAcl, 5823 Name: opPutObjectAcl,
@@ -5685,7 +5850,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
5685// * ErrCodeNoSuchKey "NoSuchKey" 5850// * ErrCodeNoSuchKey "NoSuchKey"
5686// The specified key does not exist. 5851// The specified key does not exist.
5687// 5852//
5688// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl 5853// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
5689func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { 5854func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
5690 req, out := c.PutObjectAclRequest(input) 5855 req, out := c.PutObjectAclRequest(input)
5691 return out, req.Send() 5856 return out, req.Send()
@@ -5711,19 +5876,18 @@ const opPutObjectTagging = "PutObjectTagging"
5711 5876
 // PutObjectTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the PutObjectTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See PutObjectTagging for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectTagging method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectTagging for more information on using the PutObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the PutObjectTaggingRequest method.
 // req, resp := client.PutObjectTaggingRequest(params)
@@ -5733,7 +5897,7 @@ const opPutObjectTagging = "PutObjectTagging"
5733// fmt.Println(resp) 5897// fmt.Println(resp)
5734// } 5898// }
5735// 5899//
5736// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging 5900// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
5737func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { 5901func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
5738 op := &request.Operation{ 5902 op := &request.Operation{
5739 Name: opPutObjectTagging, 5903 Name: opPutObjectTagging,
@@ -5760,7 +5924,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
5760// 5924//
5761// See the AWS API reference guide for Amazon Simple Storage Service's 5925// See the AWS API reference guide for Amazon Simple Storage Service's
5762// API operation PutObjectTagging for usage and error information. 5926// API operation PutObjectTagging for usage and error information.
5763// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging 5927// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
5764func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { 5928func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
5765 req, out := c.PutObjectTaggingRequest(input) 5929 req, out := c.PutObjectTaggingRequest(input)
5766 return out, req.Send() 5930 return out, req.Send()
@@ -5786,19 +5950,18 @@ const opRestoreObject = "RestoreObject"
5786 5950
 // RestoreObjectRequest generates a "aws/request.Request" representing the
 // client's request for the RestoreObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See RestoreObject for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the RestoreObject method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the RestoreObjectRequest method.
 // req, resp := client.RestoreObjectRequest(params)
@@ -5808,7 +5971,7 @@ const opRestoreObject = "RestoreObject"
5808// fmt.Println(resp) 5971// fmt.Println(resp)
5809// } 5972// }
5810// 5973//
5811// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject 5974// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
5812func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { 5975func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
5813 op := &request.Operation{ 5976 op := &request.Operation{
5814 Name: opRestoreObject, 5977 Name: opRestoreObject,
@@ -5840,7 +6003,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
5840// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" 6003// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
5841// This operation is not allowed against this storage tier 6004// This operation is not allowed against this storage tier
5842// 6005//
5843// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject 6006// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
5844func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { 6007func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
5845 req, out := c.RestoreObjectRequest(input) 6008 req, out := c.RestoreObjectRequest(input)
5846 return out, req.Send() 6009 return out, req.Send()
@@ -5862,23 +6025,104 @@ func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput
5862 return out, req.Send() 6025 return out, req.Send()
5863} 6026}
5864 6027
6028const opSelectObjectContent = "SelectObjectContent"
6029
6030// SelectObjectContentRequest generates a "aws/request.Request" representing the
6031// client's request for the SelectObjectContent operation. The "output" return
6032// value will be populated with the request's response once the request completes
6033// successfuly.
6034//
6035// Use "Send" method on the returned Request to send the API call to the service.
6036// the "output" return value is not valid until after Send returns without error.
6037//
6038// See SelectObjectContent for more information on using the SelectObjectContent
6039// API call, and error handling.
6040//
6041// This method is useful when you want to inject custom logic or configuration
6042// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6043//
6044//
6045// // Example sending a request using the SelectObjectContentRequest method.
6046// req, resp := client.SelectObjectContentRequest(params)
6047//
6048// err := req.Send()
6049// if err == nil { // resp is now filled
6050// fmt.Println(resp)
6051// }
6052//
6053// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
6054func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) {
6055 op := &request.Operation{
6056 Name: opSelectObjectContent,
6057 HTTPMethod: "POST",
6058 HTTPPath: "/{Bucket}/{Key+}?select&select-type=2",
6059 }
6060
6061 if input == nil {
6062 input = &SelectObjectContentInput{}
6063 }
6064
6065 output = &SelectObjectContentOutput{}
6066 req = c.newRequest(op, input, output)
6067 req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
6068 req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
6069 req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop)
6070 return
6071}
6072
6073// SelectObjectContent API operation for Amazon Simple Storage Service.
6074//
6075// This operation filters the contents of an Amazon S3 object based on a simple
6076// Structured Query Language (SQL) statement. In the request, along with the
6077// SQL expression, you must also specify a data serialization format (JSON or
6078// CSV) of the object. Amazon S3 uses this to parse object data into records,
6079// and returns only records that match the specified SQL expression. You must
6080// also specify the data serialization format for the response.
6081//
6082// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6083// with awserr.Error's Code and Message methods to get detailed information about
6084// the error.
6085//
6086// See the AWS API reference guide for Amazon Simple Storage Service's
6087// API operation SelectObjectContent for usage and error information.
6088// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
6089func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
6090 req, out := c.SelectObjectContentRequest(input)
6091 return out, req.Send()
6092}
6093
6094// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
6095// the ability to pass a context and additional request options.
6096//
6097// See SelectObjectContent for details on how to use this API operation.
6098//
6099// The context must be non-nil and will be used for request cancellation. If
6100// the context is nil a panic will occur. In the future the SDK may create
6101// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6102// for more information on using Contexts.
6103func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
6104 req, out := c.SelectObjectContentRequest(input)
6105 req.SetContext(ctx)
6106 req.ApplyOptions(opts...)
6107 return out, req.Send()
6108}
6109
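Since SelectObjectContent is new in this change and delivers its results over an event stream (note the runEventStreamLoop handler wired into the request above), here is a hedged sketch of how the call is typically consumed. The bucket, key, and SQL expression are placeholders, and the event-type names follow this SDK generation's output types; they may differ in other versions. Assumes the usual fmt, aws, and s3 imports.

    // selectCSV runs an S3 Select query against a CSV object and streams the
    // matching records to stdout. Sketch only; field values are illustrative.
    func selectCSV(svc *s3.S3) error {
        out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
            Bucket:         aws.String("my-bucket"), // placeholder
            Key:            aws.String("data.csv"),  // placeholder
            ExpressionType: aws.String(s3.ExpressionTypeSql),
            Expression:     aws.String("SELECT s._1, s._2 FROM S3Object s"),
            InputSerialization: &s3.InputSerialization{
                CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoNone)},
            },
            OutputSerialization: &s3.OutputSerialization{
                CSV: &s3.CSVOutput{},
            },
        })
        if err != nil {
            return err
        }
        defer out.EventStream.Close()

        // Result records arrive as events on the stream started by runEventStreamLoop.
        for ev := range out.EventStream.Events() {
            if rec, ok := ev.(*s3.RecordsEvent); ok {
                fmt.Print(string(rec.Payload))
            }
        }
        return out.EventStream.Err()
    }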
 const opUploadPart = "UploadPart"
 
 // UploadPartRequest generates a "aws/request.Request" representing the
 // client's request for the UploadPart operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See UploadPart for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPart method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the UploadPartRequest method.
 // req, resp := client.UploadPartRequest(params)
@@ -5888,7 +6132,7 @@ const opUploadPart = "UploadPart"
5888// fmt.Println(resp) 6132// fmt.Println(resp)
5889// } 6133// }
5890// 6134//
5891// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart 6135// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
5892func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { 6136func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
5893 op := &request.Operation{ 6137 op := &request.Operation{
5894 Name: opUploadPart, 6138 Name: opUploadPart,
@@ -5921,7 +6165,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
5921// 6165//
5922// See the AWS API reference guide for Amazon Simple Storage Service's 6166// See the AWS API reference guide for Amazon Simple Storage Service's
5923// API operation UploadPart for usage and error information. 6167// API operation UploadPart for usage and error information.
5924// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart 6168// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
5925func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { 6169func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
5926 req, out := c.UploadPartRequest(input) 6170 req, out := c.UploadPartRequest(input)
5927 return out, req.Send() 6171 return out, req.Send()
@@ -5947,19 +6191,18 @@ const opUploadPartCopy = "UploadPartCopy"
5947 6191
 // UploadPartCopyRequest generates a "aws/request.Request" representing the
 // client's request for the UploadPartCopy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// See UploadPartCopy for usage and error information.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPartCopy method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
+// value will be populated with the request's response once the request completes
+// successfuly.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
 // // Example sending a request using the UploadPartCopyRequest method.
 // req, resp := client.UploadPartCopyRequest(params)
@@ -5969,7 +6212,7 @@ const opUploadPartCopy = "UploadPartCopy"
5969// fmt.Println(resp) 6212// fmt.Println(resp)
5970// } 6213// }
5971// 6214//
5972// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy 6215// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
5973func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { 6216func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
5974 op := &request.Operation{ 6217 op := &request.Operation{
5975 Name: opUploadPartCopy, 6218 Name: opUploadPartCopy,
@@ -5996,7 +6239,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
5996// 6239//
5997// See the AWS API reference guide for Amazon Simple Storage Service's 6240// See the AWS API reference guide for Amazon Simple Storage Service's
5998// API operation UploadPartCopy for usage and error information. 6241// API operation UploadPartCopy for usage and error information.
5999// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy 6242// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
6000func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { 6243func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
6001 req, out := c.UploadPartCopyRequest(input) 6244 req, out := c.UploadPartCopyRequest(input)
6002 return out, req.Send() 6245 return out, req.Send()
@@ -6020,7 +6263,6 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp
6020 6263
6021// Specifies the days since the initiation of an Incomplete Multipart Upload 6264// Specifies the days since the initiation of an Incomplete Multipart Upload
6022// that Lifecycle will wait before permanently removing all parts of the upload. 6265// that Lifecycle will wait before permanently removing all parts of the upload.
6023// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload
6024type AbortIncompleteMultipartUpload struct { 6266type AbortIncompleteMultipartUpload struct {
6025 _ struct{} `type:"structure"` 6267 _ struct{} `type:"structure"`
6026 6268
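For orientation only (not part of the diff): AbortIncompleteMultipartUpload is consumed inside a lifecycle rule, so a bucket can automatically discard parts of uploads that were never completed. A sketch assuming the LifecycleRule and LifecycleRuleFilter types in this SDK and the usual aws/s3 imports; the rule ID and the 7-day window are made up.

    // Hypothetical rule: abort multipart uploads still incomplete after 7 days.
    rule := &s3.LifecycleRule{
        ID:     aws.String("abort-stalled-uploads"),
        Status: aws.String("Enabled"),
        Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // applies to the whole bucket
        AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
            DaysAfterInitiation: aws.Int64(7),
        },
    }
    _ = rule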
@@ -6045,7 +6287,6 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
6045 return s 6287 return s
6046} 6288}
6047 6289
6048// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest
6049type AbortMultipartUploadInput struct { 6290type AbortMultipartUploadInput struct {
6050 _ struct{} `type:"structure"` 6291 _ struct{} `type:"structure"`
6051 6292
@@ -6103,6 +6344,13 @@ func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInp
6103 return s 6344 return s
6104} 6345}
6105 6346
6347func (s *AbortMultipartUploadInput) getBucket() (v string) {
6348 if s.Bucket == nil {
6349 return v
6350 }
6351 return *s.Bucket
6352}
6353
6106// SetKey sets the Key field's value. 6354// SetKey sets the Key field's value.
6107func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { 6355func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
6108 s.Key = &v 6356 s.Key = &v
@@ -6121,7 +6369,6 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI
6121 return s 6369 return s
6122} 6370}
6123 6371
6124// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput
6125type AbortMultipartUploadOutput struct { 6372type AbortMultipartUploadOutput struct {
6126 _ struct{} `type:"structure"` 6373 _ struct{} `type:"structure"`
6127 6374
@@ -6146,7 +6393,6 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart
6146 return s 6393 return s
6147} 6394}
6148 6395
6149// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration
6150type AccelerateConfiguration struct { 6396type AccelerateConfiguration struct {
6151 _ struct{} `type:"structure"` 6397 _ struct{} `type:"structure"`
6152 6398
@@ -6170,7 +6416,6 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
6170 return s 6416 return s
6171} 6417}
6172 6418
6173// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy
6174type AccessControlPolicy struct { 6419type AccessControlPolicy struct {
6175 _ struct{} `type:"structure"` 6420 _ struct{} `type:"structure"`
6176 6421
@@ -6222,7 +6467,45 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
6222 return s 6467 return s
6223} 6468}
6224 6469
6225// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator 6470// Container for information regarding the access control for replicas.
6471type AccessControlTranslation struct {
6472 _ struct{} `type:"structure"`
6473
6474 // The override value for the owner of the replica object.
6475 //
6476 // Owner is a required field
6477 Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
6478}
6479
6480// String returns the string representation
6481func (s AccessControlTranslation) String() string {
6482 return awsutil.Prettify(s)
6483}
6484
6485// GoString returns the string representation
6486func (s AccessControlTranslation) GoString() string {
6487 return s.String()
6488}
6489
6490// Validate inspects the fields of the type to determine if they are valid.
6491func (s *AccessControlTranslation) Validate() error {
6492 invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"}
6493 if s.Owner == nil {
6494 invalidParams.Add(request.NewErrParamRequired("Owner"))
6495 }
6496
6497 if invalidParams.Len() > 0 {
6498 return invalidParams
6499 }
6500 return nil
6501}
6502
6503// SetOwner sets the Owner field's value.
6504func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation {
6505 s.Owner = &v
6506 return s
6507}
6508
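For context (again, not part of the change itself): AccessControlTranslation is meant to hang off a replication rule's destination so that replicas become owned by the destination account. A sketch, assuming the Destination struct in this SDK generation exposes Account and AccessControlTranslation fields as in contemporaneous releases; the ARN and account ID are placeholders.

    dest := &s3.Destination{
        Bucket:  aws.String("arn:aws:s3:::destination-bucket"), // placeholder ARN
        Account: aws.String("111122223333"),                    // placeholder account ID
        AccessControlTranslation: &s3.AccessControlTranslation{
            Owner: aws.String(s3.OwnerOverrideDestination), // override replica ownership
        },
    }
    _ = dest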
6226type AnalyticsAndOperator struct { 6509type AnalyticsAndOperator struct {
6227 _ struct{} `type:"structure"` 6510 _ struct{} `type:"structure"`
6228 6511
@@ -6275,7 +6558,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
6275 return s 6558 return s
6276} 6559}
6277 6560
6278// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration
6279type AnalyticsConfiguration struct { 6561type AnalyticsConfiguration struct {
6280 _ struct{} `type:"structure"` 6562 _ struct{} `type:"structure"`
6281 6563
@@ -6350,7 +6632,6 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis
6350 return s 6632 return s
6351} 6633}
6352 6634
6353// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination
6354type AnalyticsExportDestination struct { 6635type AnalyticsExportDestination struct {
6355 _ struct{} `type:"structure"` 6636 _ struct{} `type:"structure"`
6356 6637
@@ -6394,7 +6675,6 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket
6394 return s 6675 return s
6395} 6676}
6396 6677
6397// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter
6398type AnalyticsFilter struct { 6678type AnalyticsFilter struct {
6399 _ struct{} `type:"structure"` 6679 _ struct{} `type:"structure"`
6400 6680
@@ -6457,7 +6737,6 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
6457 return s 6737 return s
6458} 6738}
6459 6739
6460// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination
6461type AnalyticsS3BucketDestination struct { 6740type AnalyticsS3BucketDestination struct {
6462 _ struct{} `type:"structure"` 6741 _ struct{} `type:"structure"`
6463 6742
@@ -6512,6 +6791,13 @@ func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDes
6512 return s 6791 return s
6513} 6792}
6514 6793
6794func (s *AnalyticsS3BucketDestination) getBucket() (v string) {
6795 if s.Bucket == nil {
6796 return v
6797 }
6798 return *s.Bucket
6799}
6800
6515// SetBucketAccountId sets the BucketAccountId field's value. 6801// SetBucketAccountId sets the BucketAccountId field's value.
6516func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { 6802func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
6517 s.BucketAccountId = &v 6803 s.BucketAccountId = &v
@@ -6530,12 +6816,11 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes
6530 return s 6816 return s
6531} 6817}
6532 6818
6533// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket
6534type Bucket struct { 6819type Bucket struct {
6535 _ struct{} `type:"structure"` 6820 _ struct{} `type:"structure"`
6536 6821
6537 // Date the bucket was created. 6822 // Date the bucket was created.
6538 CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` 6823 CreationDate *time.Time `type:"timestamp"`
6539 6824
6540 // The name of the bucket. 6825 // The name of the bucket.
6541 Name *string `type:"string"` 6826 Name *string `type:"string"`
@@ -6563,7 +6848,6 @@ func (s *Bucket) SetName(v string) *Bucket {
6563 return s 6848 return s
6564} 6849}
6565 6850
6566// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration
6567type BucketLifecycleConfiguration struct { 6851type BucketLifecycleConfiguration struct {
6568 _ struct{} `type:"structure"` 6852 _ struct{} `type:"structure"`
6569 6853
@@ -6610,10 +6894,12 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec
6610 return s 6894 return s
6611} 6895}
6612 6896
6613// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus
6614type BucketLoggingStatus struct { 6897type BucketLoggingStatus struct {
6615 _ struct{} `type:"structure"` 6898 _ struct{} `type:"structure"`
6616 6899
6900 // Container for logging information. Presence of this element indicates that
6901 // logging is enabled. Parameters TargetBucket and TargetPrefix are required
6902 // in this case.
6617 LoggingEnabled *LoggingEnabled `type:"structure"` 6903 LoggingEnabled *LoggingEnabled `type:"structure"`
6618} 6904}
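The comment added above notes that when LoggingEnabled is present, its TargetBucket and TargetPrefix parameters are required. A minimal sketch of a valid value (bucket name and prefix are placeholders; assumes the usual aws/s3 imports):

    status := &s3.BucketLoggingStatus{
        LoggingEnabled: &s3.LoggingEnabled{
            TargetBucket: aws.String("my-log-bucket"), // placeholder
            TargetPrefix: aws.String("access-logs/"),
        },
    }
    _ = status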
6619 6905
@@ -6648,7 +6934,6 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin
6648 return s 6934 return s
6649} 6935}
6650 6936
6651// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration
6652type CORSConfiguration struct { 6937type CORSConfiguration struct {
6653 _ struct{} `type:"structure"` 6938 _ struct{} `type:"structure"`
6654 6939
@@ -6695,7 +6980,6 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
6695 return s 6980 return s
6696} 6981}
6697 6982
6698// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule
6699type CORSRule struct { 6983type CORSRule struct {
6700 _ struct{} `type:"structure"` 6984 _ struct{} `type:"structure"`
6701 6985
@@ -6779,7 +7063,149 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
6779 return s 7063 return s
6780} 7064}
6781 7065
6782// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration 7066// Describes how a CSV-formatted input object is formatted.
7067type CSVInput struct {
7068 _ struct{} `type:"structure"`
7069
7070 // Specifies that CSV field values may contain quoted record delimiters and
7071 // such records should be allowed. Default value is FALSE. Setting this value
7072 // to TRUE may lower performance.
7073 AllowQuotedRecordDelimiter *bool `type:"boolean"`
7074
7075 // Single character used to indicate a row should be ignored when present at
7076 // the start of a row.
7077 Comments *string `type:"string"`
7078
7079 // Value used to separate individual fields in a record.
7080 FieldDelimiter *string `type:"string"`
7081
7082 // Describes the first line of input. Valid values: None, Ignore, Use.
7083 FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
7084
7085 // Value used for escaping where the field delimiter is part of the value.
7086 QuoteCharacter *string `type:"string"`
7087
7088 // Single character used for escaping the quote character inside an already
7089 // escaped value.
7090 QuoteEscapeCharacter *string `type:"string"`
7091
7092 // Value used to separate individual records.
7093 RecordDelimiter *string `type:"string"`
7094}
7095
7096// String returns the string representation
7097func (s CSVInput) String() string {
7098 return awsutil.Prettify(s)
7099}
7100
7101// GoString returns the string representation
7102func (s CSVInput) GoString() string {
7103 return s.String()
7104}
7105
7106// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
7107func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
7108 s.AllowQuotedRecordDelimiter = &v
7109 return s
7110}
7111
7112// SetComments sets the Comments field's value.
7113func (s *CSVInput) SetComments(v string) *CSVInput {
7114 s.Comments = &v
7115 return s
7116}
7117
7118// SetFieldDelimiter sets the FieldDelimiter field's value.
7119func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput {
7120 s.FieldDelimiter = &v
7121 return s
7122}
7123
7124// SetFileHeaderInfo sets the FileHeaderInfo field's value.
7125func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput {
7126 s.FileHeaderInfo = &v
7127 return s
7128}
7129
7130// SetQuoteCharacter sets the QuoteCharacter field's value.
7131func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput {
7132 s.QuoteCharacter = &v
7133 return s
7134}
7135
7136// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
7137func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput {
7138 s.QuoteEscapeCharacter = &v
7139 return s
7140}
7141
7142// SetRecordDelimiter sets the RecordDelimiter field's value.
7143func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput {
7144 s.RecordDelimiter = &v
7145 return s
7146}
7147
7148// Describes how CSV-formatted results are formatted.
7149type CSVOutput struct {
7150 _ struct{} `type:"structure"`
7151
7152 // Value used to separate individual fields in a record.
7153 FieldDelimiter *string `type:"string"`
7154
7155 // Value used for escaping where the field delimiter is part of the value.
7156 QuoteCharacter *string `type:"string"`
7157
7158 // Single character used for escaping the quote character inside an already
7159 // escaped value.
7160 QuoteEscapeCharacter *string `type:"string"`
7161
7162 // Indicates whether or not all output fields should be quoted.
7163 QuoteFields *string `type:"string" enum:"QuoteFields"`
7164
7165 // Value used to separate individual records.
7166 RecordDelimiter *string `type:"string"`
7167}
7168
7169// String returns the string representation
7170func (s CSVOutput) String() string {
7171 return awsutil.Prettify(s)
7172}
7173
7174// GoString returns the string representation
7175func (s CSVOutput) GoString() string {
7176 return s.String()
7177}
7178
7179// SetFieldDelimiter sets the FieldDelimiter field's value.
7180func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput {
7181 s.FieldDelimiter = &v
7182 return s
7183}
7184
7185// SetQuoteCharacter sets the QuoteCharacter field's value.
7186func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput {
7187 s.QuoteCharacter = &v
7188 return s
7189}
7190
7191// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
7192func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput {
7193 s.QuoteEscapeCharacter = &v
7194 return s
7195}
7196
7197// SetQuoteFields sets the QuoteFields field's value.
7198func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput {
7199 s.QuoteFields = &v
7200 return s
7201}
7202
7203// SetRecordDelimiter sets the RecordDelimiter field's value.
7204func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput {
7205 s.RecordDelimiter = &v
7206 return s
7207}
7208
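A hedged sketch of populating the new CSVInput and CSVOutput types with the setters defined above, assuming the generated package is imported as s3 (e.g. github.com/aws/aws-sdk-go/service/s3); the delimiter choices follow the field comments and the FileHeaderInfo string uses one of the values listed there:

func exampleCSVSerialization() (*s3.CSVInput, *s3.CSVOutput) {
	in := &s3.CSVInput{}
	in.SetFileHeaderInfo("Use") // valid values per the comment above: None, Ignore, Use
	in.SetFieldDelimiter(",")
	in.SetQuoteCharacter(`"`)
	in.SetRecordDelimiter("\n")

	out := &s3.CSVOutput{}
	out.SetFieldDelimiter(",")
	out.SetRecordDelimiter("\n")
	return in, out
}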
6783type CloudFunctionConfiguration struct { 7209type CloudFunctionConfiguration struct {
6784 _ struct{} `type:"structure"` 7210 _ struct{} `type:"structure"`
6785 7211
@@ -6837,7 +7263,6 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC
6837 return s 7263 return s
6838} 7264}
6839 7265
6840// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix
6841type CommonPrefix struct { 7266type CommonPrefix struct {
6842 _ struct{} `type:"structure"` 7267 _ struct{} `type:"structure"`
6843 7268
@@ -6860,7 +7285,6 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
6860 return s 7285 return s
6861} 7286}
6862 7287
6863// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest
6864type CompleteMultipartUploadInput struct { 7288type CompleteMultipartUploadInput struct {
6865 _ struct{} `type:"structure" payload:"MultipartUpload"` 7289 _ struct{} `type:"structure" payload:"MultipartUpload"`
6866 7290
@@ -6870,7 +7294,7 @@ type CompleteMultipartUploadInput struct {
6870 // Key is a required field 7294 // Key is a required field
6871 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` 7295 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
6872 7296
6873 MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` 7297 MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
6874 7298
6875 // Confirms that the requester knows that she or he will be charged for the 7299 // Confirms that the requester knows that she or he will be charged for the
6876 // request. Bucket owners need not specify this parameter in their requests. 7300 // request. Bucket owners need not specify this parameter in their requests.
@@ -6920,6 +7344,13 @@ func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUpl
6920 return s 7344 return s
6921} 7345}
6922 7346
7347func (s *CompleteMultipartUploadInput) getBucket() (v string) {
7348 if s.Bucket == nil {
7349 return v
7350 }
7351 return *s.Bucket
7352}
7353
6923// SetKey sets the Key field's value. 7354// SetKey sets the Key field's value.
6924func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { 7355func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
6925 s.Key = &v 7356 s.Key = &v
@@ -6944,7 +7375,6 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU
6944 return s 7375 return s
6945} 7376}
6946 7377
6947// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput
6948type CompleteMultipartUploadOutput struct { 7378type CompleteMultipartUploadOutput struct {
6949 _ struct{} `type:"structure"` 7379 _ struct{} `type:"structure"`
6950 7380
@@ -6993,6 +7423,13 @@ func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUp
6993 return s 7423 return s
6994} 7424}
6995 7425
7426func (s *CompleteMultipartUploadOutput) getBucket() (v string) {
7427 if s.Bucket == nil {
7428 return v
7429 }
7430 return *s.Bucket
7431}
7432
6996// SetETag sets the ETag field's value. 7433// SetETag sets the ETag field's value.
6997func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { 7434func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
6998 s.ETag = &v 7435 s.ETag = &v
@@ -7041,7 +7478,6 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar
7041 return s 7478 return s
7042} 7479}
7043 7480
7044// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
7045type CompletedMultipartUpload struct { 7481type CompletedMultipartUpload struct {
7046 _ struct{} `type:"structure"` 7482 _ struct{} `type:"structure"`
7047 7483
@@ -7064,7 +7500,6 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip
7064 return s 7500 return s
7065} 7501}
7066 7502
7067// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
7068type CompletedPart struct { 7503type CompletedPart struct {
7069 _ struct{} `type:"structure"` 7504 _ struct{} `type:"structure"`
7070 7505
@@ -7098,7 +7533,6 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
7098 return s 7533 return s
7099} 7534}
7100 7535
7101// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
7102type Condition struct { 7536type Condition struct {
7103 _ struct{} `type:"structure"` 7537 _ struct{} `type:"structure"`
7104 7538
@@ -7141,7 +7575,32 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
7141 return s 7575 return s
7142} 7576}
7143 7577
7144// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest 7578type ContinuationEvent struct {
7579 _ struct{} `locationName:"ContinuationEvent" type:"structure"`
7580}
7581
7582// String returns the string representation
7583func (s ContinuationEvent) String() string {
7584 return awsutil.Prettify(s)
7585}
7586
7587// GoString returns the string representation
7588func (s ContinuationEvent) GoString() string {
7589 return s.String()
7590}
7591
 7592// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
7593func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
7594
7595// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
7596// This method is only used internally within the SDK's EventStream handling.
7597func (s *ContinuationEvent) UnmarshalEvent(
7598 payloadUnmarshaler protocol.PayloadUnmarshaler,
7599 msg eventstream.Message,
7600) error {
7601 return nil
7602}
7603
7145type CopyObjectInput struct { 7604type CopyObjectInput struct {
7146 _ struct{} `type:"structure"` 7605 _ struct{} `type:"structure"`
7147 7606
@@ -7178,14 +7637,14 @@ type CopyObjectInput struct {
7178 CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` 7637 CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
7179 7638
7180 // Copies the object if it has been modified since the specified time. 7639 // Copies the object if it has been modified since the specified time.
7181 CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` 7640 CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
7182 7641
7183 // Copies the object if its entity tag (ETag) is different than the specified 7642 // Copies the object if its entity tag (ETag) is different than the specified
7184 // ETag. 7643 // ETag.
7185 CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` 7644 CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
7186 7645
7187 // Copies the object if it hasn't been modified since the specified time. 7646 // Copies the object if it hasn't been modified since the specified time.
7188 CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` 7647 CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
7189 7648
7190 // Specifies the algorithm to use when decrypting the source object (e.g., AES256). 7649 // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
7191 CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` 7650 CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
@@ -7201,7 +7660,7 @@ type CopyObjectInput struct {
7201 CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` 7660 CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
7202 7661
7203 // The date and time at which the object is no longer cacheable. 7662 // The date and time at which the object is no longer cacheable.
7204 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` 7663 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
7205 7664
7206 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 7665 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
7207 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` 7666 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
@@ -7318,6 +7777,13 @@ func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
7318 return s 7777 return s
7319} 7778}
7320 7779
7780func (s *CopyObjectInput) getBucket() (v string) {
7781 if s.Bucket == nil {
7782 return v
7783 }
7784 return *s.Bucket
7785}
7786
7321// SetCacheControl sets the CacheControl field's value. 7787// SetCacheControl sets the CacheControl field's value.
7322func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { 7788func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
7323 s.CacheControl = &v 7789 s.CacheControl = &v
@@ -7390,6 +7856,13 @@ func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput
7390 return s 7856 return s
7391} 7857}
7392 7858
7859func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) {
7860 if s.CopySourceSSECustomerKey == nil {
7861 return v
7862 }
7863 return *s.CopySourceSSECustomerKey
7864}
7865
7393// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. 7866// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
7394func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { 7867func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
7395 s.CopySourceSSECustomerKeyMD5 = &v 7868 s.CopySourceSSECustomerKeyMD5 = &v
@@ -7462,6 +7935,13 @@ func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
7462 return s 7935 return s
7463} 7936}
7464 7937
7938func (s *CopyObjectInput) getSSECustomerKey() (v string) {
7939 if s.SSECustomerKey == nil {
7940 return v
7941 }
7942 return *s.SSECustomerKey
7943}
7944
7465// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 7945// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
7466func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { 7946func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
7467 s.SSECustomerKeyMD5 = &v 7947 s.SSECustomerKeyMD5 = &v
@@ -7504,7 +7984,6 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput
7504 return s 7984 return s
7505} 7985}
7506 7986
7507// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput
7508type CopyObjectOutput struct { 7987type CopyObjectOutput struct {
7509 _ struct{} `type:"structure" payload:"CopyObjectResult"` 7988 _ struct{} `type:"structure" payload:"CopyObjectResult"`
7510 7989
@@ -7605,13 +8084,12 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
7605 return s 8084 return s
7606} 8085}
7607 8086
7608// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult
7609type CopyObjectResult struct { 8087type CopyObjectResult struct {
7610 _ struct{} `type:"structure"` 8088 _ struct{} `type:"structure"`
7611 8089
7612 ETag *string `type:"string"` 8090 ETag *string `type:"string"`
7613 8091
7614 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` 8092 LastModified *time.Time `type:"timestamp"`
7615} 8093}
7616 8094
7617// String returns the string representation 8095// String returns the string representation
@@ -7636,7 +8114,6 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
7636 return s 8114 return s
7637} 8115}
7638 8116
7639// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult
7640type CopyPartResult struct { 8117type CopyPartResult struct {
7641 _ struct{} `type:"structure"` 8118 _ struct{} `type:"structure"`
7642 8119
@@ -7644,7 +8121,7 @@ type CopyPartResult struct {
7644 ETag *string `type:"string"` 8121 ETag *string `type:"string"`
7645 8122
7646 // Date and time at which the object was uploaded. 8123 // Date and time at which the object was uploaded.
7647 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` 8124 LastModified *time.Time `type:"timestamp"`
7648} 8125}
7649 8126
7650// String returns the string representation 8127// String returns the string representation
@@ -7669,7 +8146,6 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
7669 return s 8146 return s
7670} 8147}
7671 8148
7672// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration
7673type CreateBucketConfiguration struct { 8149type CreateBucketConfiguration struct {
7674 _ struct{} `type:"structure"` 8150 _ struct{} `type:"structure"`
7675 8151
@@ -7694,7 +8170,6 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke
7694 return s 8170 return s
7695} 8171}
7696 8172
7697// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest
7698type CreateBucketInput struct { 8173type CreateBucketInput struct {
7699 _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` 8174 _ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
7700 8175
@@ -7704,7 +8179,7 @@ type CreateBucketInput struct {
7704 // Bucket is a required field 8179 // Bucket is a required field
7705 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 8180 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
7706 8181
7707 CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` 8182 CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
7708 8183
7709 // Allows grantee the read, write, read ACP, and write ACP permissions on the 8184 // Allows grantee the read, write, read ACP, and write ACP permissions on the
7710 // bucket. 8185 // bucket.
@@ -7758,6 +8233,13 @@ func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
7758 return s 8233 return s
7759} 8234}
7760 8235
8236func (s *CreateBucketInput) getBucket() (v string) {
8237 if s.Bucket == nil {
8238 return v
8239 }
8240 return *s.Bucket
8241}
8242
7761// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. 8243// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
7762func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { 8244func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
7763 s.CreateBucketConfiguration = v 8245 s.CreateBucketConfiguration = v
@@ -7794,7 +8276,6 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
7794 return s 8276 return s
7795} 8277}
7796 8278
7797// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput
7798type CreateBucketOutput struct { 8279type CreateBucketOutput struct {
7799 _ struct{} `type:"structure"` 8280 _ struct{} `type:"structure"`
7800 8281
@@ -7817,7 +8298,6 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
7817 return s 8298 return s
7818} 8299}
7819 8300
7820// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest
7821type CreateMultipartUploadInput struct { 8301type CreateMultipartUploadInput struct {
7822 _ struct{} `type:"structure"` 8302 _ struct{} `type:"structure"`
7823 8303
@@ -7845,7 +8325,7 @@ type CreateMultipartUploadInput struct {
7845 ContentType *string `location:"header" locationName:"Content-Type" type:"string"` 8325 ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
7846 8326
7847 // The date and time at which the object is no longer cacheable. 8327 // The date and time at which the object is no longer cacheable.
7848 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` 8328 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
7849 8329
7850 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 8330 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
7851 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` 8331 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
@@ -7899,6 +8379,9 @@ type CreateMultipartUploadInput struct {
7899 // The type of storage to use for the object. Defaults to 'STANDARD'. 8379 // The type of storage to use for the object. Defaults to 'STANDARD'.
7900 StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` 8380 StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
7901 8381
 8382 // The tag-set for the object. The tag-set must be encoded as URL query parameters.
8383 Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
8384
7902 // If the bucket is configured as a website, redirects requests for this object 8385 // If the bucket is configured as a website, redirects requests for this object
7903 // to another object in the same bucket or to an external URL. Amazon S3 stores 8386 // to another object in the same bucket or to an external URL. Amazon S3 stores
7904 // the value of this header in the object metadata. 8387 // the value of this header in the object metadata.
@@ -7946,6 +8429,13 @@ func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadI
7946 return s 8429 return s
7947} 8430}
7948 8431
8432func (s *CreateMultipartUploadInput) getBucket() (v string) {
8433 if s.Bucket == nil {
8434 return v
8435 }
8436 return *s.Bucket
8437}
8438
7949// SetCacheControl sets the CacheControl field's value. 8439// SetCacheControl sets the CacheControl field's value.
7950func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { 8440func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
7951 s.CacheControl = &v 8441 s.CacheControl = &v
@@ -8036,6 +8526,13 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipar
8036 return s 8526 return s
8037} 8527}
8038 8528
8529func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) {
8530 if s.SSECustomerKey == nil {
8531 return v
8532 }
8533 return *s.SSECustomerKey
8534}
8535
8039// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 8536// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
8040func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { 8537func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
8041 s.SSECustomerKeyMD5 = &v 8538 s.SSECustomerKeyMD5 = &v
@@ -8060,18 +8557,23 @@ func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartU
8060 return s 8557 return s
8061} 8558}
8062 8559
8560// SetTagging sets the Tagging field's value.
8561func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput {
8562 s.Tagging = &v
8563 return s
8564}
8565
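The new Tagging field above carries the tag-set encoded as URL query parameters. A small sketch of building that header value, assuming the standard library net/url package and the generated s3 package; the tag keys and values are placeholders:

func exampleTaggedUpload() *s3.CreateMultipartUploadInput {
	tags := url.Values{}
	tags.Set("project", "blue")
	tags.Set("team", "storage")

	in := &s3.CreateMultipartUploadInput{}
	in.SetBucket("example-bucket")
	in.SetKey("example/object.txt")
	in.SetTagging(tags.Encode()) // "project=blue&team=storage"
	return in
}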
8063// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 8566// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
8064func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { 8567func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
8065 s.WebsiteRedirectLocation = &v 8568 s.WebsiteRedirectLocation = &v
8066 return s 8569 return s
8067} 8570}
8068 8571
8069// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput
8070type CreateMultipartUploadOutput struct { 8572type CreateMultipartUploadOutput struct {
8071 _ struct{} `type:"structure"` 8573 _ struct{} `type:"structure"`
8072 8574
8073 // Date when multipart upload will become eligible for abort operation by lifecycle. 8575 // Date when multipart upload will become eligible for abort operation by lifecycle.
8074 AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` 8576 AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
8075 8577
8076 // Id of the lifecycle rule that makes a multipart upload eligible for abort 8578 // Id of the lifecycle rule that makes a multipart upload eligible for abort
8077 // operation. 8579 // operation.
@@ -8137,6 +8639,13 @@ func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUpload
8137 return s 8639 return s
8138} 8640}
8139 8641
8642func (s *CreateMultipartUploadOutput) getBucket() (v string) {
8643 if s.Bucket == nil {
8644 return v
8645 }
8646 return *s.Bucket
8647}
8648
8140// SetKey sets the Key field's value. 8649// SetKey sets the Key field's value.
8141func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { 8650func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
8142 s.Key = &v 8651 s.Key = &v
@@ -8179,7 +8688,6 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo
8179 return s 8688 return s
8180} 8689}
8181 8690
8182// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete
8183type Delete struct { 8691type Delete struct {
8184 _ struct{} `type:"structure"` 8692 _ struct{} `type:"structure"`
8185 8693
@@ -8236,7 +8744,6 @@ func (s *Delete) SetQuiet(v bool) *Delete {
8236 return s 8744 return s
8237} 8745}
8238 8746
8239// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest
8240type DeleteBucketAnalyticsConfigurationInput struct { 8747type DeleteBucketAnalyticsConfigurationInput struct {
8241 _ struct{} `type:"structure"` 8748 _ struct{} `type:"structure"`
8242 8749
@@ -8283,13 +8790,19 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBuc
8283 return s 8790 return s
8284} 8791}
8285 8792
8793func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) {
8794 if s.Bucket == nil {
8795 return v
8796 }
8797 return *s.Bucket
8798}
8799
8286// SetId sets the Id field's value. 8800// SetId sets the Id field's value.
8287func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { 8801func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
8288 s.Id = &v 8802 s.Id = &v
8289 return s 8803 return s
8290} 8804}
8291 8805
8292// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput
8293type DeleteBucketAnalyticsConfigurationOutput struct { 8806type DeleteBucketAnalyticsConfigurationOutput struct {
8294 _ struct{} `type:"structure"` 8807 _ struct{} `type:"structure"`
8295} 8808}
@@ -8304,7 +8817,6 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
8304 return s.String() 8817 return s.String()
8305} 8818}
8306 8819
8307// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest
8308type DeleteBucketCorsInput struct { 8820type DeleteBucketCorsInput struct {
8309 _ struct{} `type:"structure"` 8821 _ struct{} `type:"structure"`
8310 8822
@@ -8341,7 +8853,13 @@ func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
8341 return s 8853 return s
8342} 8854}
8343 8855
8344// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput 8856func (s *DeleteBucketCorsInput) getBucket() (v string) {
8857 if s.Bucket == nil {
8858 return v
8859 }
8860 return *s.Bucket
8861}
8862
8345type DeleteBucketCorsOutput struct { 8863type DeleteBucketCorsOutput struct {
8346 _ struct{} `type:"structure"` 8864 _ struct{} `type:"structure"`
8347} 8865}
@@ -8356,7 +8874,66 @@ func (s DeleteBucketCorsOutput) GoString() string {
8356 return s.String() 8874 return s.String()
8357} 8875}
8358 8876
8359// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest 8877type DeleteBucketEncryptionInput struct {
8878 _ struct{} `type:"structure"`
8879
8880 // The name of the bucket containing the server-side encryption configuration
8881 // to delete.
8882 //
8883 // Bucket is a required field
8884 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8885}
8886
8887// String returns the string representation
8888func (s DeleteBucketEncryptionInput) String() string {
8889 return awsutil.Prettify(s)
8890}
8891
8892// GoString returns the string representation
8893func (s DeleteBucketEncryptionInput) GoString() string {
8894 return s.String()
8895}
8896
8897// Validate inspects the fields of the type to determine if they are valid.
8898func (s *DeleteBucketEncryptionInput) Validate() error {
8899 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"}
8900 if s.Bucket == nil {
8901 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8902 }
8903
8904 if invalidParams.Len() > 0 {
8905 return invalidParams
8906 }
8907 return nil
8908}
8909
8910// SetBucket sets the Bucket field's value.
8911func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput {
8912 s.Bucket = &v
8913 return s
8914}
8915
8916func (s *DeleteBucketEncryptionInput) getBucket() (v string) {
8917 if s.Bucket == nil {
8918 return v
8919 }
8920 return *s.Bucket
8921}
8922
8923type DeleteBucketEncryptionOutput struct {
8924 _ struct{} `type:"structure"`
8925}
8926
8927// String returns the string representation
8928func (s DeleteBucketEncryptionOutput) String() string {
8929 return awsutil.Prettify(s)
8930}
8931
8932// GoString returns the string representation
8933func (s DeleteBucketEncryptionOutput) GoString() string {
8934 return s.String()
8935}
8936
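A brief sketch of how the new DeleteBucketEncryptionInput might be built and validated before the corresponding request is sent; the bucket name is a placeholder and the s3 package alias is assumed:

func exampleDeleteBucketEncryptionInput() (*s3.DeleteBucketEncryptionInput, error) {
	in := &s3.DeleteBucketEncryptionInput{}
	in.SetBucket("example-bucket") // Bucket is the only required field
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}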
8360type DeleteBucketInput struct { 8937type DeleteBucketInput struct {
8361 _ struct{} `type:"structure"` 8938 _ struct{} `type:"structure"`
8362 8939
@@ -8393,7 +8970,13 @@ func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
8393 return s 8970 return s
8394} 8971}
8395 8972
8396// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest 8973func (s *DeleteBucketInput) getBucket() (v string) {
8974 if s.Bucket == nil {
8975 return v
8976 }
8977 return *s.Bucket
8978}
8979
8397type DeleteBucketInventoryConfigurationInput struct { 8980type DeleteBucketInventoryConfigurationInput struct {
8398 _ struct{} `type:"structure"` 8981 _ struct{} `type:"structure"`
8399 8982
@@ -8440,13 +9023,19 @@ func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBuc
8440 return s 9023 return s
8441} 9024}
8442 9025
9026func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) {
9027 if s.Bucket == nil {
9028 return v
9029 }
9030 return *s.Bucket
9031}
9032
8443// SetId sets the Id field's value. 9033// SetId sets the Id field's value.
8444func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { 9034func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
8445 s.Id = &v 9035 s.Id = &v
8446 return s 9036 return s
8447} 9037}
8448 9038
8449// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput
8450type DeleteBucketInventoryConfigurationOutput struct { 9039type DeleteBucketInventoryConfigurationOutput struct {
8451 _ struct{} `type:"structure"` 9040 _ struct{} `type:"structure"`
8452} 9041}
@@ -8461,7 +9050,6 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
8461 return s.String() 9050 return s.String()
8462} 9051}
8463 9052
8464// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest
8465type DeleteBucketLifecycleInput struct { 9053type DeleteBucketLifecycleInput struct {
8466 _ struct{} `type:"structure"` 9054 _ struct{} `type:"structure"`
8467 9055
@@ -8498,7 +9086,13 @@ func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleI
8498 return s 9086 return s
8499} 9087}
8500 9088
8501// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput 9089func (s *DeleteBucketLifecycleInput) getBucket() (v string) {
9090 if s.Bucket == nil {
9091 return v
9092 }
9093 return *s.Bucket
9094}
9095
8502type DeleteBucketLifecycleOutput struct { 9096type DeleteBucketLifecycleOutput struct {
8503 _ struct{} `type:"structure"` 9097 _ struct{} `type:"structure"`
8504} 9098}
@@ -8513,7 +9107,6 @@ func (s DeleteBucketLifecycleOutput) GoString() string {
8513 return s.String() 9107 return s.String()
8514} 9108}
8515 9109
8516// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest
8517type DeleteBucketMetricsConfigurationInput struct { 9110type DeleteBucketMetricsConfigurationInput struct {
8518 _ struct{} `type:"structure"` 9111 _ struct{} `type:"structure"`
8519 9112
@@ -8560,13 +9153,19 @@ func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucke
8560 return s 9153 return s
8561} 9154}
8562 9155
9156func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) {
9157 if s.Bucket == nil {
9158 return v
9159 }
9160 return *s.Bucket
9161}
9162
8563// SetId sets the Id field's value. 9163// SetId sets the Id field's value.
8564func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { 9164func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
8565 s.Id = &v 9165 s.Id = &v
8566 return s 9166 return s
8567} 9167}
8568 9168
8569// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput
8570type DeleteBucketMetricsConfigurationOutput struct { 9169type DeleteBucketMetricsConfigurationOutput struct {
8571 _ struct{} `type:"structure"` 9170 _ struct{} `type:"structure"`
8572} 9171}
@@ -8581,7 +9180,6 @@ func (s DeleteBucketMetricsConfigurationOutput) GoString() string {
8581 return s.String() 9180 return s.String()
8582} 9181}
8583 9182
8584// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput
8585type DeleteBucketOutput struct { 9183type DeleteBucketOutput struct {
8586 _ struct{} `type:"structure"` 9184 _ struct{} `type:"structure"`
8587} 9185}
@@ -8596,7 +9194,6 @@ func (s DeleteBucketOutput) GoString() string {
8596 return s.String() 9194 return s.String()
8597} 9195}
8598 9196
8599// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest
8600type DeleteBucketPolicyInput struct { 9197type DeleteBucketPolicyInput struct {
8601 _ struct{} `type:"structure"` 9198 _ struct{} `type:"structure"`
8602 9199
@@ -8633,7 +9230,13 @@ func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
8633 return s 9230 return s
8634} 9231}
8635 9232
8636// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput 9233func (s *DeleteBucketPolicyInput) getBucket() (v string) {
9234 if s.Bucket == nil {
9235 return v
9236 }
9237 return *s.Bucket
9238}
9239
8637type DeleteBucketPolicyOutput struct { 9240type DeleteBucketPolicyOutput struct {
8638 _ struct{} `type:"structure"` 9241 _ struct{} `type:"structure"`
8639} 9242}
@@ -8648,7 +9251,6 @@ func (s DeleteBucketPolicyOutput) GoString() string {
8648 return s.String() 9251 return s.String()
8649} 9252}
8650 9253
8651// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest
8652type DeleteBucketReplicationInput struct { 9254type DeleteBucketReplicationInput struct {
8653 _ struct{} `type:"structure"` 9255 _ struct{} `type:"structure"`
8654 9256
@@ -8685,7 +9287,13 @@ func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicat
8685 return s 9287 return s
8686} 9288}
8687 9289
8688// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput 9290func (s *DeleteBucketReplicationInput) getBucket() (v string) {
9291 if s.Bucket == nil {
9292 return v
9293 }
9294 return *s.Bucket
9295}
9296
8689type DeleteBucketReplicationOutput struct { 9297type DeleteBucketReplicationOutput struct {
8690 _ struct{} `type:"structure"` 9298 _ struct{} `type:"structure"`
8691} 9299}
@@ -8700,7 +9308,6 @@ func (s DeleteBucketReplicationOutput) GoString() string {
8700 return s.String() 9308 return s.String()
8701} 9309}
8702 9310
8703// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest
8704type DeleteBucketTaggingInput struct { 9311type DeleteBucketTaggingInput struct {
8705 _ struct{} `type:"structure"` 9312 _ struct{} `type:"structure"`
8706 9313
@@ -8737,7 +9344,13 @@ func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput
8737 return s 9344 return s
8738} 9345}
8739 9346
8740// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput 9347func (s *DeleteBucketTaggingInput) getBucket() (v string) {
9348 if s.Bucket == nil {
9349 return v
9350 }
9351 return *s.Bucket
9352}
9353
8741type DeleteBucketTaggingOutput struct { 9354type DeleteBucketTaggingOutput struct {
8742 _ struct{} `type:"structure"` 9355 _ struct{} `type:"structure"`
8743} 9356}
@@ -8752,7 +9365,6 @@ func (s DeleteBucketTaggingOutput) GoString() string {
8752 return s.String() 9365 return s.String()
8753} 9366}
8754 9367
8755// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest
8756type DeleteBucketWebsiteInput struct { 9368type DeleteBucketWebsiteInput struct {
8757 _ struct{} `type:"structure"` 9369 _ struct{} `type:"structure"`
8758 9370
@@ -8789,7 +9401,13 @@ func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput
8789 return s 9401 return s
8790} 9402}
8791 9403
8792// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput 9404func (s *DeleteBucketWebsiteInput) getBucket() (v string) {
9405 if s.Bucket == nil {
9406 return v
9407 }
9408 return *s.Bucket
9409}
9410
8793type DeleteBucketWebsiteOutput struct { 9411type DeleteBucketWebsiteOutput struct {
8794 _ struct{} `type:"structure"` 9412 _ struct{} `type:"structure"`
8795} 9413}
@@ -8804,7 +9422,6 @@ func (s DeleteBucketWebsiteOutput) GoString() string {
8804 return s.String() 9422 return s.String()
8805} 9423}
8806 9424
8807// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry
8808type DeleteMarkerEntry struct { 9425type DeleteMarkerEntry struct {
8809 _ struct{} `type:"structure"` 9426 _ struct{} `type:"structure"`
8810 9427
@@ -8816,7 +9433,7 @@ type DeleteMarkerEntry struct {
8816 Key *string `min:"1" type:"string"` 9433 Key *string `min:"1" type:"string"`
8817 9434
8818 // Date and time the object was last modified. 9435 // Date and time the object was last modified.
8819 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` 9436 LastModified *time.Time `type:"timestamp"`
8820 9437
8821 Owner *Owner `type:"structure"` 9438 Owner *Owner `type:"structure"`
8822 9439
@@ -8864,7 +9481,6 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
8864 return s 9481 return s
8865} 9482}
8866 9483
8867// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest
8868type DeleteObjectInput struct { 9484type DeleteObjectInput struct {
8869 _ struct{} `type:"structure"` 9485 _ struct{} `type:"structure"`
8870 9486
@@ -8923,6 +9539,13 @@ func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
8923 return s 9539 return s
8924} 9540}
8925 9541
9542func (s *DeleteObjectInput) getBucket() (v string) {
9543 if s.Bucket == nil {
9544 return v
9545 }
9546 return *s.Bucket
9547}
9548
8926// SetKey sets the Key field's value. 9549// SetKey sets the Key field's value.
8927func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { 9550func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
8928 s.Key = &v 9551 s.Key = &v
@@ -8947,7 +9570,6 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
8947 return s 9570 return s
8948} 9571}
8949 9572
8950// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput
8951type DeleteObjectOutput struct { 9573type DeleteObjectOutput struct {
8952 _ struct{} `type:"structure"` 9574 _ struct{} `type:"structure"`
8953 9575
@@ -8992,7 +9614,6 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
8992 return s 9614 return s
8993} 9615}
8994 9616
8995// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest
8996type DeleteObjectTaggingInput struct { 9617type DeleteObjectTaggingInput struct {
8997 _ struct{} `type:"structure"` 9618 _ struct{} `type:"structure"`
8998 9619
@@ -9041,6 +9662,13 @@ func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput
9041 return s 9662 return s
9042} 9663}
9043 9664
9665func (s *DeleteObjectTaggingInput) getBucket() (v string) {
9666 if s.Bucket == nil {
9667 return v
9668 }
9669 return *s.Bucket
9670}
9671
9044// SetKey sets the Key field's value. 9672// SetKey sets the Key field's value.
9045func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { 9673func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
9046 s.Key = &v 9674 s.Key = &v
@@ -9053,7 +9681,6 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn
9053 return s 9681 return s
9054} 9682}
9055 9683
9056// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput
9057type DeleteObjectTaggingOutput struct { 9684type DeleteObjectTaggingOutput struct {
9058 _ struct{} `type:"structure"` 9685 _ struct{} `type:"structure"`
9059 9686
@@ -9077,7 +9704,6 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO
9077 return s 9704 return s
9078} 9705}
9079 9706
9080// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest
9081type DeleteObjectsInput struct { 9707type DeleteObjectsInput struct {
9082 _ struct{} `type:"structure" payload:"Delete"` 9708 _ struct{} `type:"structure" payload:"Delete"`
9083 9709
@@ -9085,7 +9711,7 @@ type DeleteObjectsInput struct {
9085 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 9711 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9086 9712
9087 // Delete is a required field 9713 // Delete is a required field
9088 Delete *Delete `locationName:"Delete" type:"structure" required:"true"` 9714 Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
9089 9715
9090 // The concatenation of the authentication device's serial number, a space, 9716 // The concatenation of the authentication device's serial number, a space,
9091 // and the value that is displayed on your authentication device. 9717 // and the value that is displayed on your authentication device.
@@ -9135,6 +9761,13 @@ func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
9135 return s 9761 return s
9136} 9762}
9137 9763
9764func (s *DeleteObjectsInput) getBucket() (v string) {
9765 if s.Bucket == nil {
9766 return v
9767 }
9768 return *s.Bucket
9769}
9770
9138// SetDelete sets the Delete field's value. 9771// SetDelete sets the Delete field's value.
9139func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { 9772func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
9140 s.Delete = v 9773 s.Delete = v
@@ -9153,7 +9786,6 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
9153 return s 9786 return s
9154} 9787}
9155 9788
9156// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput
9157type DeleteObjectsOutput struct { 9789type DeleteObjectsOutput struct {
9158 _ struct{} `type:"structure"` 9790 _ struct{} `type:"structure"`
9159 9791
@@ -9194,7 +9826,6 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
9194 return s 9826 return s
9195} 9827}
9196 9828
9197// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject
9198type DeletedObject struct { 9829type DeletedObject struct {
9199 _ struct{} `type:"structure"` 9830 _ struct{} `type:"structure"`
9200 9831
@@ -9241,16 +9872,26 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
9241 return s 9872 return s
9242} 9873}
9243 9874
9244// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination 9875// Container for replication destination information.
9245type Destination struct { 9876type Destination struct {
9246 _ struct{} `type:"structure"` 9877 _ struct{} `type:"structure"`
9247 9878
9879 // Container for information regarding the access control for replicas.
9880 AccessControlTranslation *AccessControlTranslation `type:"structure"`
9881
9882 // Account ID of the destination bucket. Currently this is only being verified
9883 // if Access Control Translation is enabled
9884 Account *string `type:"string"`
9885
9248 // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store 9886 // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
9249 // replicas of the object identified by the rule. 9887 // replicas of the object identified by the rule.
9250 // 9888 //
9251 // Bucket is a required field 9889 // Bucket is a required field
9252 Bucket *string `type:"string" required:"true"` 9890 Bucket *string `type:"string" required:"true"`
9253 9891
9892 // Container for information regarding encryption based configuration for replicas.
9893 EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
9894
9254 // The class of storage used to store the object. 9895 // The class of storage used to store the object.
9255 StorageClass *string `type:"string" enum:"StorageClass"` 9896 StorageClass *string `type:"string" enum:"StorageClass"`
9256} 9897}
@@ -9271,6 +9912,11 @@ func (s *Destination) Validate() error {
9271 if s.Bucket == nil { 9912 if s.Bucket == nil {
9272 invalidParams.Add(request.NewErrParamRequired("Bucket")) 9913 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9273 } 9914 }
9915 if s.AccessControlTranslation != nil {
9916 if err := s.AccessControlTranslation.Validate(); err != nil {
9917 invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams))
9918 }
9919 }
9274 9920
9275 if invalidParams.Len() > 0 { 9921 if invalidParams.Len() > 0 {
9276 return invalidParams 9922 return invalidParams
@@ -9278,19 +9924,154 @@ func (s *Destination) Validate() error {
9278 return nil 9924 return nil
9279} 9925}
9280 9926
9927// SetAccessControlTranslation sets the AccessControlTranslation field's value.
9928func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination {
9929 s.AccessControlTranslation = v
9930 return s
9931}
9932
9933// SetAccount sets the Account field's value.
9934func (s *Destination) SetAccount(v string) *Destination {
9935 s.Account = &v
9936 return s
9937}
9938
9281// SetBucket sets the Bucket field's value. 9939// SetBucket sets the Bucket field's value.
9282func (s *Destination) SetBucket(v string) *Destination { 9940func (s *Destination) SetBucket(v string) *Destination {
9283 s.Bucket = &v 9941 s.Bucket = &v
9284 return s 9942 return s
9285} 9943}
9286 9944
9945func (s *Destination) getBucket() (v string) {
9946 if s.Bucket == nil {
9947 return v
9948 }
9949 return *s.Bucket
9950}
9951
9952// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
9953func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination {
9954 s.EncryptionConfiguration = v
9955 return s
9956}
9957
9287// SetStorageClass sets the StorageClass field's value. 9958// SetStorageClass sets the StorageClass field's value.
9288func (s *Destination) SetStorageClass(v string) *Destination { 9959func (s *Destination) SetStorageClass(v string) *Destination {
9289 s.StorageClass = &v 9960 s.StorageClass = &v
9290 return s 9961 return s
9291} 9962}
9292 9963
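A hedged sketch of assembling a replication Destination with the fields added above (Account, EncryptionConfiguration) alongside the existing Bucket and StorageClass; the ARN, account ID, and KMS key ID are placeholders, and the s3 package alias is assumed:

func exampleReplicationDestination() (*s3.Destination, error) {
	dest := &s3.Destination{}
	dest.SetBucket("arn:aws:s3:::example-destination-bucket")
	dest.SetAccount("111122223333") // only checked when access control translation is enabled
	dest.SetStorageClass("STANDARD")

	enc := &s3.EncryptionConfiguration{}
	enc.SetReplicaKmsKeyID("arn:aws:kms:us-east-1:111122223333:key/example") // placeholder key ID
	dest.SetEncryptionConfiguration(enc)

	if err := dest.Validate(); err != nil { // Bucket is required; nested config is validated too
		return nil, err
	}
	return dest, nil
}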
9293// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error 9964// Describes the server-side encryption that will be applied to the restore
9965// results.
9966type Encryption struct {
9967 _ struct{} `type:"structure"`
9968
9969 // The server-side encryption algorithm used when storing job results in Amazon
9970 // S3 (e.g., AES256, aws:kms).
9971 //
9972 // EncryptionType is a required field
9973 EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"`
9974
9975 // If the encryption type is aws:kms, this optional value can be used to specify
9976 // the encryption context for the restore results.
9977 KMSContext *string `type:"string"`
9978
9979 // If the encryption type is aws:kms, this optional value specifies the AWS
9980 // KMS key ID to use for encryption of job results.
9981 KMSKeyId *string `type:"string"`
9982}
9983
9984// String returns the string representation
9985func (s Encryption) String() string {
9986 return awsutil.Prettify(s)
9987}
9988
9989// GoString returns the string representation
9990func (s Encryption) GoString() string {
9991 return s.String()
9992}
9993
9994// Validate inspects the fields of the type to determine if they are valid.
9995func (s *Encryption) Validate() error {
9996 invalidParams := request.ErrInvalidParams{Context: "Encryption"}
9997 if s.EncryptionType == nil {
9998 invalidParams.Add(request.NewErrParamRequired("EncryptionType"))
9999 }
10000
10001 if invalidParams.Len() > 0 {
10002 return invalidParams
10003 }
10004 return nil
10005}
10006
10007// SetEncryptionType sets the EncryptionType field's value.
10008func (s *Encryption) SetEncryptionType(v string) *Encryption {
10009 s.EncryptionType = &v
10010 return s
10011}
10012
10013// SetKMSContext sets the KMSContext field's value.
10014func (s *Encryption) SetKMSContext(v string) *Encryption {
10015 s.KMSContext = &v
10016 return s
10017}
10018
10019// SetKMSKeyId sets the KMSKeyId field's value.
10020func (s *Encryption) SetKMSKeyId(v string) *Encryption {
10021 s.KMSKeyId = &v
10022 return s
10023}
10024
10025// Container for information regarding encryption based configuration for replicas.
10026type EncryptionConfiguration struct {
10027 _ struct{} `type:"structure"`
10028
10029 // The id of the KMS key used to encrypt the replica object.
10030 ReplicaKmsKeyID *string `type:"string"`
10031}
10032
10033// String returns the string representation
10034func (s EncryptionConfiguration) String() string {
10035 return awsutil.Prettify(s)
10036}
10037
10038// GoString returns the string representation
10039func (s EncryptionConfiguration) GoString() string {
10040 return s.String()
10041}
10042
10043// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
10044func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
10045 s.ReplicaKmsKeyID = &v
10046 return s
10047}
10048
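A hedged sketch of filling in the new Encryption type that describes server-side encryption for restore results; the KMS key ID is a placeholder, and the enum string follows the values named in the EncryptionType comment (AES256, aws:kms):

func exampleRestoreEncryption() (*s3.Encryption, error) {
	enc := &s3.Encryption{}
	enc.SetEncryptionType("aws:kms")
	enc.SetKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/example") // placeholder key ID
	if err := enc.Validate(); err != nil { // EncryptionType is required
		return nil, err
	}
	return enc, nil
}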
10049type EndEvent struct {
10050 _ struct{} `locationName:"EndEvent" type:"structure"`
10051}
10052
10053// String returns the string representation
10054func (s EndEvent) String() string {
10055 return awsutil.Prettify(s)
10056}
10057
10058// GoString returns the string representation
10059func (s EndEvent) GoString() string {
10060 return s.String()
10061}
10062
 10063// The EndEvent is an event in the SelectObjectContentEventStream group of events.
10064func (s *EndEvent) eventSelectObjectContentEventStream() {}
10065
10066// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
10067// This method is only used internally within the SDK's EventStream handling.
10068func (s *EndEvent) UnmarshalEvent(
10069 payloadUnmarshaler protocol.PayloadUnmarshaler,
10070 msg eventstream.Message,
10071) error {
10072 return nil
10073}
10074
9294type Error struct { 10075type Error struct {
9295 _ struct{} `type:"structure"` 10076 _ struct{} `type:"structure"`
9296 10077
@@ -9337,7 +10118,6 @@ func (s *Error) SetVersionId(v string) *Error {
9337 return s 10118 return s
9338} 10119}
9339 10120
9340// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument
9341type ErrorDocument struct { 10121type ErrorDocument struct {
9342 _ struct{} `type:"structure"` 10122 _ struct{} `type:"structure"`
9343 10123
@@ -9380,7 +10160,6 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
9380} 10160}
9381 10161
9382// Container for key value pair that defines the criteria for the filter rule. 10162// Container for key value pair that defines the criteria for the filter rule.
9383// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule
9384type FilterRule struct { 10163type FilterRule struct {
9385 _ struct{} `type:"structure"` 10164 _ struct{} `type:"structure"`
9386 10165
@@ -9388,6 +10167,7 @@ type FilterRule struct {
9388 // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. 10167 // the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
9389 // Overlapping prefixes and suffixes are not supported. For more information, 10168 // Overlapping prefixes and suffixes are not supported. For more information,
9390 // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 10169 // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
10170 // in the Amazon Simple Storage Service Developer Guide.
9391 Name *string `type:"string" enum:"FilterRuleName"` 10171 Name *string `type:"string" enum:"FilterRuleName"`
9392 10172
9393 Value *string `type:"string"` 10173 Value *string `type:"string"`
@@ -9415,7 +10195,6 @@ func (s *FilterRule) SetValue(v string) *FilterRule {
9415 return s 10195 return s
9416} 10196}
9417 10197
9418// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest
9419type GetBucketAccelerateConfigurationInput struct { 10198type GetBucketAccelerateConfigurationInput struct {
9420 _ struct{} `type:"structure"` 10199 _ struct{} `type:"structure"`
9421 10200
@@ -9454,7 +10233,13 @@ func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAc
9454 return s 10233 return s
9455} 10234}
9456 10235
9457// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput 10236func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) {
10237 if s.Bucket == nil {
10238 return v
10239 }
10240 return *s.Bucket
10241}
10242
9458type GetBucketAccelerateConfigurationOutput struct { 10243type GetBucketAccelerateConfigurationOutput struct {
9459 _ struct{} `type:"structure"` 10244 _ struct{} `type:"structure"`
9460 10245
@@ -9478,7 +10263,6 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA
9478 return s 10263 return s
9479} 10264}
9480 10265
9481// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest
9482type GetBucketAclInput struct { 10266type GetBucketAclInput struct {
9483 _ struct{} `type:"structure"` 10267 _ struct{} `type:"structure"`
9484 10268
@@ -9515,7 +10299,13 @@ func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
9515 return s 10299 return s
9516} 10300}
9517 10301
9518// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput 10302func (s *GetBucketAclInput) getBucket() (v string) {
10303 if s.Bucket == nil {
10304 return v
10305 }
10306 return *s.Bucket
10307}
10308
9519type GetBucketAclOutput struct { 10309type GetBucketAclOutput struct {
9520 _ struct{} `type:"structure"` 10310 _ struct{} `type:"structure"`
9521 10311
@@ -9547,7 +10337,6 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
9547 return s 10337 return s
9548} 10338}
9549 10339
9550// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest
9551type GetBucketAnalyticsConfigurationInput struct { 10340type GetBucketAnalyticsConfigurationInput struct {
9552 _ struct{} `type:"structure"` 10341 _ struct{} `type:"structure"`
9553 10342
@@ -9594,13 +10383,19 @@ func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAna
9594 return s 10383 return s
9595} 10384}
9596 10385
10386func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) {
10387 if s.Bucket == nil {
10388 return v
10389 }
10390 return *s.Bucket
10391}
10392
9597// SetId sets the Id field's value. 10393// SetId sets the Id field's value.
9598func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { 10394func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
9599 s.Id = &v 10395 s.Id = &v
9600 return s 10396 return s
9601} 10397}
9602 10398
9603// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput
9604type GetBucketAnalyticsConfigurationOutput struct { 10399type GetBucketAnalyticsConfigurationOutput struct {
9605 _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` 10400 _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
9606 10401
@@ -9624,7 +10419,6 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana
9624 return s 10419 return s
9625} 10420}
9626 10421
9627// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest
9628type GetBucketCorsInput struct { 10422type GetBucketCorsInput struct {
9629 _ struct{} `type:"structure"` 10423 _ struct{} `type:"structure"`
9630 10424
@@ -9661,7 +10455,13 @@ func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
9661 return s 10455 return s
9662} 10456}
9663 10457
9664// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput 10458func (s *GetBucketCorsInput) getBucket() (v string) {
10459 if s.Bucket == nil {
10460 return v
10461 }
10462 return *s.Bucket
10463}
10464
9665type GetBucketCorsOutput struct { 10465type GetBucketCorsOutput struct {
9666 _ struct{} `type:"structure"` 10466 _ struct{} `type:"structure"`
9667 10467
@@ -9684,7 +10484,76 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
9684 return s 10484 return s
9685} 10485}
9686 10486
9687// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest 10487type GetBucketEncryptionInput struct {
10488 _ struct{} `type:"structure"`
10489
10490 // The name of the bucket from which the server-side encryption configuration
10491 // is retrieved.
10492 //
10493 // Bucket is a required field
10494 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
10495}
10496
10497// String returns the string representation
10498func (s GetBucketEncryptionInput) String() string {
10499 return awsutil.Prettify(s)
10500}
10501
10502// GoString returns the string representation
10503func (s GetBucketEncryptionInput) GoString() string {
10504 return s.String()
10505}
10506
10507// Validate inspects the fields of the type to determine if they are valid.
10508func (s *GetBucketEncryptionInput) Validate() error {
10509 invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"}
10510 if s.Bucket == nil {
10511 invalidParams.Add(request.NewErrParamRequired("Bucket"))
10512 }
10513
10514 if invalidParams.Len() > 0 {
10515 return invalidParams
10516 }
10517 return nil
10518}
10519
10520// SetBucket sets the Bucket field's value.
10521func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput {
10522 s.Bucket = &v
10523 return s
10524}
10525
10526func (s *GetBucketEncryptionInput) getBucket() (v string) {
10527 if s.Bucket == nil {
10528 return v
10529 }
10530 return *s.Bucket
10531}
10532
10533type GetBucketEncryptionOutput struct {
10534 _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
10535
10536 // Container for server-side encryption configuration rules. Currently S3 supports
10537 // one rule only.
10538 ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"`
10539}
10540
10541// String returns the string representation
10542func (s GetBucketEncryptionOutput) String() string {
10543 return awsutil.Prettify(s)
10544}
10545
10546// GoString returns the string representation
10547func (s GetBucketEncryptionOutput) GoString() string {
10548 return s.String()
10549}
10550
10551// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value.
10552func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput {
10553 s.ServerSideEncryptionConfiguration = v
10554 return s
10555}
10556
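The GetBucketEncryptionInput/Output pair added above backs the new GetBucketEncryption operation for reading a bucket's default server-side encryption settings. A hedged usage sketch, assuming the client is built in the usual way for this SDK; the bucket name is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Fetch the bucket's server-side encryption configuration.
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}
	// S3 currently returns at most one rule in this configuration.
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if rule.ApplyServerSideEncryptionByDefault != nil {
			fmt.Println(aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm))
		}
	}
}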
9688type GetBucketInventoryConfigurationInput struct { 10557type GetBucketInventoryConfigurationInput struct {
9689 _ struct{} `type:"structure"` 10558 _ struct{} `type:"structure"`
9690 10559
@@ -9731,13 +10600,19 @@ func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInv
9731 return s 10600 return s
9732} 10601}
9733 10602
10603func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) {
10604 if s.Bucket == nil {
10605 return v
10606 }
10607 return *s.Bucket
10608}
10609
9734// SetId sets the Id field's value. 10610// SetId sets the Id field's value.
9735func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { 10611func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
9736 s.Id = &v 10612 s.Id = &v
9737 return s 10613 return s
9738} 10614}
9739 10615
9740// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput
9741type GetBucketInventoryConfigurationOutput struct { 10616type GetBucketInventoryConfigurationOutput struct {
9742 _ struct{} `type:"structure" payload:"InventoryConfiguration"` 10617 _ struct{} `type:"structure" payload:"InventoryConfiguration"`
9743 10618
@@ -9761,7 +10636,6 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv
9761 return s 10636 return s
9762} 10637}
9763 10638
9764// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest
9765type GetBucketLifecycleConfigurationInput struct { 10639type GetBucketLifecycleConfigurationInput struct {
9766 _ struct{} `type:"structure"` 10640 _ struct{} `type:"structure"`
9767 10641
@@ -9798,7 +10672,13 @@ func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLif
9798 return s 10672 return s
9799} 10673}
9800 10674
9801// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput 10675func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) {
10676 if s.Bucket == nil {
10677 return v
10678 }
10679 return *s.Bucket
10680}
10681
9802type GetBucketLifecycleConfigurationOutput struct { 10682type GetBucketLifecycleConfigurationOutput struct {
9803 _ struct{} `type:"structure"` 10683 _ struct{} `type:"structure"`
9804 10684
@@ -9821,7 +10701,6 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge
9821 return s 10701 return s
9822} 10702}
9823 10703
9824// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest
9825type GetBucketLifecycleInput struct { 10704type GetBucketLifecycleInput struct {
9826 _ struct{} `type:"structure"` 10705 _ struct{} `type:"structure"`
9827 10706
@@ -9858,7 +10737,13 @@ func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
9858 return s 10737 return s
9859} 10738}
9860 10739
9861// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput 10740func (s *GetBucketLifecycleInput) getBucket() (v string) {
10741 if s.Bucket == nil {
10742 return v
10743 }
10744 return *s.Bucket
10745}
10746
9862type GetBucketLifecycleOutput struct { 10747type GetBucketLifecycleOutput struct {
9863 _ struct{} `type:"structure"` 10748 _ struct{} `type:"structure"`
9864 10749
@@ -9881,7 +10766,6 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput
9881 return s 10766 return s
9882} 10767}
9883 10768
9884// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest
9885type GetBucketLocationInput struct { 10769type GetBucketLocationInput struct {
9886 _ struct{} `type:"structure"` 10770 _ struct{} `type:"structure"`
9887 10771
@@ -9918,7 +10802,13 @@ func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
9918 return s 10802 return s
9919} 10803}
9920 10804
9921// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput 10805func (s *GetBucketLocationInput) getBucket() (v string) {
10806 if s.Bucket == nil {
10807 return v
10808 }
10809 return *s.Bucket
10810}
10811
9922type GetBucketLocationOutput struct { 10812type GetBucketLocationOutput struct {
9923 _ struct{} `type:"structure"` 10813 _ struct{} `type:"structure"`
9924 10814
@@ -9941,7 +10831,6 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca
9941 return s 10831 return s
9942} 10832}
9943 10833
9944// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest
9945type GetBucketLoggingInput struct { 10834type GetBucketLoggingInput struct {
9946 _ struct{} `type:"structure"` 10835 _ struct{} `type:"structure"`
9947 10836
@@ -9978,10 +10867,19 @@ func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
9978 return s 10867 return s
9979} 10868}
9980 10869
9981// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput 10870func (s *GetBucketLoggingInput) getBucket() (v string) {
10871 if s.Bucket == nil {
10872 return v
10873 }
10874 return *s.Bucket
10875}
10876
9982type GetBucketLoggingOutput struct { 10877type GetBucketLoggingOutput struct {
9983 _ struct{} `type:"structure"` 10878 _ struct{} `type:"structure"`
9984 10879
10880 // Container for logging information. Presence of this element indicates that
10881 // logging is enabled. Parameters TargetBucket and TargetPrefix are required
10882 // in this case.
9985 LoggingEnabled *LoggingEnabled `type:"structure"` 10883 LoggingEnabled *LoggingEnabled `type:"structure"`
9986} 10884}
9987 10885
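The comment added to GetBucketLoggingOutput above is the key to reading the result: access logging is enabled exactly when LoggingEnabled is non-nil, in which case TargetBucket and TargetPrefix say where the logs are delivered. A hedged usage sketch; the client construction and bucket name are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
		Bucket: aws.String("my-bucket"), // illustrative
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.LoggingEnabled == nil {
		fmt.Println("access logging is disabled")
		return
	}
	// When present, both target fields are populated.
	fmt.Println(aws.StringValue(out.LoggingEnabled.TargetBucket),
		aws.StringValue(out.LoggingEnabled.TargetPrefix))
}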
@@ -10001,7 +10899,6 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket
10001 return s 10899 return s
10002} 10900}
10003 10901
10004// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest
10005type GetBucketMetricsConfigurationInput struct { 10902type GetBucketMetricsConfigurationInput struct {
10006 _ struct{} `type:"structure"` 10903 _ struct{} `type:"structure"`
10007 10904
@@ -10048,13 +10945,19 @@ func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetri
10048 return s 10945 return s
10049} 10946}
10050 10947
10948func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) {
10949 if s.Bucket == nil {
10950 return v
10951 }
10952 return *s.Bucket
10953}
10954
10051// SetId sets the Id field's value. 10955// SetId sets the Id field's value.
10052func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { 10956func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
10053 s.Id = &v 10957 s.Id = &v
10054 return s 10958 return s
10055} 10959}
10056 10960
10057// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput
10058type GetBucketMetricsConfigurationOutput struct { 10961type GetBucketMetricsConfigurationOutput struct {
10059 _ struct{} `type:"structure" payload:"MetricsConfiguration"` 10962 _ struct{} `type:"structure" payload:"MetricsConfiguration"`
10060 10963
@@ -10078,7 +10981,6 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics
10078 return s 10981 return s
10079} 10982}
10080 10983
10081// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest
10082type GetBucketNotificationConfigurationRequest struct { 10984type GetBucketNotificationConfigurationRequest struct {
10083 _ struct{} `type:"structure"` 10985 _ struct{} `type:"structure"`
10084 10986
@@ -10117,7 +11019,13 @@ func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBuck
10117 return s 11019 return s
10118} 11020}
10119 11021
10120// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest 11022func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
11023 if s.Bucket == nil {
11024 return v
11025 }
11026 return *s.Bucket
11027}
11028
10121type GetBucketPolicyInput struct { 11029type GetBucketPolicyInput struct {
10122 _ struct{} `type:"structure"` 11030 _ struct{} `type:"structure"`
10123 11031
@@ -10154,7 +11062,13 @@ func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
10154 return s 11062 return s
10155} 11063}
10156 11064
10157// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput 11065func (s *GetBucketPolicyInput) getBucket() (v string) {
11066 if s.Bucket == nil {
11067 return v
11068 }
11069 return *s.Bucket
11070}
11071
10158type GetBucketPolicyOutput struct { 11072type GetBucketPolicyOutput struct {
10159 _ struct{} `type:"structure" payload:"Policy"` 11073 _ struct{} `type:"structure" payload:"Policy"`
10160 11074
@@ -10178,7 +11092,6 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
10178 return s 11092 return s
10179} 11093}
10180 11094
10181// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest
10182type GetBucketReplicationInput struct { 11095type GetBucketReplicationInput struct {
10183 _ struct{} `type:"structure"` 11096 _ struct{} `type:"structure"`
10184 11097
@@ -10215,7 +11128,13 @@ func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInp
10215 return s 11128 return s
10216} 11129}
10217 11130
10218// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput 11131func (s *GetBucketReplicationInput) getBucket() (v string) {
11132 if s.Bucket == nil {
11133 return v
11134 }
11135 return *s.Bucket
11136}
11137
10219type GetBucketReplicationOutput struct { 11138type GetBucketReplicationOutput struct {
10220 _ struct{} `type:"structure" payload:"ReplicationConfiguration"` 11139 _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
10221 11140
@@ -10240,7 +11159,6 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC
10240 return s 11159 return s
10241} 11160}
10242 11161
10243// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest
10244type GetBucketRequestPaymentInput struct { 11162type GetBucketRequestPaymentInput struct {
10245 _ struct{} `type:"structure"` 11163 _ struct{} `type:"structure"`
10246 11164
@@ -10277,7 +11195,13 @@ func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaym
10277 return s 11195 return s
10278} 11196}
10279 11197
10280// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput 11198func (s *GetBucketRequestPaymentInput) getBucket() (v string) {
11199 if s.Bucket == nil {
11200 return v
11201 }
11202 return *s.Bucket
11203}
11204
10281type GetBucketRequestPaymentOutput struct { 11205type GetBucketRequestPaymentOutput struct {
10282 _ struct{} `type:"structure"` 11206 _ struct{} `type:"structure"`
10283 11207
@@ -10301,7 +11225,6 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym
10301 return s 11225 return s
10302} 11226}
10303 11227
10304// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest
10305type GetBucketTaggingInput struct { 11228type GetBucketTaggingInput struct {
10306 _ struct{} `type:"structure"` 11229 _ struct{} `type:"structure"`
10307 11230
@@ -10338,7 +11261,13 @@ func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
10338 return s 11261 return s
10339} 11262}
10340 11263
10341// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput 11264func (s *GetBucketTaggingInput) getBucket() (v string) {
11265 if s.Bucket == nil {
11266 return v
11267 }
11268 return *s.Bucket
11269}
11270
10342type GetBucketTaggingOutput struct { 11271type GetBucketTaggingOutput struct {
10343 _ struct{} `type:"structure"` 11272 _ struct{} `type:"structure"`
10344 11273
@@ -10362,7 +11291,6 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
10362 return s 11291 return s
10363} 11292}
10364 11293
10365// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest
10366type GetBucketVersioningInput struct { 11294type GetBucketVersioningInput struct {
10367 _ struct{} `type:"structure"` 11295 _ struct{} `type:"structure"`
10368 11296
@@ -10399,7 +11327,13 @@ func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput
10399 return s 11327 return s
10400} 11328}
10401 11329
10402// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput 11330func (s *GetBucketVersioningInput) getBucket() (v string) {
11331 if s.Bucket == nil {
11332 return v
11333 }
11334 return *s.Bucket
11335}
11336
10403type GetBucketVersioningOutput struct { 11337type GetBucketVersioningOutput struct {
10404 _ struct{} `type:"structure"` 11338 _ struct{} `type:"structure"`
10405 11339
@@ -10434,7 +11368,6 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp
10434 return s 11368 return s
10435} 11369}
10436 11370
10437// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest
10438type GetBucketWebsiteInput struct { 11371type GetBucketWebsiteInput struct {
10439 _ struct{} `type:"structure"` 11372 _ struct{} `type:"structure"`
10440 11373
@@ -10471,7 +11404,13 @@ func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
10471 return s 11404 return s
10472} 11405}
10473 11406
10474// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput 11407func (s *GetBucketWebsiteInput) getBucket() (v string) {
11408 if s.Bucket == nil {
11409 return v
11410 }
11411 return *s.Bucket
11412}
11413
10475type GetBucketWebsiteOutput struct { 11414type GetBucketWebsiteOutput struct {
10476 _ struct{} `type:"structure"` 11415 _ struct{} `type:"structure"`
10477 11416
@@ -10518,7 +11457,6 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb
10518 return s 11457 return s
10519} 11458}
10520 11459
10521// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest
10522type GetObjectAclInput struct { 11460type GetObjectAclInput struct {
10523 _ struct{} `type:"structure"` 11461 _ struct{} `type:"structure"`
10524 11462
@@ -10573,6 +11511,13 @@ func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
10573 return s 11511 return s
10574} 11512}
10575 11513
11514func (s *GetObjectAclInput) getBucket() (v string) {
11515 if s.Bucket == nil {
11516 return v
11517 }
11518 return *s.Bucket
11519}
11520
10576// SetKey sets the Key field's value. 11521// SetKey sets the Key field's value.
10577func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { 11522func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
10578 s.Key = &v 11523 s.Key = &v
@@ -10591,7 +11536,6 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
10591 return s 11536 return s
10592} 11537}
10593 11538
10594// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput
10595type GetObjectAclOutput struct { 11539type GetObjectAclOutput struct {
10596 _ struct{} `type:"structure"` 11540 _ struct{} `type:"structure"`
10597 11541
@@ -10633,7 +11577,6 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
10633 return s 11577 return s
10634} 11578}
10635 11579
10636// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest
10637type GetObjectInput struct { 11580type GetObjectInput struct {
10638 _ struct{} `type:"structure"` 11581 _ struct{} `type:"structure"`
10639 11582
@@ -10646,7 +11589,7 @@ type GetObjectInput struct {
10646 11589
10647 // Return the object only if it has been modified since the specified time, 11590 // Return the object only if it has been modified since the specified time,
10648 // otherwise return a 304 (not modified). 11591 // otherwise return a 304 (not modified).
10649 IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` 11592 IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
10650 11593
10651 // Return the object only if its entity tag (ETag) is different from the one 11594 // Return the object only if its entity tag (ETag) is different from the one
10652 // specified, otherwise return a 304 (not modified). 11595 // specified, otherwise return a 304 (not modified).
@@ -10654,7 +11597,7 @@ type GetObjectInput struct {
10654 11597
10655 // Return the object only if it has not been modified since the specified time, 11598 // Return the object only if it has not been modified since the specified time,
10656 // otherwise return a 412 (precondition failed). 11599 // otherwise return a 412 (precondition failed).
10657 IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` 11600 IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
10658 11601
10659 // Key is a required field 11602 // Key is a required field
10660 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` 11603 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -10690,7 +11633,7 @@ type GetObjectInput struct {
10690 ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` 11633 ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
10691 11634
10692 // Sets the Expires header of the response. 11635 // Sets the Expires header of the response.
10693 ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` 11636 ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
10694 11637
10695 // Specifies the algorithm to use to when encrypting the object (e.g., AES256). 11638 // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
10696 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` 11639 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
@@ -10746,6 +11689,13 @@ func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
10746 return s 11689 return s
10747} 11690}
10748 11691
11692func (s *GetObjectInput) getBucket() (v string) {
11693 if s.Bucket == nil {
11694 return v
11695 }
11696 return *s.Bucket
11697}
11698
10749// SetIfMatch sets the IfMatch field's value. 11699// SetIfMatch sets the IfMatch field's value.
10750func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { 11700func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
10751 s.IfMatch = &v 11701 s.IfMatch = &v
@@ -10842,6 +11792,13 @@ func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
10842 return s 11792 return s
10843} 11793}
10844 11794
11795func (s *GetObjectInput) getSSECustomerKey() (v string) {
11796 if s.SSECustomerKey == nil {
11797 return v
11798 }
11799 return *s.SSECustomerKey
11800}
11801
10845// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 11802// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
10846func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { 11803func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
10847 s.SSECustomerKeyMD5 = &v 11804 s.SSECustomerKeyMD5 = &v
@@ -10854,7 +11811,6 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
10854 return s 11811 return s
10855} 11812}
10856 11813
10857// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput
10858type GetObjectOutput struct { 11814type GetObjectOutput struct {
10859 _ struct{} `type:"structure" payload:"Body"` 11815 _ struct{} `type:"structure" payload:"Body"`
10860 11816
@@ -10904,7 +11860,7 @@ type GetObjectOutput struct {
10904 Expires *string `location:"header" locationName:"Expires" type:"string"` 11860 Expires *string `location:"header" locationName:"Expires" type:"string"`
10905 11861
10906 // Last modified date of the object 11862 // Last modified date of the object
10907 LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` 11863 LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
10908 11864
10909 // A map of metadata to store with the object in S3. 11865 // A map of metadata to store with the object in S3.
10910 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` 11866 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
@@ -11138,7 +12094,6 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput
11138 return s 12094 return s
11139} 12095}
11140 12096
11141// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest
11142type GetObjectTaggingInput struct { 12097type GetObjectTaggingInput struct {
11143 _ struct{} `type:"structure"` 12098 _ struct{} `type:"structure"`
11144 12099
@@ -11186,6 +12141,13 @@ func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
11186 return s 12141 return s
11187} 12142}
11188 12143
12144func (s *GetObjectTaggingInput) getBucket() (v string) {
12145 if s.Bucket == nil {
12146 return v
12147 }
12148 return *s.Bucket
12149}
12150
11189// SetKey sets the Key field's value. 12151// SetKey sets the Key field's value.
11190func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { 12152func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
11191 s.Key = &v 12153 s.Key = &v
@@ -11198,7 +12160,6 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
11198 return s 12160 return s
11199} 12161}
11200 12162
11201// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput
11202type GetObjectTaggingOutput struct { 12163type GetObjectTaggingOutput struct {
11203 _ struct{} `type:"structure"` 12164 _ struct{} `type:"structure"`
11204 12165
@@ -11230,7 +12191,6 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput
11230 return s 12191 return s
11231} 12192}
11232 12193
11233// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest
11234type GetObjectTorrentInput struct { 12194type GetObjectTorrentInput struct {
11235 _ struct{} `type:"structure"` 12195 _ struct{} `type:"structure"`
11236 12196
@@ -11282,6 +12242,13 @@ func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
11282 return s 12242 return s
11283} 12243}
11284 12244
12245func (s *GetObjectTorrentInput) getBucket() (v string) {
12246 if s.Bucket == nil {
12247 return v
12248 }
12249 return *s.Bucket
12250}
12251
11285// SetKey sets the Key field's value. 12252// SetKey sets the Key field's value.
11286func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { 12253func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
11287 s.Key = &v 12254 s.Key = &v
@@ -11294,7 +12261,6 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput
11294 return s 12261 return s
11295} 12262}
11296 12263
11297// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput
11298type GetObjectTorrentOutput struct { 12264type GetObjectTorrentOutput struct {
11299 _ struct{} `type:"structure" payload:"Body"` 12265 _ struct{} `type:"structure" payload:"Body"`
11300 12266
@@ -11327,7 +12293,6 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu
11327 return s 12293 return s
11328} 12294}
11329 12295
11330// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters
11331type GlacierJobParameters struct { 12296type GlacierJobParameters struct {
11332 _ struct{} `type:"structure"` 12297 _ struct{} `type:"structure"`
11333 12298
@@ -11366,11 +12331,10 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
11366 return s 12331 return s
11367} 12332}
11368 12333
11369// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant
11370type Grant struct { 12334type Grant struct {
11371 _ struct{} `type:"structure"` 12335 _ struct{} `type:"structure"`
11372 12336
11373 Grantee *Grantee `type:"structure"` 12337 Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
11374 12338
11375 // Specifies the permission given to the grantee. 12339 // Specifies the permission given to the grantee.
11376 Permission *string `type:"string" enum:"Permission"` 12340 Permission *string `type:"string" enum:"Permission"`
@@ -11413,7 +12377,6 @@ func (s *Grant) SetPermission(v string) *Grant {
11413 return s 12377 return s
11414} 12378}
11415 12379
11416// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee
11417type Grantee struct { 12380type Grantee struct {
11418 _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` 12381 _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
11419 12382
@@ -11488,7 +12451,6 @@ func (s *Grantee) SetURI(v string) *Grantee {
11488 return s 12451 return s
11489} 12452}
11490 12453
11491// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
11492type HeadBucketInput struct { 12454type HeadBucketInput struct {
11493 _ struct{} `type:"structure"` 12455 _ struct{} `type:"structure"`
11494 12456
@@ -11525,7 +12487,13 @@ func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
11525 return s 12487 return s
11526} 12488}
11527 12489
11528// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput 12490func (s *HeadBucketInput) getBucket() (v string) {
12491 if s.Bucket == nil {
12492 return v
12493 }
12494 return *s.Bucket
12495}
12496
11529type HeadBucketOutput struct { 12497type HeadBucketOutput struct {
11530 _ struct{} `type:"structure"` 12498 _ struct{} `type:"structure"`
11531} 12499}
@@ -11540,7 +12508,6 @@ func (s HeadBucketOutput) GoString() string {
11540 return s.String() 12508 return s.String()
11541} 12509}
11542 12510
11543// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
11544type HeadObjectInput struct { 12511type HeadObjectInput struct {
11545 _ struct{} `type:"structure"` 12512 _ struct{} `type:"structure"`
11546 12513
@@ -11553,7 +12520,7 @@ type HeadObjectInput struct {
11553 12520
11554 // Return the object only if it has been modified since the specified time, 12521 // Return the object only if it has been modified since the specified time,
11555 // otherwise return a 304 (not modified). 12522 // otherwise return a 304 (not modified).
11556 IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` 12523 IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
11557 12524
11558 // Return the object only if its entity tag (ETag) is different from the one 12525 // Return the object only if its entity tag (ETag) is different from the one
11559 // specified, otherwise return a 304 (not modified). 12526 // specified, otherwise return a 304 (not modified).
@@ -11561,7 +12528,7 @@ type HeadObjectInput struct {
11561 12528
11562 // Return the object only if it has not been modified since the specified time, 12529 // Return the object only if it has not been modified since the specified time,
11563 // otherwise return a 412 (precondition failed). 12530 // otherwise return a 412 (precondition failed).
11564 IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` 12531 IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
11565 12532
11566 // Key is a required field 12533 // Key is a required field
11567 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` 12534 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -11636,6 +12603,13 @@ func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
11636 return s 12603 return s
11637} 12604}
11638 12605
12606func (s *HeadObjectInput) getBucket() (v string) {
12607 if s.Bucket == nil {
12608 return v
12609 }
12610 return *s.Bucket
12611}
12612
11639// SetIfMatch sets the IfMatch field's value. 12613// SetIfMatch sets the IfMatch field's value.
11640func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { 12614func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
11641 s.IfMatch = &v 12615 s.IfMatch = &v
@@ -11696,6 +12670,13 @@ func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
11696 return s 12670 return s
11697} 12671}
11698 12672
12673func (s *HeadObjectInput) getSSECustomerKey() (v string) {
12674 if s.SSECustomerKey == nil {
12675 return v
12676 }
12677 return *s.SSECustomerKey
12678}
12679
11699// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 12680// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
11700func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { 12681func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
11701 s.SSECustomerKeyMD5 = &v 12682 s.SSECustomerKeyMD5 = &v
@@ -11708,7 +12689,6 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
11708 return s 12689 return s
11709} 12690}
11710 12691
11711// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput
11712type HeadObjectOutput struct { 12692type HeadObjectOutput struct {
11713 _ struct{} `type:"structure"` 12693 _ struct{} `type:"structure"`
11714 12694
@@ -11752,7 +12732,7 @@ type HeadObjectOutput struct {
11752 Expires *string `location:"header" locationName:"Expires" type:"string"` 12732 Expires *string `location:"header" locationName:"Expires" type:"string"`
11753 12733
11754 // Last modified date of the object 12734 // Last modified date of the object
11755 LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` 12735 LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
11756 12736
11757 // A map of metadata to store with the object in S3. 12737 // A map of metadata to store with the object in S3.
11758 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` 12738 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
@@ -11965,7 +12945,6 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu
11965 return s 12945 return s
11966} 12946}
11967 12947
11968// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument
11969type IndexDocument struct { 12948type IndexDocument struct {
11970 _ struct{} `type:"structure"` 12949 _ struct{} `type:"structure"`
11971 12950
@@ -12007,7 +12986,6 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
12007 return s 12986 return s
12008} 12987}
12009 12988
12010// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator
12011type Initiator struct { 12989type Initiator struct {
12012 _ struct{} `type:"structure"` 12990 _ struct{} `type:"structure"`
12013 12991
@@ -12041,7 +13019,49 @@ func (s *Initiator) SetID(v string) *Initiator {
12041 return s 13019 return s
12042} 13020}
12043 13021
12044// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration 13022// Describes the serialization format of the object.
13023type InputSerialization struct {
13024 _ struct{} `type:"structure"`
13025
13026 // Describes the serialization of a CSV-encoded object.
13027 CSV *CSVInput `type:"structure"`
13028
13029 // Specifies the object's compression format. Valid values: NONE, GZIP, BZIP2.
13030 // Default value: NONE.
13031 CompressionType *string `type:"string" enum:"CompressionType"`
13032
13033 // Specifies JSON as the object's input serialization format.
13034 JSON *JSONInput `type:"structure"`
13035}
13036
13037// String returns the string representation
13038func (s InputSerialization) String() string {
13039 return awsutil.Prettify(s)
13040}
13041
13042// GoString returns the string representation
13043func (s InputSerialization) GoString() string {
13044 return s.String()
13045}
13046
13047// SetCSV sets the CSV field's value.
13048func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization {
13049 s.CSV = v
13050 return s
13051}
13052
13053// SetCompressionType sets the CompressionType field's value.
13054func (s *InputSerialization) SetCompressionType(v string) *InputSerialization {
13055 s.CompressionType = &v
13056 return s
13057}
13058
13059// SetJSON sets the JSON field's value.
13060func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization {
13061 s.JSON = v
13062 return s
13063}
13064
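InputSerialization is the half of an S3 Select request that tells the service how the stored object is encoded: CSV or JSON, optionally compressed. A minimal construction sketch using the setters added above; the CSVInput field and the enum constant names are assumed to follow this file's usual conventions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Describe a GZIP-compressed CSV object whose first line is a header row,
	// as the input side of a SelectObjectContent request.
	in := (&s3.InputSerialization{}).
		SetCompressionType(s3.CompressionTypeGzip).
		SetCSV(&s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)})
	fmt.Println(in)
}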
12045type InventoryConfiguration struct { 13065type InventoryConfiguration struct {
12046 _ struct{} `type:"structure"` 13066 _ struct{} `type:"structure"`
12047 13067
@@ -12170,7 +13190,6 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon
12170 return s 13190 return s
12171} 13191}
12172 13192
12173// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination
12174type InventoryDestination struct { 13193type InventoryDestination struct {
12175 _ struct{} `type:"structure"` 13194 _ struct{} `type:"structure"`
12176 13195
@@ -12215,7 +13234,55 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin
12215 return s 13234 return s
12216} 13235}
12217 13236
12218// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter 13237// Contains the type of server-side encryption used to encrypt the inventory
13238// results.
13239type InventoryEncryption struct {
13240 _ struct{} `type:"structure"`
13241
13242 // Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
13243 SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"`
13244
13245 // Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
13246 SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"`
13247}
13248
13249// String returns the string representation
13250func (s InventoryEncryption) String() string {
13251 return awsutil.Prettify(s)
13252}
13253
13254// GoString returns the string representation
13255func (s InventoryEncryption) GoString() string {
13256 return s.String()
13257}
13258
13259// Validate inspects the fields of the type to determine if they are valid.
13260func (s *InventoryEncryption) Validate() error {
13261 invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"}
13262 if s.SSEKMS != nil {
13263 if err := s.SSEKMS.Validate(); err != nil {
13264 invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams))
13265 }
13266 }
13267
13268 if invalidParams.Len() > 0 {
13269 return invalidParams
13270 }
13271 return nil
13272}
13273
13274// SetSSEKMS sets the SSEKMS field's value.
13275func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption {
13276 s.SSEKMS = v
13277 return s
13278}
13279
13280// SetSSES3 sets the SSES3 field's value.
13281func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption {
13282 s.SSES3 = v
13283 return s
13284}
13285
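InventoryEncryption lets an inventory destination request that the delivered reports themselves be encrypted, with either SSE-S3 or SSE-KMS (only the KMS variant needs validation, as shown above). A sketch of wiring it into an InventoryS3BucketDestination using the setters from this diff; the bucket ARN is illustrative and the format constant is assumed to follow the file's usual naming:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Deliver CSV inventory reports to a bucket and ask S3 to encrypt the
	// reports themselves with SSE-S3 managed keys.
	dest := (&s3.InventoryS3BucketDestination{}).
		SetBucket("arn:aws:s3:::my-inventory-bucket"). // illustrative ARN
		SetFormat(s3.InventoryFormatCsv).
		SetEncryption((&s3.InventoryEncryption{}).SetSSES3(&s3.SSES3{}))
	fmt.Println(dest)
}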
12219type InventoryFilter struct { 13286type InventoryFilter struct {
12220 _ struct{} `type:"structure"` 13287 _ struct{} `type:"structure"`
12221 13288
@@ -12254,7 +13321,6 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
12254 return s 13321 return s
12255} 13322}
12256 13323
12257// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination
12258type InventoryS3BucketDestination struct { 13324type InventoryS3BucketDestination struct {
12259 _ struct{} `type:"structure"` 13325 _ struct{} `type:"structure"`
12260 13326
@@ -12267,6 +13333,10 @@ type InventoryS3BucketDestination struct {
12267 // Bucket is a required field 13333 // Bucket is a required field
12268 Bucket *string `type:"string" required:"true"` 13334 Bucket *string `type:"string" required:"true"`
12269 13335
13336 // Contains the type of server-side encryption used to encrypt the inventory
13337 // results.
13338 Encryption *InventoryEncryption `type:"structure"`
13339
12270 // Specifies the output format of the inventory results. 13340 // Specifies the output format of the inventory results.
12271 // 13341 //
12272 // Format is a required field 13342 // Format is a required field
@@ -12295,6 +13365,11 @@ func (s *InventoryS3BucketDestination) Validate() error {
12295 if s.Format == nil { 13365 if s.Format == nil {
12296 invalidParams.Add(request.NewErrParamRequired("Format")) 13366 invalidParams.Add(request.NewErrParamRequired("Format"))
12297 } 13367 }
13368 if s.Encryption != nil {
13369 if err := s.Encryption.Validate(); err != nil {
13370 invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
13371 }
13372 }
12298 13373
12299 if invalidParams.Len() > 0 { 13374 if invalidParams.Len() > 0 {
12300 return invalidParams 13375 return invalidParams
@@ -12314,6 +13389,19 @@ func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDes
12314 return s 13389 return s
12315} 13390}
12316 13391
13392func (s *InventoryS3BucketDestination) getBucket() (v string) {
13393 if s.Bucket == nil {
13394 return v
13395 }
13396 return *s.Bucket
13397}
13398
13399// SetEncryption sets the Encryption field's value.
13400func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination {
13401 s.Encryption = v
13402 return s
13403}
13404
12317// SetFormat sets the Format field's value. 13405// SetFormat sets the Format field's value.
12318func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { 13406func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
12319 s.Format = &v 13407 s.Format = &v
@@ -12326,7 +13414,6 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes
12326 return s 13414 return s
12327} 13415}
12328 13416
12329// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule
12330type InventorySchedule struct { 13417type InventorySchedule struct {
12331 _ struct{} `type:"structure"` 13418 _ struct{} `type:"structure"`
12332 13419
@@ -12365,8 +13452,53 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
12365 return s 13452 return s
12366} 13453}
12367 13454
13455type JSONInput struct {
13456 _ struct{} `type:"structure"`
13457
13458 // The type of JSON. Valid values: Document, Lines.
13459 Type *string `type:"string" enum:"JSONType"`
13460}
13461
13462// String returns the string representation
13463func (s JSONInput) String() string {
13464 return awsutil.Prettify(s)
13465}
13466
13467// GoString returns the string representation
13468func (s JSONInput) GoString() string {
13469 return s.String()
13470}
13471
13472// SetType sets the Type field's value.
13473func (s *JSONInput) SetType(v string) *JSONInput {
13474 s.Type = &v
13475 return s
13476}
13477
13478type JSONOutput struct {
13479 _ struct{} `type:"structure"`
13480
13481 // The value used to separate individual records in the output.
13482 RecordDelimiter *string `type:"string"`
13483}
13484
13485// String returns the string representation
13486func (s JSONOutput) String() string {
13487 return awsutil.Prettify(s)
13488}
13489
13490// GoString returns the string representation
13491func (s JSONOutput) GoString() string {
13492 return s.String()
13493}
13494
13495// SetRecordDelimiter sets the RecordDelimiter field's value.
13496func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput {
13497 s.RecordDelimiter = &v
13498 return s
13499}
13500
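JSONInput and JSONOutput are the JSON counterparts to the CSV serialization types: Type selects whole-document versus line-delimited parsing on the way in, and RecordDelimiter separates records on the way out. A sketch pairing them inside the serialization wrappers; the OutputSerialization JSON field and the JSONTypeLines constant are assumed to mirror the patterns elsewhere in this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Parse the stored object as JSON Lines...
	in := (&s3.InputSerialization{}).
		SetJSON((&s3.JSONInput{}).SetType(s3.JSONTypeLines))
	// ...and emit one JSON record per line in the result stream.
	out := (&s3.OutputSerialization{}).
		SetJSON((&s3.JSONOutput{}).SetRecordDelimiter("\n"))
	fmt.Println(in, out)
}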
12368// Container for object key name prefix and suffix filtering rules. 13501// Container for object key name prefix and suffix filtering rules.
12369// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter
12370type KeyFilter struct { 13502type KeyFilter struct {
12371 _ struct{} `type:"structure"` 13503 _ struct{} `type:"structure"`
12372 13504
@@ -12392,7 +13524,6 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
12392} 13524}
12393 13525
12394// Container for specifying the AWS Lambda notification configuration. 13526// Container for specifying the AWS Lambda notification configuration.
12395// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration
12396type LambdaFunctionConfiguration struct { 13527type LambdaFunctionConfiguration struct {
12397 _ struct{} `type:"structure"` 13528 _ struct{} `type:"structure"`
12398 13529
@@ -12401,6 +13532,7 @@ type LambdaFunctionConfiguration struct {
12401 13532
12402 // Container for object key name filtering rules. For information about key 13533 // Container for object key name filtering rules. For information about key
12403 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 13534 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
13535 // in the Amazon Simple Storage Service Developer Guide.
12404 Filter *NotificationConfigurationFilter `type:"structure"` 13536 Filter *NotificationConfigurationFilter `type:"structure"`
12405 13537
12406 // Optional unique identifier for configurations in a notification configuration. 13538 // Optional unique identifier for configurations in a notification configuration.
@@ -12464,7 +13596,6 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc
12464 return s 13596 return s
12465} 13597}
12466 13598
12467// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration
12468type LifecycleConfiguration struct { 13599type LifecycleConfiguration struct {
12469 _ struct{} `type:"structure"` 13600 _ struct{} `type:"structure"`
12470 13601
@@ -12511,7 +13642,6 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
12511 return s 13642 return s
12512} 13643}
12513 13644
12514// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration
12515type LifecycleExpiration struct { 13645type LifecycleExpiration struct {
12516 _ struct{} `type:"structure"` 13646 _ struct{} `type:"structure"`
12517 13647
@@ -12558,7 +13688,6 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp
12558 return s 13688 return s
12559} 13689}
12560 13690
12561// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule
12562type LifecycleRule struct { 13691type LifecycleRule struct {
12563 _ struct{} `type:"structure"` 13692 _ struct{} `type:"structure"`
12564 13693
@@ -12682,7 +13811,6 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
12682// This is used in a Lifecycle Rule Filter to apply a logical AND to two or 13811// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
12683// more predicates. The Lifecycle Rule will apply to any object matching all 13812// more predicates. The Lifecycle Rule will apply to any object matching all
12684// of the predicates configured inside the And operator. 13813// of the predicates configured inside the And operator.
12685// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator
12686type LifecycleRuleAndOperator struct { 13814type LifecycleRuleAndOperator struct {
12687 _ struct{} `type:"structure"` 13815 _ struct{} `type:"structure"`
12688 13816
@@ -12737,7 +13865,6 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
12737 13865
12738// The Filter is used to identify objects that a Lifecycle Rule applies to. 13866// The Filter is used to identify objects that a Lifecycle Rule applies to.
12739// A Filter must have exactly one of Prefix, Tag, or And specified. 13867// A Filter must have exactly one of Prefix, Tag, or And specified.
12740// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter
12741type LifecycleRuleFilter struct { 13868type LifecycleRuleFilter struct {
12742 _ struct{} `type:"structure"` 13869 _ struct{} `type:"structure"`
12743 13870
@@ -12801,7 +13928,6 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
12801 return s 13928 return s
12802} 13929}
12803 13930
12804// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest
12805type ListBucketAnalyticsConfigurationsInput struct { 13931type ListBucketAnalyticsConfigurationsInput struct {
12806 _ struct{} `type:"structure"` 13932 _ struct{} `type:"structure"`
12807 13933
@@ -12844,13 +13970,19 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucket
12844 return s 13970 return s
12845} 13971}
12846 13972
13973func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) {
13974 if s.Bucket == nil {
13975 return v
13976 }
13977 return *s.Bucket
13978}
13979
12847// SetContinuationToken sets the ContinuationToken field's value. 13980// SetContinuationToken sets the ContinuationToken field's value.
12848func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { 13981func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
12849 s.ContinuationToken = &v 13982 s.ContinuationToken = &v
12850 return s 13983 return s
12851} 13984}
12852 13985
12853// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput
12854type ListBucketAnalyticsConfigurationsOutput struct { 13986type ListBucketAnalyticsConfigurationsOutput struct {
12855 _ struct{} `type:"structure"` 13987 _ struct{} `type:"structure"`
12856 13988
@@ -12905,7 +14037,6 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str
12905 return s 14037 return s
12906} 14038}
12907 14039
12908// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest
12909type ListBucketInventoryConfigurationsInput struct { 14040type ListBucketInventoryConfigurationsInput struct {
12910 _ struct{} `type:"structure"` 14041 _ struct{} `type:"structure"`
12911 14042
@@ -12950,13 +14081,19 @@ func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucket
12950 return s 14081 return s
12951} 14082}
12952 14083
14084func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) {
14085 if s.Bucket == nil {
14086 return v
14087 }
14088 return *s.Bucket
14089}
14090
12953// SetContinuationToken sets the ContinuationToken field's value. 14091// SetContinuationToken sets the ContinuationToken field's value.
12954func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { 14092func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
12955 s.ContinuationToken = &v 14093 s.ContinuationToken = &v
12956 return s 14094 return s
12957} 14095}
12958 14096
12959// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput
12960type ListBucketInventoryConfigurationsOutput struct { 14097type ListBucketInventoryConfigurationsOutput struct {
12961 _ struct{} `type:"structure"` 14098 _ struct{} `type:"structure"`
12962 14099
@@ -13011,7 +14148,6 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str
13011 return s 14148 return s
13012} 14149}
13013 14150
13014// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest
13015type ListBucketMetricsConfigurationsInput struct { 14151type ListBucketMetricsConfigurationsInput struct {
13016 _ struct{} `type:"structure"` 14152 _ struct{} `type:"structure"`
13017 14153
@@ -13056,13 +14192,19 @@ func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMe
13056 return s 14192 return s
13057} 14193}
13058 14194
14195func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) {
14196 if s.Bucket == nil {
14197 return v
14198 }
14199 return *s.Bucket
14200}
14201
13059// SetContinuationToken sets the ContinuationToken field's value. 14202// SetContinuationToken sets the ContinuationToken field's value.
13060func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { 14203func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
13061 s.ContinuationToken = &v 14204 s.ContinuationToken = &v
13062 return s 14205 return s
13063} 14206}
13064 14207
13065// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput
13066type ListBucketMetricsConfigurationsOutput struct { 14208type ListBucketMetricsConfigurationsOutput struct {
13067 _ struct{} `type:"structure"` 14209 _ struct{} `type:"structure"`
13068 14210
@@ -13119,7 +14261,6 @@ func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v strin
13119 return s 14261 return s
13120} 14262}
13121 14263
13122// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
13123type ListBucketsInput struct { 14264type ListBucketsInput struct {
13124 _ struct{} `type:"structure"` 14265 _ struct{} `type:"structure"`
13125} 14266}
@@ -13134,7 +14275,6 @@ func (s ListBucketsInput) GoString() string {
13134 return s.String() 14275 return s.String()
13135} 14276}
13136 14277
13137// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
13138type ListBucketsOutput struct { 14278type ListBucketsOutput struct {
13139 _ struct{} `type:"structure"` 14279 _ struct{} `type:"structure"`
13140 14280
@@ -13165,7 +14305,6 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
13165 return s 14305 return s
13166} 14306}
13167 14307
13168// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest
13169type ListMultipartUploadsInput struct { 14308type ListMultipartUploadsInput struct {
13170 _ struct{} `type:"structure"` 14309 _ struct{} `type:"structure"`
13171 14310
@@ -13231,6 +14370,13 @@ func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInp
13231 return s 14370 return s
13232} 14371}
13233 14372
14373func (s *ListMultipartUploadsInput) getBucket() (v string) {
14374 if s.Bucket == nil {
14375 return v
14376 }
14377 return *s.Bucket
14378}
14379
13234// SetDelimiter sets the Delimiter field's value. 14380// SetDelimiter sets the Delimiter field's value.
13235func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { 14381func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
13236 s.Delimiter = &v 14382 s.Delimiter = &v
@@ -13267,7 +14413,6 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp
13267 return s 14413 return s
13268} 14414}
13269 14415
13270// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput
13271type ListMultipartUploadsOutput struct { 14416type ListMultipartUploadsOutput struct {
13272 _ struct{} `type:"structure"` 14417 _ struct{} `type:"structure"`
13273 14418
@@ -13328,6 +14473,13 @@ func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOu
13328 return s 14473 return s
13329} 14474}
13330 14475
14476func (s *ListMultipartUploadsOutput) getBucket() (v string) {
14477 if s.Bucket == nil {
14478 return v
14479 }
14480 return *s.Bucket
14481}
14482
13331// SetCommonPrefixes sets the CommonPrefixes field's value. 14483// SetCommonPrefixes sets the CommonPrefixes field's value.
13332func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { 14484func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
13333 s.CommonPrefixes = v 14485 s.CommonPrefixes = v
@@ -13394,7 +14546,6 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti
13394 return s 14546 return s
13395} 14547}
13396 14548
13397// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest
13398type ListObjectVersionsInput struct { 14549type ListObjectVersionsInput struct {
13399 _ struct{} `type:"structure"` 14550 _ struct{} `type:"structure"`
13400 14551
@@ -13455,6 +14606,13 @@ func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
13455 return s 14606 return s
13456} 14607}
13457 14608
14609func (s *ListObjectVersionsInput) getBucket() (v string) {
14610 if s.Bucket == nil {
14611 return v
14612 }
14613 return *s.Bucket
14614}
14615
13458// SetDelimiter sets the Delimiter field's value. 14616// SetDelimiter sets the Delimiter field's value.
13459func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { 14617func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
13460 s.Delimiter = &v 14618 s.Delimiter = &v
@@ -13491,7 +14649,6 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio
13491 return s 14649 return s
13492} 14650}
13493 14651
13494// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput
13495type ListObjectVersionsOutput struct { 14652type ListObjectVersionsOutput struct {
13496 _ struct{} `type:"structure"` 14653 _ struct{} `type:"structure"`
13497 14654
@@ -13619,7 +14776,6 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe
13619 return s 14776 return s
13620} 14777}
13621 14778
13622// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest
13623type ListObjectsInput struct { 14779type ListObjectsInput struct {
13624 _ struct{} `type:"structure"` 14780 _ struct{} `type:"structure"`
13625 14781
@@ -13682,6 +14838,13 @@ func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
13682 return s 14838 return s
13683} 14839}
13684 14840
14841func (s *ListObjectsInput) getBucket() (v string) {
14842 if s.Bucket == nil {
14843 return v
14844 }
14845 return *s.Bucket
14846}
14847
13685// SetDelimiter sets the Delimiter field's value. 14848// SetDelimiter sets the Delimiter field's value.
13686func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { 14849func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
13687 s.Delimiter = &v 14850 s.Delimiter = &v
@@ -13718,7 +14881,6 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
13718 return s 14881 return s
13719} 14882}
13720 14883
13721// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput
13722type ListObjectsOutput struct { 14884type ListObjectsOutput struct {
13723 _ struct{} `type:"structure"` 14885 _ struct{} `type:"structure"`
13724 14886
@@ -13823,7 +14985,6 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
13823 return s 14985 return s
13824} 14986}
13825 14987
13826// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request
13827type ListObjectsV2Input struct { 14988type ListObjectsV2Input struct {
13828 _ struct{} `type:"structure"` 14989 _ struct{} `type:"structure"`
13829 14990
@@ -13894,6 +15055,13 @@ func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
13894 return s 15055 return s
13895} 15056}
13896 15057
15058func (s *ListObjectsV2Input) getBucket() (v string) {
15059 if s.Bucket == nil {
15060 return v
15061 }
15062 return *s.Bucket
15063}
15064
13897// SetContinuationToken sets the ContinuationToken field's value. 15065// SetContinuationToken sets the ContinuationToken field's value.
13898func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { 15066func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
13899 s.ContinuationToken = &v 15067 s.ContinuationToken = &v
@@ -13942,7 +15110,6 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
13942 return s 15110 return s
13943} 15111}
13944 15112
13945// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output
13946type ListObjectsV2Output struct { 15113type ListObjectsV2Output struct {
13947 _ struct{} `type:"structure"` 15114 _ struct{} `type:"structure"`
13948 15115
@@ -14076,7 +15243,6 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
14076 return s 15243 return s
14077} 15244}
14078 15245
14079// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest
14080type ListPartsInput struct { 15246type ListPartsInput struct {
14081 _ struct{} `type:"structure"` 15247 _ struct{} `type:"structure"`
14082 15248
@@ -14143,6 +15309,13 @@ func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
14143 return s 15309 return s
14144} 15310}
14145 15311
15312func (s *ListPartsInput) getBucket() (v string) {
15313 if s.Bucket == nil {
15314 return v
15315 }
15316 return *s.Bucket
15317}
15318
14146// SetKey sets the Key field's value. 15319// SetKey sets the Key field's value.
14147func (s *ListPartsInput) SetKey(v string) *ListPartsInput { 15320func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
14148 s.Key = &v 15321 s.Key = &v
@@ -14173,12 +15346,11 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
14173 return s 15346 return s
14174} 15347}
14175 15348
14176// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput
14177type ListPartsOutput struct { 15349type ListPartsOutput struct {
14178 _ struct{} `type:"structure"` 15350 _ struct{} `type:"structure"`
14179 15351
14180 // Date when multipart upload will become eligible for abort operation by lifecycle. 15352 // Date when multipart upload will become eligible for abort operation by lifecycle.
14181 AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` 15353 AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
14182 15354
14183 // Id of the lifecycle rule that makes a multipart upload eligible for abort 15355 // Id of the lifecycle rule that makes a multipart upload eligible for abort
14184 // operation. 15356 // operation.
@@ -14250,6 +15422,13 @@ func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
14250 return s 15422 return s
14251} 15423}
14252 15424
15425func (s *ListPartsOutput) getBucket() (v string) {
15426 if s.Bucket == nil {
15427 return v
15428 }
15429 return *s.Bucket
15430}
15431
14253// SetInitiator sets the Initiator field's value. 15432// SetInitiator sets the Initiator field's value.
14254func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { 15433func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
14255 s.Initiator = v 15434 s.Initiator = v
@@ -14316,7 +15495,137 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
14316 return s 15495 return s
14317} 15496}
14318 15497
14319// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled 15498// Describes an S3 location that will receive the results of the restore request.
15499type Location struct {
15500 _ struct{} `type:"structure"`
15501
15502 // A list of grants that control access to the staged results.
15503 AccessControlList []*Grant `locationNameList:"Grant" type:"list"`
15504
15505 // The name of the bucket where the restore results will be placed.
15506 //
15507 // BucketName is a required field
15508 BucketName *string `type:"string" required:"true"`
15509
15510 // The canned ACL to apply to the restore results.
15511 CannedACL *string `type:"string" enum:"ObjectCannedACL"`
15512
15513 // Describes the server-side encryption that will be applied to the restore
15514 // results.
15515 Encryption *Encryption `type:"structure"`
15516
15517 // The prefix that is prepended to the restore results for this request.
15518 //
15519 // Prefix is a required field
15520 Prefix *string `type:"string" required:"true"`
15521
15522 // The class of storage used to store the restore results.
15523 StorageClass *string `type:"string" enum:"StorageClass"`
15524
15525 // The tag-set that is applied to the restore results.
15526 Tagging *Tagging `type:"structure"`
15527
15528 // A list of metadata to store with the restore results in S3.
15529 UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"`
15530}
15531
15532// String returns the string representation
15533func (s Location) String() string {
15534 return awsutil.Prettify(s)
15535}
15536
15537// GoString returns the string representation
15538func (s Location) GoString() string {
15539 return s.String()
15540}
15541
15542// Validate inspects the fields of the type to determine if they are valid.
15543func (s *Location) Validate() error {
15544 invalidParams := request.ErrInvalidParams{Context: "Location"}
15545 if s.BucketName == nil {
15546 invalidParams.Add(request.NewErrParamRequired("BucketName"))
15547 }
15548 if s.Prefix == nil {
15549 invalidParams.Add(request.NewErrParamRequired("Prefix"))
15550 }
15551 if s.AccessControlList != nil {
15552 for i, v := range s.AccessControlList {
15553 if v == nil {
15554 continue
15555 }
15556 if err := v.Validate(); err != nil {
15557 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams))
15558 }
15559 }
15560 }
15561 if s.Encryption != nil {
15562 if err := s.Encryption.Validate(); err != nil {
15563 invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
15564 }
15565 }
15566 if s.Tagging != nil {
15567 if err := s.Tagging.Validate(); err != nil {
15568 invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
15569 }
15570 }
15571
15572 if invalidParams.Len() > 0 {
15573 return invalidParams
15574 }
15575 return nil
15576}
15577
15578// SetAccessControlList sets the AccessControlList field's value.
15579func (s *Location) SetAccessControlList(v []*Grant) *Location {
15580 s.AccessControlList = v
15581 return s
15582}
15583
15584// SetBucketName sets the BucketName field's value.
15585func (s *Location) SetBucketName(v string) *Location {
15586 s.BucketName = &v
15587 return s
15588}
15589
15590// SetCannedACL sets the CannedACL field's value.
15591func (s *Location) SetCannedACL(v string) *Location {
15592 s.CannedACL = &v
15593 return s
15594}
15595
15596// SetEncryption sets the Encryption field's value.
15597func (s *Location) SetEncryption(v *Encryption) *Location {
15598 s.Encryption = v
15599 return s
15600}
15601
15602// SetPrefix sets the Prefix field's value.
15603func (s *Location) SetPrefix(v string) *Location {
15604 s.Prefix = &v
15605 return s
15606}
15607
15608// SetStorageClass sets the StorageClass field's value.
15609func (s *Location) SetStorageClass(v string) *Location {
15610 s.StorageClass = &v
15611 return s
15612}
15613
15614// SetTagging sets the Tagging field's value.
15615func (s *Location) SetTagging(v *Tagging) *Location {
15616 s.Tagging = v
15617 return s
15618}
15619
15620// SetUserMetadata sets the UserMetadata field's value.
15621func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location {
15622 s.UserMetadata = v
15623 return s
15624}
15625
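The new Location type above models a restore job's output destination: BucketName and Prefix are required, while ACL, encryption, storage class, tagging, and user metadata are optional. A hedged sketch of building and validating one with the setters just added; the bucket and prefix values are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// BucketName and Prefix are required; Validate reports them if missing.
	loc := (&s3.Location{}).
		SetBucketName("example-restore-bucket").
		SetPrefix("restore-results/").
		SetStorageClass(s3.StorageClassStandard)

	if err := loc.Validate(); err != nil {
		fmt.Println("invalid location:", err)
		return
	}
	fmt.Println(loc.String())
}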
15626// Container for logging information. Presence of this element indicates that
15627// logging is enabled. Parameters TargetBucket and TargetPrefix are required
15628// in this case.
14320type LoggingEnabled struct { 15629type LoggingEnabled struct {
14321 _ struct{} `type:"structure"` 15630 _ struct{} `type:"structure"`
14322 15631
@@ -14326,13 +15635,17 @@ type LoggingEnabled struct {
14326 // to deliver their logs to the same target bucket. In this case you should 15635 // to deliver their logs to the same target bucket. In this case you should
14327 // choose a different TargetPrefix for each source bucket so that the delivered 15636 // choose a different TargetPrefix for each source bucket so that the delivered
14328 // log files can be distinguished by key. 15637 // log files can be distinguished by key.
14329 TargetBucket *string `type:"string"` 15638 //
15639 // TargetBucket is a required field
15640 TargetBucket *string `type:"string" required:"true"`
14330 15641
14331 TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` 15642 TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
14332 15643
14333 // This element lets you specify a prefix for the keys that the log files will 15644 // This element lets you specify a prefix for the keys that the log files will
14334 // be stored under. 15645 // be stored under.
14335 TargetPrefix *string `type:"string"` 15646 //
15647 // TargetPrefix is a required field
15648 TargetPrefix *string `type:"string" required:"true"`
14336} 15649}
14337 15650
14338// String returns the string representation 15651// String returns the string representation
@@ -14348,6 +15661,12 @@ func (s LoggingEnabled) GoString() string {
14348// Validate inspects the fields of the type to determine if they are valid. 15661// Validate inspects the fields of the type to determine if they are valid.
14349func (s *LoggingEnabled) Validate() error { 15662func (s *LoggingEnabled) Validate() error {
14350 invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} 15663 invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
15664 if s.TargetBucket == nil {
15665 invalidParams.Add(request.NewErrParamRequired("TargetBucket"))
15666 }
15667 if s.TargetPrefix == nil {
15668 invalidParams.Add(request.NewErrParamRequired("TargetPrefix"))
15669 }
14351 if s.TargetGrants != nil { 15670 if s.TargetGrants != nil {
14352 for i, v := range s.TargetGrants { 15671 for i, v := range s.TargetGrants {
14353 if v == nil { 15672 if v == nil {
@@ -14383,7 +15702,37 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
14383 return s 15702 return s
14384} 15703}
14385 15704
14386// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator 15705// A metadata key-value pair to store with an object.
15706type MetadataEntry struct {
15707 _ struct{} `type:"structure"`
15708
15709 Name *string `type:"string"`
15710
15711 Value *string `type:"string"`
15712}
15713
15714// String returns the string representation
15715func (s MetadataEntry) String() string {
15716 return awsutil.Prettify(s)
15717}
15718
15719// GoString returns the string representation
15720func (s MetadataEntry) GoString() string {
15721 return s.String()
15722}
15723
15724// SetName sets the Name field's value.
15725func (s *MetadataEntry) SetName(v string) *MetadataEntry {
15726 s.Name = &v
15727 return s
15728}
15729
15730// SetValue sets the Value field's value.
15731func (s *MetadataEntry) SetValue(v string) *MetadataEntry {
15732 s.Value = &v
15733 return s
15734}
15735
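MetadataEntry is a plain name/value pair; the Location type above carries a list of them as UserMetadata on the restore output. A short illustrative sketch with placeholder keys and values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	meta := []*s3.MetadataEntry{
		(&s3.MetadataEntry{}).SetName("job").SetValue("nightly-restore"),
		(&s3.MetadataEntry{}).SetName("owner").SetValue("data-team"),
	}

	loc := (&s3.Location{}).
		SetBucketName("example-restore-bucket").
		SetPrefix("restore-results/").
		SetUserMetadata(meta)

	fmt.Println(loc.String())
}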
14387type MetricsAndOperator struct { 15736type MetricsAndOperator struct {
14388 _ struct{} `type:"structure"` 15737 _ struct{} `type:"structure"`
14389 15738
@@ -14436,7 +15785,6 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
14436 return s 15785 return s
14437} 15786}
14438 15787
14439// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration
14440type MetricsConfiguration struct { 15788type MetricsConfiguration struct {
14441 _ struct{} `type:"structure"` 15789 _ struct{} `type:"structure"`
14442 15790
@@ -14491,7 +15839,6 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
14491 return s 15839 return s
14492} 15840}
14493 15841
14494// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter
14495type MetricsFilter struct { 15842type MetricsFilter struct {
14496 _ struct{} `type:"structure"` 15843 _ struct{} `type:"structure"`
14497 15844
@@ -14555,12 +15902,11 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
14555 return s 15902 return s
14556} 15903}
14557 15904
14558// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload
14559type MultipartUpload struct { 15905type MultipartUpload struct {
14560 _ struct{} `type:"structure"` 15906 _ struct{} `type:"structure"`
14561 15907
14562 // Date and time at which the multipart upload was initiated. 15908 // Date and time at which the multipart upload was initiated.
14563 Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` 15909 Initiated *time.Time `type:"timestamp"`
14564 15910
14565 // Identifies who initiated the multipart upload. 15911 // Identifies who initiated the multipart upload.
14566 Initiator *Initiator `type:"structure"` 15912 Initiator *Initiator `type:"structure"`
@@ -14628,14 +15974,14 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
14628// configuration action on a bucket that has versioning enabled (or suspended) 15974// configuration action on a bucket that has versioning enabled (or suspended)
14629// to request that Amazon S3 delete noncurrent object versions at a specific 15975// to request that Amazon S3 delete noncurrent object versions at a specific
14630// period in the object's lifetime. 15976// period in the object's lifetime.
14631// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration
14632type NoncurrentVersionExpiration struct { 15977type NoncurrentVersionExpiration struct {
14633 _ struct{} `type:"structure"` 15978 _ struct{} `type:"structure"`
14634 15979
14635 // Specifies the number of days an object is noncurrent before Amazon S3 can 15980 // Specifies the number of days an object is noncurrent before Amazon S3 can
14636 // perform the associated action. For information about the noncurrent days 15981 // perform the associated action. For information about the noncurrent days
14637 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent 15982 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
14638 // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) 15983 // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
15984 // the Amazon Simple Storage Service Developer Guide.
14639 NoncurrentDays *int64 `type:"integer"` 15985 NoncurrentDays *int64 `type:"integer"`
14640} 15986}
14641 15987
@@ -14656,18 +16002,19 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers
14656} 16002}
14657 16003
14658// Container for the transition rule that describes when noncurrent objects 16004// Container for the transition rule that describes when noncurrent objects
14659// transition to the STANDARD_IA or GLACIER storage class. If your bucket is 16005// transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your
14660// versioning-enabled (or versioning is suspended), you can set this action 16006// bucket is versioning-enabled (or versioning is suspended), you can set this
14661// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA 16007// action to request that Amazon S3 transition noncurrent object versions to
14662// or GLACIER storage class at a specific period in the object's lifetime. 16008// the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period
14663// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition 16009// in the object's lifetime.
14664type NoncurrentVersionTransition struct { 16010type NoncurrentVersionTransition struct {
14665 _ struct{} `type:"structure"` 16011 _ struct{} `type:"structure"`
14666 16012
14667 // Specifies the number of days an object is noncurrent before Amazon S3 can 16013 // Specifies the number of days an object is noncurrent before Amazon S3 can
14668 // perform the associated action. For information about the noncurrent days 16014 // perform the associated action. For information about the noncurrent days
14669 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent 16015 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
14670 // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) 16016 // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
16017 // the Amazon Simple Storage Service Developer Guide.
14671 NoncurrentDays *int64 `type:"integer"` 16018 NoncurrentDays *int64 `type:"integer"`
14672 16019
14673 // The class of storage used to store the object. 16020 // The class of storage used to store the object.
@@ -14698,7 +16045,6 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi
14698 16045
14699// Container for specifying the notification configuration of the bucket. If 16046// Container for specifying the notification configuration of the bucket. If
14700// this element is empty, notifications are turned off on the bucket. 16047// this element is empty, notifications are turned off on the bucket.
14701// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration
14702type NotificationConfiguration struct { 16048type NotificationConfiguration struct {
14703 _ struct{} `type:"structure"` 16049 _ struct{} `type:"structure"`
14704 16050
@@ -14777,7 +16123,6 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati
14777 return s 16123 return s
14778} 16124}
14779 16125
14780// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated
14781type NotificationConfigurationDeprecated struct { 16126type NotificationConfigurationDeprecated struct {
14782 _ struct{} `type:"structure"` 16127 _ struct{} `type:"structure"`
14783 16128
@@ -14818,7 +16163,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
14818 16163
14819// Container for object key name filtering rules. For information about key 16164// Container for object key name filtering rules. For information about key
14820// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 16165// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
14821// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter 16166// in the Amazon Simple Storage Service Developer Guide.
14822type NotificationConfigurationFilter struct { 16167type NotificationConfigurationFilter struct {
14823 _ struct{} `type:"structure"` 16168 _ struct{} `type:"structure"`
14824 16169
@@ -14842,7 +16187,6 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf
14842 return s 16187 return s
14843} 16188}
14844 16189
14845// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object
14846type Object struct { 16190type Object struct {
14847 _ struct{} `type:"structure"` 16191 _ struct{} `type:"structure"`
14848 16192
@@ -14850,7 +16194,7 @@ type Object struct {
14850 16194
14851 Key *string `min:"1" type:"string"` 16195 Key *string `min:"1" type:"string"`
14852 16196
14853 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` 16197 LastModified *time.Time `type:"timestamp"`
14854 16198
14855 Owner *Owner `type:"structure"` 16199 Owner *Owner `type:"structure"`
14856 16200
@@ -14906,7 +16250,6 @@ func (s *Object) SetStorageClass(v string) *Object {
14906 return s 16250 return s
14907} 16251}
14908 16252
14909// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier
14910type ObjectIdentifier struct { 16253type ObjectIdentifier struct {
14911 _ struct{} `type:"structure"` 16254 _ struct{} `type:"structure"`
14912 16255
@@ -14957,7 +16300,6 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
14957 return s 16300 return s
14958} 16301}
14959 16302
14960// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion
14961type ObjectVersion struct { 16303type ObjectVersion struct {
14962 _ struct{} `type:"structure"` 16304 _ struct{} `type:"structure"`
14963 16305
@@ -14971,7 +16313,7 @@ type ObjectVersion struct {
14971 Key *string `min:"1" type:"string"` 16313 Key *string `min:"1" type:"string"`
14972 16314
14973 // Date and time the object was last modified. 16315 // Date and time the object was last modified.
14974 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` 16316 LastModified *time.Time `type:"timestamp"`
14975 16317
14976 Owner *Owner `type:"structure"` 16318 Owner *Owner `type:"structure"`
14977 16319
@@ -15043,7 +16385,78 @@ func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
15043 return s 16385 return s
15044} 16386}
15045 16387
15046// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner 16388// Describes the location where the restore job's output is stored.
16389type OutputLocation struct {
16390 _ struct{} `type:"structure"`
16391
16392 // Describes an S3 location that will receive the results of the restore request.
16393 S3 *Location `type:"structure"`
16394}
16395
16396// String returns the string representation
16397func (s OutputLocation) String() string {
16398 return awsutil.Prettify(s)
16399}
16400
16401// GoString returns the string representation
16402func (s OutputLocation) GoString() string {
16403 return s.String()
16404}
16405
16406// Validate inspects the fields of the type to determine if they are valid.
16407func (s *OutputLocation) Validate() error {
16408 invalidParams := request.ErrInvalidParams{Context: "OutputLocation"}
16409 if s.S3 != nil {
16410 if err := s.S3.Validate(); err != nil {
16411 invalidParams.AddNested("S3", err.(request.ErrInvalidParams))
16412 }
16413 }
16414
16415 if invalidParams.Len() > 0 {
16416 return invalidParams
16417 }
16418 return nil
16419}
16420
16421// SetS3 sets the S3 field's value.
16422func (s *OutputLocation) SetS3(v *Location) *OutputLocation {
16423 s.S3 = v
16424 return s
16425}
16426
16427// Describes how results of the Select job are serialized.
16428type OutputSerialization struct {
16429 _ struct{} `type:"structure"`
16430
16431 // Describes the serialization of CSV-encoded Select results.
16432 CSV *CSVOutput `type:"structure"`
16433
16434 // Specifies JSON as request's output serialization format.
16435 JSON *JSONOutput `type:"structure"`
16436}
16437
16438// String returns the string representation
16439func (s OutputSerialization) String() string {
16440 return awsutil.Prettify(s)
16441}
16442
16443// GoString returns the string representation
16444func (s OutputSerialization) GoString() string {
16445 return s.String()
16446}
16447
16448// SetCSV sets the CSV field's value.
16449func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization {
16450 s.CSV = v
16451 return s
16452}
16453
16454// SetJSON sets the JSON field's value.
16455func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization {
16456 s.JSON = v
16457 return s
16458}
16459
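OutputLocation wraps the Location type for restore requests, and OutputSerialization selects CSV or JSON for Select results. A hedged sketch combining both; the CSV field delimiter is an illustrative choice, not something the API requires:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Restore output destination: S3 wraps the Location shown earlier.
	out := (&s3.OutputLocation{}).SetS3(
		(&s3.Location{}).
			SetBucketName("example-restore-bucket").
			SetPrefix("restore-results/"),
	)
	if err := out.Validate(); err != nil {
		fmt.Println("invalid output location:", err)
		return
	}

	// Select results serialized as CSV with a comma field delimiter.
	ser := (&s3.OutputSerialization{}).SetCSV(
		(&s3.CSVOutput{}).SetFieldDelimiter(","),
	)
	fmt.Println(out.String(), ser.String())
}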
15047type Owner struct { 16460type Owner struct {
15048 _ struct{} `type:"structure"` 16461 _ struct{} `type:"structure"`
15049 16462
@@ -15074,7 +16487,6 @@ func (s *Owner) SetID(v string) *Owner {
15074 return s 16487 return s
15075} 16488}
15076 16489
15077// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part
15078type Part struct { 16490type Part struct {
15079 _ struct{} `type:"structure"` 16491 _ struct{} `type:"structure"`
15080 16492
@@ -15082,7 +16494,7 @@ type Part struct {
15082 ETag *string `type:"string"` 16494 ETag *string `type:"string"`
15083 16495
15084 // Date and time at which the part was uploaded. 16496 // Date and time at which the part was uploaded.
15085 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` 16497 LastModified *time.Time `type:"timestamp"`
15086 16498
15087 // Part number identifying the part. This is a positive integer between 1 and 16499 // Part number identifying the part. This is a positive integer between 1 and
15088 // 10,000. 16500 // 10,000.
@@ -15126,14 +16538,94 @@ func (s *Part) SetSize(v int64) *Part {
15126 return s 16538 return s
15127} 16539}
15128 16540
15129// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest 16541type Progress struct {
16542 _ struct{} `type:"structure"`
16543
16544 // Current number of uncompressed object bytes processed.
16545 BytesProcessed *int64 `type:"long"`
16546
16547 // Current number of bytes of records payload data returned.
16548 BytesReturned *int64 `type:"long"`
16549
16550 // Current number of object bytes scanned.
16551 BytesScanned *int64 `type:"long"`
16552}
16553
16554// String returns the string representation
16555func (s Progress) String() string {
16556 return awsutil.Prettify(s)
16557}
16558
16559// GoString returns the string representation
16560func (s Progress) GoString() string {
16561 return s.String()
16562}
16563
16564// SetBytesProcessed sets the BytesProcessed field's value.
16565func (s *Progress) SetBytesProcessed(v int64) *Progress {
16566 s.BytesProcessed = &v
16567 return s
16568}
16569
16570// SetBytesReturned sets the BytesReturned field's value.
16571func (s *Progress) SetBytesReturned(v int64) *Progress {
16572 s.BytesReturned = &v
16573 return s
16574}
16575
16576// SetBytesScanned sets the BytesScanned field's value.
16577func (s *Progress) SetBytesScanned(v int64) *Progress {
16578 s.BytesScanned = &v
16579 return s
16580}
16581
16582type ProgressEvent struct {
16583 _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
16584
16585 // The Progress event details.
16586 Details *Progress `locationName:"Details" type:"structure"`
16587}
16588
16589// String returns the string representation
16590func (s ProgressEvent) String() string {
16591 return awsutil.Prettify(s)
16592}
16593
16594// GoString returns the string representation
16595func (s ProgressEvent) GoString() string {
16596 return s.String()
16597}
16598
16599// SetDetails sets the Details field's value.
16600func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
16601 s.Details = v
16602 return s
16603}
16604
 16605// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
16606func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
16607
16608// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
16609// This method is only used internally within the SDK's EventStream handling.
16610func (s *ProgressEvent) UnmarshalEvent(
16611 payloadUnmarshaler protocol.PayloadUnmarshaler,
16612 msg eventstream.Message,
16613) error {
16614 if err := payloadUnmarshaler.UnmarshalPayload(
16615 bytes.NewReader(msg.Payload), s,
16616 ); err != nil {
16617 return err
16618 }
16619 return nil
16620}
16621
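Progress reports running byte counts for a Select request, and ProgressEvent carries it on the SelectObjectContentEventStream. A hedged sketch of inspecting one after it has been received (how the event stream delivers it is outside this hunk); aws.Int64Value dereferences the optional counters:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// logProgress prints the counters from a ProgressEvent's details,
// treating unset pointers as zero via aws.Int64Value.
func logProgress(ev *s3.ProgressEvent) {
	p := ev.Details
	if p == nil {
		return
	}
	fmt.Printf("scanned=%d processed=%d returned=%d\n",
		aws.Int64Value(p.BytesScanned),
		aws.Int64Value(p.BytesProcessed),
		aws.Int64Value(p.BytesReturned))
}

func main() {
	logProgress((&s3.ProgressEvent{}).SetDetails(
		(&s3.Progress{}).SetBytesScanned(1024).SetBytesProcessed(512).SetBytesReturned(256),
	))
}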
15130type PutBucketAccelerateConfigurationInput struct { 16622type PutBucketAccelerateConfigurationInput struct {
15131 _ struct{} `type:"structure" payload:"AccelerateConfiguration"` 16623 _ struct{} `type:"structure" payload:"AccelerateConfiguration"`
15132 16624
15133 // Specifies the Accelerate Configuration you want to set for the bucket. 16625 // Specifies the Accelerate Configuration you want to set for the bucket.
15134 // 16626 //
15135 // AccelerateConfiguration is a required field 16627 // AccelerateConfiguration is a required field
15136 AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` 16628 AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15137 16629
15138 // Name of the bucket for which the accelerate configuration is set. 16630 // Name of the bucket for which the accelerate configuration is set.
15139 // 16631 //
@@ -15179,7 +16671,13 @@ func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAc
15179 return s 16671 return s
15180} 16672}
15181 16673
15182// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput 16674func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) {
16675 if s.Bucket == nil {
16676 return v
16677 }
16678 return *s.Bucket
16679}
16680
15183type PutBucketAccelerateConfigurationOutput struct { 16681type PutBucketAccelerateConfigurationOutput struct {
15184 _ struct{} `type:"structure"` 16682 _ struct{} `type:"structure"`
15185} 16683}
@@ -15194,14 +16692,13 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string {
15194 return s.String() 16692 return s.String()
15195} 16693}
15196 16694
15197// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest
15198type PutBucketAclInput struct { 16695type PutBucketAclInput struct {
15199 _ struct{} `type:"structure" payload:"AccessControlPolicy"` 16696 _ struct{} `type:"structure" payload:"AccessControlPolicy"`
15200 16697
15201 // The canned ACL to apply to the bucket. 16698 // The canned ACL to apply to the bucket.
15202 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` 16699 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
15203 16700
15204 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` 16701 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15205 16702
15206 // Bucket is a required field 16703 // Bucket is a required field
15207 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 16704 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -15269,6 +16766,13 @@ func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
15269 return s 16766 return s
15270} 16767}
15271 16768
16769func (s *PutBucketAclInput) getBucket() (v string) {
16770 if s.Bucket == nil {
16771 return v
16772 }
16773 return *s.Bucket
16774}
16775
15272// SetGrantFullControl sets the GrantFullControl field's value. 16776// SetGrantFullControl sets the GrantFullControl field's value.
15273func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { 16777func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
15274 s.GrantFullControl = &v 16778 s.GrantFullControl = &v
@@ -15299,7 +16803,6 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
15299 return s 16803 return s
15300} 16804}
15301 16805
15302// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput
15303type PutBucketAclOutput struct { 16806type PutBucketAclOutput struct {
15304 _ struct{} `type:"structure"` 16807 _ struct{} `type:"structure"`
15305} 16808}
@@ -15314,14 +16817,13 @@ func (s PutBucketAclOutput) GoString() string {
15314 return s.String() 16817 return s.String()
15315} 16818}
15316 16819
15317// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest
15318type PutBucketAnalyticsConfigurationInput struct { 16820type PutBucketAnalyticsConfigurationInput struct {
15319 _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` 16821 _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
15320 16822
15321 // The configuration and any analyses for the analytics filter. 16823 // The configuration and any analyses for the analytics filter.
15322 // 16824 //
15323 // AnalyticsConfiguration is a required field 16825 // AnalyticsConfiguration is a required field
15324 AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` 16826 AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15325 16827
15326 // The name of the bucket to which an analytics configuration is stored. 16828 // The name of the bucket to which an analytics configuration is stored.
15327 // 16829 //
@@ -15380,13 +16882,19 @@ func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAna
15380 return s 16882 return s
15381} 16883}
15382 16884
16885func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) {
16886 if s.Bucket == nil {
16887 return v
16888 }
16889 return *s.Bucket
16890}
16891
15383// SetId sets the Id field's value. 16892// SetId sets the Id field's value.
15384func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { 16893func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
15385 s.Id = &v 16894 s.Id = &v
15386 return s 16895 return s
15387} 16896}
15388 16897
15389// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput
15390type PutBucketAnalyticsConfigurationOutput struct { 16898type PutBucketAnalyticsConfigurationOutput struct {
15391 _ struct{} `type:"structure"` 16899 _ struct{} `type:"structure"`
15392} 16900}
@@ -15401,7 +16909,6 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
15401 return s.String() 16909 return s.String()
15402} 16910}
15403 16911
15404// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest
15405type PutBucketCorsInput struct { 16912type PutBucketCorsInput struct {
15406 _ struct{} `type:"structure" payload:"CORSConfiguration"` 16913 _ struct{} `type:"structure" payload:"CORSConfiguration"`
15407 16914
@@ -15409,7 +16916,7 @@ type PutBucketCorsInput struct {
15409 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 16916 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15410 16917
15411 // CORSConfiguration is a required field 16918 // CORSConfiguration is a required field
15412 CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` 16919 CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15413} 16920}
15414 16921
15415// String returns the string representation 16922// String returns the string representation
@@ -15449,13 +16956,19 @@ func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
15449 return s 16956 return s
15450} 16957}
15451 16958
16959func (s *PutBucketCorsInput) getBucket() (v string) {
16960 if s.Bucket == nil {
16961 return v
16962 }
16963 return *s.Bucket
16964}
16965
15452// SetCORSConfiguration sets the CORSConfiguration field's value. 16966// SetCORSConfiguration sets the CORSConfiguration field's value.
15453func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { 16967func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
15454 s.CORSConfiguration = v 16968 s.CORSConfiguration = v
15455 return s 16969 return s
15456} 16970}
15457 16971
15458// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput
15459type PutBucketCorsOutput struct { 16972type PutBucketCorsOutput struct {
15460 _ struct{} `type:"structure"` 16973 _ struct{} `type:"structure"`
15461} 16974}
@@ -15470,7 +16983,86 @@ func (s PutBucketCorsOutput) GoString() string {
15470 return s.String() 16983 return s.String()
15471} 16984}
15472 16985
15473// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest 16986type PutBucketEncryptionInput struct {
16987 _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
16988
16989 // The name of the bucket for which the server-side encryption configuration
16990 // is set.
16991 //
16992 // Bucket is a required field
16993 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16994
16995 // Container for server-side encryption configuration rules. Currently S3 supports
16996 // one rule only.
16997 //
16998 // ServerSideEncryptionConfiguration is a required field
16999 ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
17000}
17001
17002// String returns the string representation
17003func (s PutBucketEncryptionInput) String() string {
17004 return awsutil.Prettify(s)
17005}
17006
17007// GoString returns the string representation
17008func (s PutBucketEncryptionInput) GoString() string {
17009 return s.String()
17010}
17011
17012// Validate inspects the fields of the type to determine if they are valid.
17013func (s *PutBucketEncryptionInput) Validate() error {
17014 invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"}
17015 if s.Bucket == nil {
17016 invalidParams.Add(request.NewErrParamRequired("Bucket"))
17017 }
17018 if s.ServerSideEncryptionConfiguration == nil {
17019 invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration"))
17020 }
17021 if s.ServerSideEncryptionConfiguration != nil {
17022 if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil {
17023 invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams))
17024 }
17025 }
17026
17027 if invalidParams.Len() > 0 {
17028 return invalidParams
17029 }
17030 return nil
17031}
17032
17033// SetBucket sets the Bucket field's value.
17034func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput {
17035 s.Bucket = &v
17036 return s
17037}
17038
17039func (s *PutBucketEncryptionInput) getBucket() (v string) {
17040 if s.Bucket == nil {
17041 return v
17042 }
17043 return *s.Bucket
17044}
17045
17046// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value.
17047func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput {
17048 s.ServerSideEncryptionConfiguration = v
17049 return s
17050}
17051
17052type PutBucketEncryptionOutput struct {
17053 _ struct{} `type:"structure"`
17054}
17055
17056// String returns the string representation
17057func (s PutBucketEncryptionOutput) String() string {
17058 return awsutil.Prettify(s)
17059}
17060
17061// GoString returns the string representation
17062func (s PutBucketEncryptionOutput) GoString() string {
17063 return s.String()
17064}
17065
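The new PutBucketEncryptionInput pairs the bucket name with a ServerSideEncryptionConfiguration payload (the xmlURI tag keeps the XML namespace on the request body). A hedged usage sketch, assuming the corresponding PutBucketEncryption operation and the ServerSideEncryptionConfiguration/Rule/ByDefault types ship in the same SDK version; the region and bucket name are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Default every new object in the bucket to AES-256 server-side encryption.
	input := (&s3.PutBucketEncryptionInput{}).
		SetBucket("example-bucket").
		SetServerSideEncryptionConfiguration(&s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		})

	if _, err := svc.PutBucketEncryption(input); err != nil {
		log.Fatal(err)
	}
}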
15474type PutBucketInventoryConfigurationInput struct { 17066type PutBucketInventoryConfigurationInput struct {
15475 _ struct{} `type:"structure" payload:"InventoryConfiguration"` 17067 _ struct{} `type:"structure" payload:"InventoryConfiguration"`
15476 17068
@@ -15487,7 +17079,7 @@ type PutBucketInventoryConfigurationInput struct {
15487 // Specifies the inventory configuration. 17079 // Specifies the inventory configuration.
15488 // 17080 //
15489 // InventoryConfiguration is a required field 17081 // InventoryConfiguration is a required field
15490 InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` 17082 InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15491} 17083}
15492 17084
15493// String returns the string representation 17085// String returns the string representation
@@ -15530,6 +17122,13 @@ func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInv
15530 return s 17122 return s
15531} 17123}
15532 17124
17125func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) {
17126 if s.Bucket == nil {
17127 return v
17128 }
17129 return *s.Bucket
17130}
17131
15533// SetId sets the Id field's value. 17132// SetId sets the Id field's value.
15534func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { 17133func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
15535 s.Id = &v 17134 s.Id = &v
@@ -15542,7 +17141,6 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve
15542 return s 17141 return s
15543} 17142}
15544 17143
15545// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput
15546type PutBucketInventoryConfigurationOutput struct { 17144type PutBucketInventoryConfigurationOutput struct {
15547 _ struct{} `type:"structure"` 17145 _ struct{} `type:"structure"`
15548} 17146}
@@ -15557,14 +17155,13 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string {
15557 return s.String() 17155 return s.String()
15558} 17156}
15559 17157
15560// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest
15561type PutBucketLifecycleConfigurationInput struct { 17158type PutBucketLifecycleConfigurationInput struct {
15562 _ struct{} `type:"structure" payload:"LifecycleConfiguration"` 17159 _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
15563 17160
15564 // Bucket is a required field 17161 // Bucket is a required field
15565 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17162 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15566 17163
15567 LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` 17164 LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15568} 17165}
15569 17166
15570// String returns the string representation 17167// String returns the string representation
@@ -15601,13 +17198,19 @@ func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLif
15601 return s 17198 return s
15602} 17199}
15603 17200
17201func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) {
17202 if s.Bucket == nil {
17203 return v
17204 }
17205 return *s.Bucket
17206}
17207
15604// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 17208// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
15605func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { 17209func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
15606 s.LifecycleConfiguration = v 17210 s.LifecycleConfiguration = v
15607 return s 17211 return s
15608} 17212}
15609 17213
15610// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput
15611type PutBucketLifecycleConfigurationOutput struct { 17214type PutBucketLifecycleConfigurationOutput struct {
15612 _ struct{} `type:"structure"` 17215 _ struct{} `type:"structure"`
15613} 17216}
@@ -15622,14 +17225,13 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string {
15622 return s.String() 17225 return s.String()
15623} 17226}
15624 17227
15625// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest
15626type PutBucketLifecycleInput struct { 17228type PutBucketLifecycleInput struct {
15627 _ struct{} `type:"structure" payload:"LifecycleConfiguration"` 17229 _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
15628 17230
15629 // Bucket is a required field 17231 // Bucket is a required field
15630 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17232 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15631 17233
15632 LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` 17234 LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15633} 17235}
15634 17236
15635// String returns the string representation 17237// String returns the string representation
@@ -15666,13 +17268,19 @@ func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
15666 return s 17268 return s
15667} 17269}
15668 17270
17271func (s *PutBucketLifecycleInput) getBucket() (v string) {
17272 if s.Bucket == nil {
17273 return v
17274 }
17275 return *s.Bucket
17276}
17277
15669// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 17278// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
15670func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { 17279func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
15671 s.LifecycleConfiguration = v 17280 s.LifecycleConfiguration = v
15672 return s 17281 return s
15673} 17282}
15674 17283
15675// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput
15676type PutBucketLifecycleOutput struct { 17284type PutBucketLifecycleOutput struct {
15677 _ struct{} `type:"structure"` 17285 _ struct{} `type:"structure"`
15678} 17286}
@@ -15687,7 +17295,6 @@ func (s PutBucketLifecycleOutput) GoString() string {
15687 return s.String() 17295 return s.String()
15688} 17296}
15689 17297
15690// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest
15691type PutBucketLoggingInput struct { 17298type PutBucketLoggingInput struct {
15692 _ struct{} `type:"structure" payload:"BucketLoggingStatus"` 17299 _ struct{} `type:"structure" payload:"BucketLoggingStatus"`
15693 17300
@@ -15695,7 +17302,7 @@ type PutBucketLoggingInput struct {
15695 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17302 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15696 17303
15697 // BucketLoggingStatus is a required field 17304 // BucketLoggingStatus is a required field
15698 BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` 17305 BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15699} 17306}
15700 17307
15701// String returns the string representation 17308// String returns the string representation
@@ -15735,13 +17342,19 @@ func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
15735 return s 17342 return s
15736} 17343}
15737 17344
17345func (s *PutBucketLoggingInput) getBucket() (v string) {
17346 if s.Bucket == nil {
17347 return v
17348 }
17349 return *s.Bucket
17350}
17351
15738// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. 17352// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
15739func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { 17353func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
15740 s.BucketLoggingStatus = v 17354 s.BucketLoggingStatus = v
15741 return s 17355 return s
15742} 17356}
15743 17357
15744// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput
15745type PutBucketLoggingOutput struct { 17358type PutBucketLoggingOutput struct {
15746 _ struct{} `type:"structure"` 17359 _ struct{} `type:"structure"`
15747} 17360}
@@ -15756,7 +17369,6 @@ func (s PutBucketLoggingOutput) GoString() string {
15756 return s.String() 17369 return s.String()
15757} 17370}
15758 17371
15759// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest
15760type PutBucketMetricsConfigurationInput struct { 17372type PutBucketMetricsConfigurationInput struct {
15761 _ struct{} `type:"structure" payload:"MetricsConfiguration"` 17373 _ struct{} `type:"structure" payload:"MetricsConfiguration"`
15762 17374
@@ -15773,7 +17385,7 @@ type PutBucketMetricsConfigurationInput struct {
15773 // Specifies the metrics configuration. 17385 // Specifies the metrics configuration.
15774 // 17386 //
15775 // MetricsConfiguration is a required field 17387 // MetricsConfiguration is a required field
15776 MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` 17388 MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15777} 17389}
15778 17390
15779// String returns the string representation 17391// String returns the string representation
@@ -15816,6 +17428,13 @@ func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetri
15816 return s 17428 return s
15817} 17429}
15818 17430
17431func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) {
17432 if s.Bucket == nil {
17433 return v
17434 }
17435 return *s.Bucket
17436}
17437
15819// SetId sets the Id field's value. 17438// SetId sets the Id field's value.
15820func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { 17439func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
15821 s.Id = &v 17440 s.Id = &v
@@ -15828,7 +17447,6 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC
15828 return s 17447 return s
15829} 17448}
15830 17449
15831// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput
15832type PutBucketMetricsConfigurationOutput struct { 17450type PutBucketMetricsConfigurationOutput struct {
15833 _ struct{} `type:"structure"` 17451 _ struct{} `type:"structure"`
15834} 17452}
@@ -15843,7 +17461,6 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string {
15843 return s.String() 17461 return s.String()
15844} 17462}
15845 17463
15846// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest
15847type PutBucketNotificationConfigurationInput struct { 17464type PutBucketNotificationConfigurationInput struct {
15848 _ struct{} `type:"structure" payload:"NotificationConfiguration"` 17465 _ struct{} `type:"structure" payload:"NotificationConfiguration"`
15849 17466
@@ -15854,7 +17471,7 @@ type PutBucketNotificationConfigurationInput struct {
15854 // this element is empty, notifications are turned off on the bucket. 17471 // this element is empty, notifications are turned off on the bucket.
15855 // 17472 //
15856 // NotificationConfiguration is a required field 17473 // NotificationConfiguration is a required field
15857 NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` 17474 NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15858} 17475}
15859 17476
15860// String returns the string representation 17477// String returns the string representation
@@ -15894,13 +17511,19 @@ func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucket
15894 return s 17511 return s
15895} 17512}
15896 17513
17514func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) {
17515 if s.Bucket == nil {
17516 return v
17517 }
17518 return *s.Bucket
17519}
17520
15897// SetNotificationConfiguration sets the NotificationConfiguration field's value. 17521// SetNotificationConfiguration sets the NotificationConfiguration field's value.
15898func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { 17522func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
15899 s.NotificationConfiguration = v 17523 s.NotificationConfiguration = v
15900 return s 17524 return s
15901} 17525}
15902 17526
15903// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput
15904type PutBucketNotificationConfigurationOutput struct { 17527type PutBucketNotificationConfigurationOutput struct {
15905 _ struct{} `type:"structure"` 17528 _ struct{} `type:"structure"`
15906} 17529}
@@ -15915,7 +17538,6 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string {
15915 return s.String() 17538 return s.String()
15916} 17539}
15917 17540
15918// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest
15919type PutBucketNotificationInput struct { 17541type PutBucketNotificationInput struct {
15920 _ struct{} `type:"structure" payload:"NotificationConfiguration"` 17542 _ struct{} `type:"structure" payload:"NotificationConfiguration"`
15921 17543
@@ -15923,7 +17545,7 @@ type PutBucketNotificationInput struct {
15923 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17545 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15924 17546
15925 // NotificationConfiguration is a required field 17547 // NotificationConfiguration is a required field
15926 NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` 17548 NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
15927} 17549}
15928 17550
15929// String returns the string representation 17551// String returns the string representation
@@ -15958,13 +17580,19 @@ func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationI
15958 return s 17580 return s
15959} 17581}
15960 17582
17583func (s *PutBucketNotificationInput) getBucket() (v string) {
17584 if s.Bucket == nil {
17585 return v
17586 }
17587 return *s.Bucket
17588}
17589
15961// SetNotificationConfiguration sets the NotificationConfiguration field's value. 17590// SetNotificationConfiguration sets the NotificationConfiguration field's value.
15962func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { 17591func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
15963 s.NotificationConfiguration = v 17592 s.NotificationConfiguration = v
15964 return s 17593 return s
15965} 17594}
15966 17595
15967// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput
15968type PutBucketNotificationOutput struct { 17596type PutBucketNotificationOutput struct {
15969 _ struct{} `type:"structure"` 17597 _ struct{} `type:"structure"`
15970} 17598}
@@ -15979,13 +17607,16 @@ func (s PutBucketNotificationOutput) GoString() string {
15979 return s.String() 17607 return s.String()
15980} 17608}
15981 17609
15982// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest
15983type PutBucketPolicyInput struct { 17610type PutBucketPolicyInput struct {
15984 _ struct{} `type:"structure" payload:"Policy"` 17611 _ struct{} `type:"structure" payload:"Policy"`
15985 17612
15986 // Bucket is a required field 17613 // Bucket is a required field
15987 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17614 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15988 17615
17616 // Set this parameter to true to confirm that you want to remove your permissions
17617 // to change this bucket policy in the future.
17618 ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"`
17619
15989 // The bucket policy as a JSON document. 17620 // The bucket policy as a JSON document.
15990 // 17621 //
15991 // Policy is a required field 17622 // Policy is a required field
@@ -16024,13 +17655,25 @@ func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
16024 return s 17655 return s
16025} 17656}
16026 17657
17658func (s *PutBucketPolicyInput) getBucket() (v string) {
17659 if s.Bucket == nil {
17660 return v
17661 }
17662 return *s.Bucket
17663}
17664
17665// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value.
17666func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput {
17667 s.ConfirmRemoveSelfBucketAccess = &v
17668 return s
17669}
17670
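// Illustrative sketch, not generated SDK code: building a PutBucketPolicyInput
// that acknowledges the self lock-out risk described above. The bucket name and
// policy document are placeholders; only setters generated in this file are used.
func examplePutBucketPolicyInput(policyJSON string) *PutBucketPolicyInput {
	return (&PutBucketPolicyInput{}).
		SetBucket("my-example-bucket"). // placeholder bucket name
		SetPolicy(policyJSON).
		SetConfirmRemoveSelfBucketAccess(true) // confirm removal of own policy-change permissions
}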
16027// SetPolicy sets the Policy field's value. 17671// SetPolicy sets the Policy field's value.
16028func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { 17672func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
16029 s.Policy = &v 17673 s.Policy = &v
16030 return s 17674 return s
16031} 17675}
16032 17676
16033// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput
16034type PutBucketPolicyOutput struct { 17677type PutBucketPolicyOutput struct {
16035 _ struct{} `type:"structure"` 17678 _ struct{} `type:"structure"`
16036} 17679}
@@ -16045,7 +17688,6 @@ func (s PutBucketPolicyOutput) GoString() string {
16045 return s.String() 17688 return s.String()
16046} 17689}
16047 17690
16048// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest
16049type PutBucketReplicationInput struct { 17691type PutBucketReplicationInput struct {
16050 _ struct{} `type:"structure" payload:"ReplicationConfiguration"` 17692 _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
16051 17693
@@ -16056,7 +17698,7 @@ type PutBucketReplicationInput struct {
16056 // replication configuration size can be up to 2 MB. 17698 // replication configuration size can be up to 2 MB.
16057 // 17699 //
16058 // ReplicationConfiguration is a required field 17700 // ReplicationConfiguration is a required field
16059 ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` 17701 ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16060} 17702}
16061 17703
16062// String returns the string representation 17704// String returns the string representation
@@ -16096,13 +17738,19 @@ func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInp
16096 return s 17738 return s
16097} 17739}
16098 17740
17741func (s *PutBucketReplicationInput) getBucket() (v string) {
17742 if s.Bucket == nil {
17743 return v
17744 }
17745 return *s.Bucket
17746}
17747
16099// SetReplicationConfiguration sets the ReplicationConfiguration field's value. 17748// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
16100func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { 17749func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
16101 s.ReplicationConfiguration = v 17750 s.ReplicationConfiguration = v
16102 return s 17751 return s
16103} 17752}
16104 17753
16105// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput
16106type PutBucketReplicationOutput struct { 17754type PutBucketReplicationOutput struct {
16107 _ struct{} `type:"structure"` 17755 _ struct{} `type:"structure"`
16108} 17756}
@@ -16117,7 +17765,6 @@ func (s PutBucketReplicationOutput) GoString() string {
16117 return s.String() 17765 return s.String()
16118} 17766}
16119 17767
16120// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest
16121type PutBucketRequestPaymentInput struct { 17768type PutBucketRequestPaymentInput struct {
16122 _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` 17769 _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
16123 17770
@@ -16125,7 +17772,7 @@ type PutBucketRequestPaymentInput struct {
16125 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17772 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16126 17773
16127 // RequestPaymentConfiguration is a required field 17774 // RequestPaymentConfiguration is a required field
16128 RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` 17775 RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16129} 17776}
16130 17777
16131// String returns the string representation 17778// String returns the string representation
@@ -16165,13 +17812,19 @@ func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaym
16165 return s 17812 return s
16166} 17813}
16167 17814
17815func (s *PutBucketRequestPaymentInput) getBucket() (v string) {
17816 if s.Bucket == nil {
17817 return v
17818 }
17819 return *s.Bucket
17820}
17821
16168// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 17822// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
16169func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { 17823func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
16170 s.RequestPaymentConfiguration = v 17824 s.RequestPaymentConfiguration = v
16171 return s 17825 return s
16172} 17826}
16173 17827
16174// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput
16175type PutBucketRequestPaymentOutput struct { 17828type PutBucketRequestPaymentOutput struct {
16176 _ struct{} `type:"structure"` 17829 _ struct{} `type:"structure"`
16177} 17830}
@@ -16186,7 +17839,6 @@ func (s PutBucketRequestPaymentOutput) GoString() string {
16186 return s.String() 17839 return s.String()
16187} 17840}
16188 17841
16189// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest
16190type PutBucketTaggingInput struct { 17842type PutBucketTaggingInput struct {
16191 _ struct{} `type:"structure" payload:"Tagging"` 17843 _ struct{} `type:"structure" payload:"Tagging"`
16192 17844
@@ -16194,7 +17846,7 @@ type PutBucketTaggingInput struct {
16194 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17846 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16195 17847
16196 // Tagging is a required field 17848 // Tagging is a required field
16197 Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` 17849 Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16198} 17850}
16199 17851
16200// String returns the string representation 17852// String returns the string representation
@@ -16234,13 +17886,19 @@ func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
16234 return s 17886 return s
16235} 17887}
16236 17888
17889func (s *PutBucketTaggingInput) getBucket() (v string) {
17890 if s.Bucket == nil {
17891 return v
17892 }
17893 return *s.Bucket
17894}
17895
16237// SetTagging sets the Tagging field's value. 17896// SetTagging sets the Tagging field's value.
16238func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { 17897func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
16239 s.Tagging = v 17898 s.Tagging = v
16240 return s 17899 return s
16241} 17900}
16242 17901
16243// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput
16244type PutBucketTaggingOutput struct { 17902type PutBucketTaggingOutput struct {
16245 _ struct{} `type:"structure"` 17903 _ struct{} `type:"structure"`
16246} 17904}
@@ -16255,7 +17913,6 @@ func (s PutBucketTaggingOutput) GoString() string {
16255 return s.String() 17913 return s.String()
16256} 17914}
16257 17915
16258// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest
16259type PutBucketVersioningInput struct { 17916type PutBucketVersioningInput struct {
16260 _ struct{} `type:"structure" payload:"VersioningConfiguration"` 17917 _ struct{} `type:"structure" payload:"VersioningConfiguration"`
16261 17918
@@ -16267,7 +17924,7 @@ type PutBucketVersioningInput struct {
16267 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` 17924 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
16268 17925
16269 // VersioningConfiguration is a required field 17926 // VersioningConfiguration is a required field
16270 VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` 17927 VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16271} 17928}
16272 17929
16273// String returns the string representation 17930// String returns the string representation
@@ -16302,6 +17959,13 @@ func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput
16302 return s 17959 return s
16303} 17960}
16304 17961
17962func (s *PutBucketVersioningInput) getBucket() (v string) {
17963 if s.Bucket == nil {
17964 return v
17965 }
17966 return *s.Bucket
17967}
17968
16305// SetMFA sets the MFA field's value. 17969// SetMFA sets the MFA field's value.
16306func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { 17970func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
16307 s.MFA = &v 17971 s.MFA = &v
@@ -16314,7 +17978,6 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi
16314 return s 17978 return s
16315} 17979}
16316 17980
16317// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput
16318type PutBucketVersioningOutput struct { 17981type PutBucketVersioningOutput struct {
16319 _ struct{} `type:"structure"` 17982 _ struct{} `type:"structure"`
16320} 17983}
@@ -16329,7 +17992,6 @@ func (s PutBucketVersioningOutput) GoString() string {
16329 return s.String() 17992 return s.String()
16330} 17993}
16331 17994
16332// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest
16333type PutBucketWebsiteInput struct { 17995type PutBucketWebsiteInput struct {
16334 _ struct{} `type:"structure" payload:"WebsiteConfiguration"` 17996 _ struct{} `type:"structure" payload:"WebsiteConfiguration"`
16335 17997
@@ -16337,7 +17999,7 @@ type PutBucketWebsiteInput struct {
16337 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 17999 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16338 18000
16339 // WebsiteConfiguration is a required field 18001 // WebsiteConfiguration is a required field
16340 WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` 18002 WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16341} 18003}
16342 18004
16343// String returns the string representation 18005// String returns the string representation
@@ -16377,13 +18039,19 @@ func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
16377 return s 18039 return s
16378} 18040}
16379 18041
18042func (s *PutBucketWebsiteInput) getBucket() (v string) {
18043 if s.Bucket == nil {
18044 return v
18045 }
18046 return *s.Bucket
18047}
18048
16380// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. 18049// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
16381func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { 18050func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
16382 s.WebsiteConfiguration = v 18051 s.WebsiteConfiguration = v
16383 return s 18052 return s
16384} 18053}
16385 18054
16386// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput
16387type PutBucketWebsiteOutput struct { 18055type PutBucketWebsiteOutput struct {
16388 _ struct{} `type:"structure"` 18056 _ struct{} `type:"structure"`
16389} 18057}
@@ -16398,14 +18066,13 @@ func (s PutBucketWebsiteOutput) GoString() string {
16398 return s.String() 18066 return s.String()
16399} 18067}
16400 18068
16401// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest
16402type PutObjectAclInput struct { 18069type PutObjectAclInput struct {
16403 _ struct{} `type:"structure" payload:"AccessControlPolicy"` 18070 _ struct{} `type:"structure" payload:"AccessControlPolicy"`
16404 18071
16405 // The canned ACL to apply to the object. 18072 // The canned ACL to apply to the object.
16406 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` 18073 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
16407 18074
16408 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` 18075 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16409 18076
16410 // Bucket is a required field 18077 // Bucket is a required field
16411 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` 18078 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16491,6 +18158,13 @@ func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
16491 return s 18158 return s
16492} 18159}
16493 18160
18161func (s *PutObjectAclInput) getBucket() (v string) {
18162 if s.Bucket == nil {
18163 return v
18164 }
18165 return *s.Bucket
18166}
18167
16494// SetGrantFullControl sets the GrantFullControl field's value. 18168// SetGrantFullControl sets the GrantFullControl field's value.
16495func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { 18169func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
16496 s.GrantFullControl = &v 18170 s.GrantFullControl = &v
@@ -16539,7 +18213,6 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
16539 return s 18213 return s
16540} 18214}
16541 18215
16542// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
16543type PutObjectAclOutput struct { 18216type PutObjectAclOutput struct {
16544 _ struct{} `type:"structure"` 18217 _ struct{} `type:"structure"`
16545 18218
@@ -16564,7 +18237,6 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
16564 return s 18237 return s
16565} 18238}
16566 18239
16567// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
16568type PutObjectInput struct { 18240type PutObjectInput struct {
16569 _ struct{} `type:"structure" payload:"Body"` 18241 _ struct{} `type:"structure" payload:"Body"`
16570 18242
@@ -16597,11 +18269,14 @@ type PutObjectInput struct {
16597 // body cannot be determined automatically. 18269 // body cannot be determined automatically.
16598 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` 18270 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
16599 18271
 18272	// The base64-encoded 128-bit MD5 digest of the object data.
18273 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
18274
16600 // A standard MIME type describing the format of the object data. 18275 // A standard MIME type describing the format of the object data.
16601 ContentType *string `location:"header" locationName:"Content-Type" type:"string"` 18276 ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
16602 18277
16603 // The date and time at which the object is no longer cacheable. 18278 // The date and time at which the object is no longer cacheable.
16604 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` 18279 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
16605 18280
16606 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 18281 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
16607 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` 18282 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
@@ -16713,6 +18388,13 @@ func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
16713 return s 18388 return s
16714} 18389}
16715 18390
18391func (s *PutObjectInput) getBucket() (v string) {
18392 if s.Bucket == nil {
18393 return v
18394 }
18395 return *s.Bucket
18396}
18397
16716// SetCacheControl sets the CacheControl field's value. 18398// SetCacheControl sets the CacheControl field's value.
16717func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { 18399func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
16718 s.CacheControl = &v 18400 s.CacheControl = &v
@@ -16743,6 +18425,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
16743 return s 18425 return s
16744} 18426}
16745 18427
18428// SetContentMD5 sets the ContentMD5 field's value.
18429func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput {
18430 s.ContentMD5 = &v
18431 return s
18432}
18433
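// Illustrative sketch, not generated SDK code: computing the value expected by
// PutObjectInput.ContentMD5 above. S3 expects the base64 encoding of the raw
// 128-bit MD5 digest of the request body (not the hex form). This helper is an
// assumption for illustration only and relies on crypto/md5 and encoding/base64,
// which this generated file does not import.
func exampleContentMD5(body []byte) string {
	sum := md5.Sum(body)                             // raw 16-byte MD5 digest of the payload
	return base64.StdEncoding.EncodeToString(sum[:]) // base64 form used in the Content-MD5 header
}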
16746// SetContentType sets the ContentType field's value. 18434// SetContentType sets the ContentType field's value.
16747func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { 18435func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
16748 s.ContentType = &v 18436 s.ContentType = &v
@@ -16809,6 +18497,13 @@ func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
16809 return s 18497 return s
16810} 18498}
16811 18499
18500func (s *PutObjectInput) getSSECustomerKey() (v string) {
18501 if s.SSECustomerKey == nil {
18502 return v
18503 }
18504 return *s.SSECustomerKey
18505}
18506
16812// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 18507// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
16813func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { 18508func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
16814 s.SSECustomerKeyMD5 = &v 18509 s.SSECustomerKeyMD5 = &v
@@ -16845,7 +18540,6 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
16845 return s 18540 return s
16846} 18541}
16847 18542
16848// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput
16849type PutObjectOutput struct { 18543type PutObjectOutput struct {
16850 _ struct{} `type:"structure"` 18544 _ struct{} `type:"structure"`
16851 18545
@@ -16940,7 +18634,6 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
16940 return s 18634 return s
16941} 18635}
16942 18636
16943// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
16944type PutObjectTaggingInput struct { 18637type PutObjectTaggingInput struct {
16945 _ struct{} `type:"structure" payload:"Tagging"` 18638 _ struct{} `type:"structure" payload:"Tagging"`
16946 18639
@@ -16951,7 +18644,7 @@ type PutObjectTaggingInput struct {
16951 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` 18644 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
16952 18645
16953 // Tagging is a required field 18646 // Tagging is a required field
16954 Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` 18647 Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
16955 18648
16956 VersionId *string `location:"querystring" locationName:"versionId" type:"string"` 18649 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
16957} 18650}
@@ -16999,6 +18692,13 @@ func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
16999 return s 18692 return s
17000} 18693}
17001 18694
18695func (s *PutObjectTaggingInput) getBucket() (v string) {
18696 if s.Bucket == nil {
18697 return v
18698 }
18699 return *s.Bucket
18700}
18701
17002// SetKey sets the Key field's value. 18702// SetKey sets the Key field's value.
17003func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { 18703func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
17004 s.Key = &v 18704 s.Key = &v
@@ -17017,7 +18717,6 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
17017 return s 18717 return s
17018} 18718}
17019 18719
17020// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
17021type PutObjectTaggingOutput struct { 18720type PutObjectTaggingOutput struct {
17022 _ struct{} `type:"structure"` 18721 _ struct{} `type:"structure"`
17023 18722
@@ -17042,7 +18741,6 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput
17042 18741
17043// Container for specifying a configuration when you want Amazon S3 to publish 18742// Container for specifying a configuration when you want Amazon S3 to publish
17044// events to an Amazon Simple Queue Service (Amazon SQS) queue. 18743// events to an Amazon Simple Queue Service (Amazon SQS) queue.
17045// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
17046type QueueConfiguration struct { 18744type QueueConfiguration struct {
17047 _ struct{} `type:"structure"` 18745 _ struct{} `type:"structure"`
17048 18746
@@ -17051,6 +18749,7 @@ type QueueConfiguration struct {
17051 18749
17052 // Container for object key name filtering rules. For information about key 18750 // Container for object key name filtering rules. For information about key
17053 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 18751 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
18752 // in the Amazon Simple Storage Service Developer Guide.
17054 Filter *NotificationConfigurationFilter `type:"structure"` 18753 Filter *NotificationConfigurationFilter `type:"structure"`
17055 18754
17056 // Optional unique identifier for configurations in a notification configuration. 18755 // Optional unique identifier for configurations in a notification configuration.
@@ -17114,7 +18813,6 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
17114 return s 18813 return s
17115} 18814}
17116 18815
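// Illustrative sketch, not generated SDK code: a QueueConfiguration that asks
// Amazon S3 to publish object-created notifications to an SQS queue. The queue
// ARN is a placeholder, and EventS3ObjectCreated is assumed to be the event
// name constant generated elsewhere in this file.
func exampleQueueConfiguration() *QueueConfiguration {
	return (&QueueConfiguration{}).
		SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue"). // placeholder ARN
		SetEvents([]*string{aws.String(EventS3ObjectCreated)})           // "s3:ObjectCreated:*"
}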
17117// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
17118type QueueConfigurationDeprecated struct { 18816type QueueConfigurationDeprecated struct {
17119 _ struct{} `type:"structure"` 18817 _ struct{} `type:"structure"`
17120 18818
@@ -17164,7 +18862,45 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep
17164 return s 18862 return s
17165} 18863}
17166 18864
17167// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect 18865type RecordsEvent struct {
18866 _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
18867
18868 // The byte array of partial, one or more result records.
18869 //
18870 // Payload is automatically base64 encoded/decoded by the SDK.
18871 Payload []byte `type:"blob"`
18872}
18873
18874// String returns the string representation
18875func (s RecordsEvent) String() string {
18876 return awsutil.Prettify(s)
18877}
18878
18879// GoString returns the string representation
18880func (s RecordsEvent) GoString() string {
18881 return s.String()
18882}
18883
18884// SetPayload sets the Payload field's value.
18885func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
18886 s.Payload = v
18887 return s
18888}
18889
 18890// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
18891func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
18892
18893// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
18894// This method is only used internally within the SDK's EventStream handling.
18895func (s *RecordsEvent) UnmarshalEvent(
18896 payloadUnmarshaler protocol.PayloadUnmarshaler,
18897 msg eventstream.Message,
18898) error {
18899 s.Payload = make([]byte, len(msg.Payload))
18900 copy(s.Payload, msg.Payload)
18901 return nil
18902}
18903
17168type Redirect struct { 18904type Redirect struct {
17169 _ struct{} `type:"structure"` 18905 _ struct{} `type:"structure"`
17170 18906
@@ -17233,7 +18969,6 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
17233 return s 18969 return s
17234} 18970}
17235 18971
17236// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
17237type RedirectAllRequestsTo struct { 18972type RedirectAllRequestsTo struct {
17238 _ struct{} `type:"structure"` 18973 _ struct{} `type:"structure"`
17239 18974
@@ -17284,7 +19019,6 @@ func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
17284 19019
17285// Container for replication rules. You can add as many as 1,000 rules. Total 19020// Container for replication rules. You can add as many as 1,000 rules. Total
17286// replication configuration size can be up to 2 MB. 19021// replication configuration size can be up to 2 MB.
17287// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration
17288type ReplicationConfiguration struct { 19022type ReplicationConfiguration struct {
17289 _ struct{} `type:"structure"` 19023 _ struct{} `type:"structure"`
17290 19024
@@ -17349,10 +19083,12 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
17349 return s 19083 return s
17350} 19084}
17351 19085
17352// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule 19086// Container for information about a particular replication rule.
17353type ReplicationRule struct { 19087type ReplicationRule struct {
17354 _ struct{} `type:"structure"` 19088 _ struct{} `type:"structure"`
17355 19089
19090 // Container for replication destination information.
19091 //
17356 // Destination is a required field 19092 // Destination is a required field
17357 Destination *Destination `type:"structure" required:"true"` 19093 Destination *Destination `type:"structure" required:"true"`
17358 19094
@@ -17366,6 +19102,9 @@ type ReplicationRule struct {
17366 // Prefix is a required field 19102 // Prefix is a required field
17367 Prefix *string `type:"string" required:"true"` 19103 Prefix *string `type:"string" required:"true"`
17368 19104
19105 // Container for filters that define which source objects should be replicated.
19106 SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
19107
17369 // The rule is ignored if status is not Enabled. 19108 // The rule is ignored if status is not Enabled.
17370 // 19109 //
17371 // Status is a required field 19110 // Status is a required field
@@ -17399,6 +19138,11 @@ func (s *ReplicationRule) Validate() error {
17399 invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) 19138 invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
17400 } 19139 }
17401 } 19140 }
19141 if s.SourceSelectionCriteria != nil {
19142 if err := s.SourceSelectionCriteria.Validate(); err != nil {
19143 invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams))
19144 }
19145 }
17402 19146
17403 if invalidParams.Len() > 0 { 19147 if invalidParams.Len() > 0 {
17404 return invalidParams 19148 return invalidParams
@@ -17424,13 +19168,18 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
17424 return s 19168 return s
17425} 19169}
17426 19170
19171// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value.
19172func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule {
19173 s.SourceSelectionCriteria = v
19174 return s
19175}
19176
17427// SetStatus sets the Status field's value. 19177// SetStatus sets the Status field's value.
17428func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { 19178func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
17429 s.Status = &v 19179 s.Status = &v
17430 return s 19180 return s
17431} 19181}
17432 19182
17433// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration
17434type RequestPaymentConfiguration struct { 19183type RequestPaymentConfiguration struct {
17435 _ struct{} `type:"structure"` 19184 _ struct{} `type:"structure"`
17436 19185
@@ -17469,7 +19218,30 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur
17469 return s 19218 return s
17470} 19219}
17471 19220
17472// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest 19221type RequestProgress struct {
19222 _ struct{} `type:"structure"`
19223
19224 // Specifies whether periodic QueryProgress frames should be sent. Valid values:
19225 // TRUE, FALSE. Default value: FALSE.
19226 Enabled *bool `type:"boolean"`
19227}
19228
19229// String returns the string representation
19230func (s RequestProgress) String() string {
19231 return awsutil.Prettify(s)
19232}
19233
19234// GoString returns the string representation
19235func (s RequestProgress) GoString() string {
19236 return s.String()
19237}
19238
19239// SetEnabled sets the Enabled field's value.
19240func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
19241 s.Enabled = &v
19242 return s
19243}
19244
17473type RestoreObjectInput struct { 19245type RestoreObjectInput struct {
17474 _ struct{} `type:"structure" payload:"RestoreRequest"` 19246 _ struct{} `type:"structure" payload:"RestoreRequest"`
17475 19247
@@ -17485,7 +19257,8 @@ type RestoreObjectInput struct {
17485 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html 19257 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
17486 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` 19258 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
17487 19259
17488 RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` 19260 // Container for restore job parameters.
19261 RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
17489 19262
17490 VersionId *string `location:"querystring" locationName:"versionId" type:"string"` 19263 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
17491} 19264}
@@ -17530,6 +19303,13 @@ func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
17530 return s 19303 return s
17531} 19304}
17532 19305
19306func (s *RestoreObjectInput) getBucket() (v string) {
19307 if s.Bucket == nil {
19308 return v
19309 }
19310 return *s.Bucket
19311}
19312
17533// SetKey sets the Key field's value. 19313// SetKey sets the Key field's value.
17534func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { 19314func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
17535 s.Key = &v 19315 s.Key = &v
@@ -17554,13 +19334,16 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
17554 return s 19334 return s
17555} 19335}
17556 19336
17557// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
17558type RestoreObjectOutput struct { 19337type RestoreObjectOutput struct {
17559 _ struct{} `type:"structure"` 19338 _ struct{} `type:"structure"`
17560 19339
17561 // If present, indicates that the requester was successfully charged for the 19340 // If present, indicates that the requester was successfully charged for the
17562 // request. 19341 // request.
17563 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` 19342 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
19343
19344 // Indicates the path in the provided S3 output location where Select results
19345 // will be restored to.
19346 RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"`
17564} 19347}
17565 19348
17566// String returns the string representation 19349// String returns the string representation
@@ -17579,17 +19362,38 @@ func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
17579 return s 19362 return s
17580} 19363}
17581 19364
17582// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest 19365// SetRestoreOutputPath sets the RestoreOutputPath field's value.
19366func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput {
19367 s.RestoreOutputPath = &v
19368 return s
19369}
19370
19371// Container for restore job parameters.
17583type RestoreRequest struct { 19372type RestoreRequest struct {
17584 _ struct{} `type:"structure"` 19373 _ struct{} `type:"structure"`
17585 19374
17586 // Lifetime of the active copy in days 19375 // Lifetime of the active copy in days. Do not use with restores that specify
17587 // 19376 // OutputLocation.
17588 // Days is a required field 19377 Days *int64 `type:"integer"`
17589 Days *int64 `type:"integer" required:"true"` 19378
19379 // The optional description for the job.
19380 Description *string `type:"string"`
17590 19381
17591 // Glacier related prameters pertaining to this job. 19382 // Glacier related parameters pertaining to this job. Do not use with restores
19383 // that specify OutputLocation.
17592 GlacierJobParameters *GlacierJobParameters `type:"structure"` 19384 GlacierJobParameters *GlacierJobParameters `type:"structure"`
19385
19386 // Describes the location where the restore job's output is stored.
19387 OutputLocation *OutputLocation `type:"structure"`
19388
19389 // Describes the parameters for Select job types.
19390 SelectParameters *SelectParameters `type:"structure"`
19391
19392 // Glacier retrieval tier at which the restore will be processed.
19393 Tier *string `type:"string" enum:"Tier"`
19394
19395 // Type of restore request.
19396 Type *string `type:"string" enum:"RestoreRequestType"`
17593} 19397}
17594 19398
17595// String returns the string representation 19399// String returns the string representation
@@ -17605,14 +19409,21 @@ func (s RestoreRequest) GoString() string {
17605// Validate inspects the fields of the type to determine if they are valid. 19409// Validate inspects the fields of the type to determine if they are valid.
17606func (s *RestoreRequest) Validate() error { 19410func (s *RestoreRequest) Validate() error {
17607 invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} 19411 invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
17608 if s.Days == nil {
17609 invalidParams.Add(request.NewErrParamRequired("Days"))
17610 }
17611 if s.GlacierJobParameters != nil { 19412 if s.GlacierJobParameters != nil {
17612 if err := s.GlacierJobParameters.Validate(); err != nil { 19413 if err := s.GlacierJobParameters.Validate(); err != nil {
17613 invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) 19414 invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
17614 } 19415 }
17615 } 19416 }
19417 if s.OutputLocation != nil {
19418 if err := s.OutputLocation.Validate(); err != nil {
19419 invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams))
19420 }
19421 }
19422 if s.SelectParameters != nil {
19423 if err := s.SelectParameters.Validate(); err != nil {
19424 invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams))
19425 }
19426 }
17616 19427
17617 if invalidParams.Len() > 0 { 19428 if invalidParams.Len() > 0 {
17618 return invalidParams 19429 return invalidParams
@@ -17626,13 +19437,42 @@ func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
17626 return s 19437 return s
17627} 19438}
17628 19439
19440// SetDescription sets the Description field's value.
19441func (s *RestoreRequest) SetDescription(v string) *RestoreRequest {
19442 s.Description = &v
19443 return s
19444}
19445
17629// SetGlacierJobParameters sets the GlacierJobParameters field's value. 19446// SetGlacierJobParameters sets the GlacierJobParameters field's value.
17630func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { 19447func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
17631 s.GlacierJobParameters = v 19448 s.GlacierJobParameters = v
17632 return s 19449 return s
17633} 19450}
17634 19451
17635// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule 19452// SetOutputLocation sets the OutputLocation field's value.
19453func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest {
19454 s.OutputLocation = v
19455 return s
19456}
19457
19458// SetSelectParameters sets the SelectParameters field's value.
19459func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest {
19460 s.SelectParameters = v
19461 return s
19462}
19463
19464// SetTier sets the Tier field's value.
19465func (s *RestoreRequest) SetTier(v string) *RestoreRequest {
19466 s.Tier = &v
19467 return s
19468}
19469
19470// SetType sets the Type field's value.
19471func (s *RestoreRequest) SetType(v string) *RestoreRequest {
19472 s.Type = &v
19473 return s
19474}
19475
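// Illustrative sketch, not generated SDK code: a minimal RestoreRequest for a
// Glacier retrieval using the fields documented above. Per the field comments,
// Days and GlacierJobParameters are not used with restores that specify
// OutputLocation. TierStandard is assumed to be the generated constant for the
// Standard retrieval tier.
func exampleGlacierRestoreRequest() *RestoreRequest {
	return (&RestoreRequest{}).
		SetDays(7). // keep the temporary copy active for seven days
		SetGlacierJobParameters((&GlacierJobParameters{}).SetTier(TierStandard))
}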
17636type RoutingRule struct { 19476type RoutingRule struct {
17637 _ struct{} `type:"structure"` 19477 _ struct{} `type:"structure"`
17638 19478
@@ -17685,7 +19525,6 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
17685 return s 19525 return s
17686} 19526}
17687 19527
17688// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
17689type Rule struct { 19528type Rule struct {
17690 _ struct{} `type:"structure"` 19529 _ struct{} `type:"structure"`
17691 19530
@@ -17706,10 +19545,11 @@ type Rule struct {
17706 NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` 19545 NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
17707 19546
17708 // Container for the transition rule that describes when noncurrent objects 19547 // Container for the transition rule that describes when noncurrent objects
17709 // transition to the STANDARD_IA or GLACIER storage class. If your bucket is 19548 // transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your
17710 // versioning-enabled (or versioning is suspended), you can set this action 19549 // bucket is versioning-enabled (or versioning is suspended), you can set this
17711 // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA 19550 // action to request that Amazon S3 transition noncurrent object versions to
17712 // or GLACIER storage class at a specific period in the object's lifetime. 19551 // the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period
19552 // in the object's lifetime.
17713 NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` 19553 NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
17714 19554
17715 // Prefix identifying one or more objects to which the rule applies. 19555 // Prefix identifying one or more objects to which the rule applies.
@@ -17800,7 +19640,879 @@ func (s *Rule) SetTransition(v *Transition) *Rule {
17800 return s 19640 return s
17801} 19641}
17802 19642
17803// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis 19643// Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
19644type SSEKMS struct {
19645 _ struct{} `locationName:"SSE-KMS" type:"structure"`
19646
19647 // Specifies the ID of the AWS Key Management Service (KMS) master encryption
19648 // key to use for encrypting Inventory reports.
19649 //
19650 // KeyId is a required field
19651 KeyId *string `type:"string" required:"true"`
19652}
19653
19654// String returns the string representation
19655func (s SSEKMS) String() string {
19656 return awsutil.Prettify(s)
19657}
19658
19659// GoString returns the string representation
19660func (s SSEKMS) GoString() string {
19661 return s.String()
19662}
19663
19664// Validate inspects the fields of the type to determine if they are valid.
19665func (s *SSEKMS) Validate() error {
19666 invalidParams := request.ErrInvalidParams{Context: "SSEKMS"}
19667 if s.KeyId == nil {
19668 invalidParams.Add(request.NewErrParamRequired("KeyId"))
19669 }
19670
19671 if invalidParams.Len() > 0 {
19672 return invalidParams
19673 }
19674 return nil
19675}
19676
19677// SetKeyId sets the KeyId field's value.
19678func (s *SSEKMS) SetKeyId(v string) *SSEKMS {
19679 s.KeyId = &v
19680 return s
19681}
19682
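// Illustrative sketch, not generated SDK code: selecting SSE-KMS encryption for
// delivered inventory reports. The key ARN below is a placeholder.
func exampleInventorySSEKMS() *SSEKMS {
	return (&SSEKMS{}).SetKeyId("arn:aws:kms:us-east-1:123456789012:key/example-key-id")
}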
 19683// Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
19684type SSES3 struct {
19685 _ struct{} `locationName:"SSE-S3" type:"structure"`
19686}
19687
19688// String returns the string representation
19689func (s SSES3) String() string {
19690 return awsutil.Prettify(s)
19691}
19692
19693// GoString returns the string representation
19694func (s SSES3) GoString() string {
19695 return s.String()
19696}
19697
19698// SelectObjectContentEventStream provides handling of EventStreams for
19699// the SelectObjectContent API.
19700//
19701// Use this type to receive SelectObjectContentEventStream events. The events
19702// can be read from the Events channel member.
19703//
19704// The events that can be received are:
19705//
19706// * ContinuationEvent
19707// * EndEvent
19708// * ProgressEvent
19709// * RecordsEvent
19710// * StatsEvent
19711type SelectObjectContentEventStream struct {
19712 // Reader is the EventStream reader for the SelectObjectContentEventStream
 19713	// events. This value is automatically set by the SDK when the API call is made.
19714 // Use this member when unit testing your code with the SDK to mock out the
19715 // EventStream Reader.
19716 //
19717 // Must not be nil.
19718 Reader SelectObjectContentEventStreamReader
19719
19720 // StreamCloser is the io.Closer for the EventStream connection. For HTTP
19721 // EventStream this is the response Body. The stream will be closed when
19722 // the Close method of the EventStream is called.
19723 StreamCloser io.Closer
19724}
19725
19726// Close closes the EventStream. This will also cause the Events channel to be
19727// closed. You can use the closing of the Events channel to terminate your
19728// application's read from the API's EventStream.
19729//
19730// Will close the underlying EventStream reader. For EventStream over HTTP
19731// connection this will also close the HTTP connection.
19732//
19733// Close must be called when done using the EventStream API. Not calling Close
19734// may result in resource leaks.
19735func (es *SelectObjectContentEventStream) Close() (err error) {
19736 es.Reader.Close()
19737 return es.Err()
19738}
19739
19740// Err returns any error that occurred while reading EventStream Events from
19741// the service API's response. Returns nil if there were no errors.
19742func (es *SelectObjectContentEventStream) Err() error {
19743 if err := es.Reader.Err(); err != nil {
19744 return err
19745 }
19746 es.StreamCloser.Close()
19747
19748 return nil
19749}
19750
19751// Events returns a channel to read EventStream Events from the
19752// SelectObjectContent API.
19753//
19754// These events are:
19755//
19756// * ContinuationEvent
19757// * EndEvent
19758// * ProgressEvent
19759// * RecordsEvent
19760// * StatsEvent
19761func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
19762 return es.Reader.Events()
19763}
19764
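// Illustrative sketch, not generated SDK code: draining the event stream
// described above. The *SelectObjectContentEventStream value would normally be
// obtained from the SelectObjectContent API response; obtaining it is outside
// this sketch.
func exampleDrainSelectObjectContentEventStream(es *SelectObjectContentEventStream) ([]byte, error) {
	defer es.Close() // Close must always be called to avoid resource leaks

	var records []byte
	for event := range es.Events() {
		switch e := event.(type) {
		case *RecordsEvent:
			// Payload carries one or more (possibly partial) result records.
			records = append(records, e.Payload...)
		case *EndEvent:
			// End of the result stream; the Events channel closes shortly after.
		}
	}
	if err := es.Err(); err != nil { // surface any error hit while reading events
		return nil, err
	}
	return records, nil
}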
19765// SelectObjectContentEventStreamEvent groups together all EventStream
19766// events read from the SelectObjectContent API.
19767//
19768// These events are:
19769//
19770// * ContinuationEvent
19771// * EndEvent
19772// * ProgressEvent
19773// * RecordsEvent
19774// * StatsEvent
19775type SelectObjectContentEventStreamEvent interface {
19776 eventSelectObjectContentEventStream()
19777}
19778
19779// SelectObjectContentEventStreamReader provides the interface for reading EventStream
19780// Events from the SelectObjectContent API. The
19781// default implementation for this interface will be SelectObjectContentEventStream.
19782//
19783// The reader's Close method must allow multiple concurrent calls.
19784//
19785// These events are:
19786//
19787// * ContinuationEvent
19788// * EndEvent
19789// * ProgressEvent
19790// * RecordsEvent
19791// * StatsEvent
19792type SelectObjectContentEventStreamReader interface {
19793 // Returns a channel of events as they are read from the event stream.
19794 Events() <-chan SelectObjectContentEventStreamEvent
19795
19796 // Close will close the underlying event stream reader. For event stream over
19797 // HTTP this will also close the HTTP connection.
19798 Close() error
19799
 19800	// Returns any error that has occurred while reading from the event stream.
19801 Err() error
19802}
19803
19804type readSelectObjectContentEventStream struct {
19805 eventReader *eventstreamapi.EventReader
19806 stream chan SelectObjectContentEventStreamEvent
19807 errVal atomic.Value
19808
19809 done chan struct{}
19810 closeOnce sync.Once
19811}
19812
19813func newReadSelectObjectContentEventStream(
19814 reader io.ReadCloser,
19815 unmarshalers request.HandlerList,
19816 logger aws.Logger,
19817 logLevel aws.LogLevelType,
19818) *readSelectObjectContentEventStream {
19819 r := &readSelectObjectContentEventStream{
19820 stream: make(chan SelectObjectContentEventStreamEvent),
19821 done: make(chan struct{}),
19822 }
19823
19824 r.eventReader = eventstreamapi.NewEventReader(
19825 reader,
19826 protocol.HandlerPayloadUnmarshal{
19827 Unmarshalers: unmarshalers,
19828 },
19829 r.unmarshalerForEventType,
19830 )
19831 r.eventReader.UseLogger(logger, logLevel)
19832
19833 return r
19834}
19835
19836// Close will close the underlying event stream reader. For EventStream over
19837// HTTP this will also close the HTTP connection.
19838func (r *readSelectObjectContentEventStream) Close() error {
19839 r.closeOnce.Do(r.safeClose)
19840
19841 return r.Err()
19842}
19843
19844func (r *readSelectObjectContentEventStream) safeClose() {
19845 close(r.done)
19846 err := r.eventReader.Close()
19847 if err != nil {
19848 r.errVal.Store(err)
19849 }
19850}
19851
19852func (r *readSelectObjectContentEventStream) Err() error {
19853 if v := r.errVal.Load(); v != nil {
19854 return v.(error)
19855 }
19856
19857 return nil
19858}
19859
19860func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
19861 return r.stream
19862}
19863
19864func (r *readSelectObjectContentEventStream) readEventStream() {
19865 defer close(r.stream)
19866
19867 for {
19868 event, err := r.eventReader.ReadEvent()
19869 if err != nil {
19870 if err == io.EOF {
19871 return
19872 }
19873 select {
19874 case <-r.done:
19875 // If closed already ignore the error
19876 return
19877 default:
19878 }
19879 r.errVal.Store(err)
19880 return
19881 }
19882
19883 select {
19884 case r.stream <- event.(SelectObjectContentEventStreamEvent):
19885 case <-r.done:
19886 return
19887 }
19888 }
19889}
19890
19891func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
19892 eventType string,
19893) (eventstreamapi.Unmarshaler, error) {
19894 switch eventType {
19895 case "Cont":
19896 return &ContinuationEvent{}, nil
19897
19898 case "End":
19899 return &EndEvent{}, nil
19900
19901 case "Progress":
19902 return &ProgressEvent{}, nil
19903
19904 case "Records":
19905 return &RecordsEvent{}, nil
19906
19907 case "Stats":
19908 return &StatsEvent{}, nil
19909 default:
19910 return nil, awserr.New(
19911 request.ErrCodeSerialization,
19912 fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType),
19913 nil,
19914 )
19915 }
19916}
19917
19918// Request to filter the contents of an Amazon S3 object based on a simple Structured
19919// Query Language (SQL) statement. In the request, along with the SQL expression,
19920// you must also specify a data serialization format (JSON or CSV) of the object.
19921// Amazon S3 uses this to parse object data into records, and returns only records
19922// that match the specified SQL expression. You must also specify the data serialization
19923// format for the response. For more information, go to S3Select API Documentation
19924// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
19925type SelectObjectContentInput struct {
19926 _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
19927
19928 // The S3 Bucket.
19929 //
19930 // Bucket is a required field
19931 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
19932
19933 // The expression that is used to query the object.
19934 //
19935 // Expression is a required field
19936 Expression *string `type:"string" required:"true"`
19937
19938 // The type of the provided expression (e.g., SQL).
19939 //
19940 // ExpressionType is a required field
19941 ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
19942
19943 // Describes the format of the data in the object that is being queried.
19944 //
19945 // InputSerialization is a required field
19946 InputSerialization *InputSerialization `type:"structure" required:"true"`
19947
19948 // The Object Key.
19949 //
19950 // Key is a required field
19951 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
19952
19953 // Describes the format of the data that you want Amazon S3 to return in response.
19954 //
19955 // OutputSerialization is a required field
19956 OutputSerialization *OutputSerialization `type:"structure" required:"true"`
19957
19958 // Specifies if periodic request progress information should be enabled.
19959 RequestProgress *RequestProgress `type:"structure"`
19960
19961 // The SSE Algorithm used to encrypt the object. For more information, go to
19962  // Server-Side Encryption (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
19963 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
19964
19965 // The SSE Customer Key. For more information, go to Server-Side Encryption
19966  // (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
19967 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
19968
19969 // The SSE Customer Key MD5. For more information, go to Server-Side Encryption
19970  // (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
19971 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
19972}
19973
19974// String returns the string representation
19975func (s SelectObjectContentInput) String() string {
19976 return awsutil.Prettify(s)
19977}
19978
19979// GoString returns the string representation
19980func (s SelectObjectContentInput) GoString() string {
19981 return s.String()
19982}
19983
19984// Validate inspects the fields of the type to determine if they are valid.
19985func (s *SelectObjectContentInput) Validate() error {
19986 invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
19987 if s.Bucket == nil {
19988 invalidParams.Add(request.NewErrParamRequired("Bucket"))
19989 }
19990 if s.Expression == nil {
19991 invalidParams.Add(request.NewErrParamRequired("Expression"))
19992 }
19993 if s.ExpressionType == nil {
19994 invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
19995 }
19996 if s.InputSerialization == nil {
19997 invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
19998 }
19999 if s.Key == nil {
20000 invalidParams.Add(request.NewErrParamRequired("Key"))
20001 }
20002 if s.Key != nil && len(*s.Key) < 1 {
20003 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
20004 }
20005 if s.OutputSerialization == nil {
20006 invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
20007 }
20008
20009 if invalidParams.Len() > 0 {
20010 return invalidParams
20011 }
20012 return nil
20013}
20014
20015// SetBucket sets the Bucket field's value.
20016func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
20017 s.Bucket = &v
20018 return s
20019}
20020
20021func (s *SelectObjectContentInput) getBucket() (v string) {
20022 if s.Bucket == nil {
20023 return v
20024 }
20025 return *s.Bucket
20026}
20027
20028// SetExpression sets the Expression field's value.
20029func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput {
20030 s.Expression = &v
20031 return s
20032}
20033
20034// SetExpressionType sets the ExpressionType field's value.
20035func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput {
20036 s.ExpressionType = &v
20037 return s
20038}
20039
20040// SetInputSerialization sets the InputSerialization field's value.
20041func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput {
20042 s.InputSerialization = v
20043 return s
20044}
20045
20046// SetKey sets the Key field's value.
20047func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput {
20048 s.Key = &v
20049 return s
20050}
20051
20052// SetOutputSerialization sets the OutputSerialization field's value.
20053func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput {
20054 s.OutputSerialization = v
20055 return s
20056}
20057
20058// SetRequestProgress sets the RequestProgress field's value.
20059func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput {
20060 s.RequestProgress = v
20061 return s
20062}
20063
20064// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
20065func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput {
20066 s.SSECustomerAlgorithm = &v
20067 return s
20068}
20069
20070// SetSSECustomerKey sets the SSECustomerKey field's value.
20071func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput {
20072 s.SSECustomerKey = &v
20073 return s
20074}
20075
20076func (s *SelectObjectContentInput) getSSECustomerKey() (v string) {
20077 if s.SSECustomerKey == nil {
20078 return v
20079 }
20080 return *s.SSECustomerKey
20081}
20082
20083// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
20084func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput {
20085 s.SSECustomerKeyMD5 = &v
20086 return s
20087}
20088
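// A minimal sketch of building the SelectObjectContentInput defined above from a
// caller's point of view; aws.String and the InputSerialization/CSVInput/
// OutputSerialization/CSVOutput types are assumed from the aws package and from
// earlier in this file, and the bucket, key, and query values are hypothetical
// placeholders.
//
//	input := &s3.SelectObjectContentInput{
//		Bucket:         aws.String("example-bucket"), // hypothetical bucket
//		Key:            aws.String("people.csv"),     // hypothetical object key
//		Expression:     aws.String("SELECT s.name FROM S3Object s"),
//		ExpressionType: aws.String(s3.ExpressionTypeSql),
//		InputSerialization: &s3.InputSerialization{
//			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
//		},
//		OutputSerialization: &s3.OutputSerialization{
//			CSV: &s3.CSVOutput{},
//		},
//	}
//	if err := input.Validate(); err != nil {
//		// missing required fields are reported before any request is made
//	}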
20089type SelectObjectContentOutput struct {
20090 _ struct{} `type:"structure" payload:"Payload"`
20091
20092 // Use EventStream to use the API's stream.
20093 EventStream *SelectObjectContentEventStream `type:"structure"`
20094}
20095
20096// String returns the string representation
20097func (s SelectObjectContentOutput) String() string {
20098 return awsutil.Prettify(s)
20099}
20100
20101// GoString returns the string representation
20102func (s SelectObjectContentOutput) GoString() string {
20103 return s.String()
20104}
20105
20106// SetEventStream sets the EventStream field's value.
20107func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput {
20108 s.EventStream = v
20109 return s
20110}
20111
20112func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) {
20113 if r.Error != nil {
20114 return
20115 }
20116 reader := newReadSelectObjectContentEventStream(
20117 r.HTTPResponse.Body,
20118 r.Handlers.UnmarshalStream,
20119 r.Config.Logger,
20120 r.Config.LogLevel.Value(),
20121 )
20122 go reader.readEventStream()
20123
20124 eventStream := &SelectObjectContentEventStream{
20125 StreamCloser: r.HTTPResponse.Body,
20126 Reader: reader,
20127 }
20128 s.EventStream = eventStream
20129}
20130
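// A minimal sketch of draining the event stream wired up by runEventStreamLoop
// above; it assumes an *s3.S3 client (svc), the input from the previous sketch,
// the stream's Close method, and a Payload field on RecordsEvent, all defined
// elsewhere in this file.
//
//	out, err := svc.SelectObjectContent(input)
//	if err != nil {
//		return err
//	}
//	defer out.EventStream.Close()
//
//	for event := range out.EventStream.Reader.Events() {
//		switch e := event.(type) {
//		case *s3.RecordsEvent:
//			fmt.Printf("%s", e.Payload) // raw result records
//		case *s3.StatsEvent:
//			fmt.Println("bytes scanned:", aws.Int64Value(e.Details.BytesScanned))
//		case *s3.EndEvent:
//			// stream is complete
//		}
//	}
//	if err := out.EventStream.Reader.Err(); err != nil {
//		return err
//	}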
20131// Describes the parameters for Select job types.
20132type SelectParameters struct {
20133 _ struct{} `type:"structure"`
20134
20135 // The expression that is used to query the object.
20136 //
20137 // Expression is a required field
20138 Expression *string `type:"string" required:"true"`
20139
20140 // The type of the provided expression (e.g., SQL).
20141 //
20142 // ExpressionType is a required field
20143 ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
20144
20145 // Describes the serialization format of the object.
20146 //
20147 // InputSerialization is a required field
20148 InputSerialization *InputSerialization `type:"structure" required:"true"`
20149
20150 // Describes how the results of the Select job are serialized.
20151 //
20152 // OutputSerialization is a required field
20153 OutputSerialization *OutputSerialization `type:"structure" required:"true"`
20154}
20155
20156// String returns the string representation
20157func (s SelectParameters) String() string {
20158 return awsutil.Prettify(s)
20159}
20160
20161// GoString returns the string representation
20162func (s SelectParameters) GoString() string {
20163 return s.String()
20164}
20165
20166// Validate inspects the fields of the type to determine if they are valid.
20167func (s *SelectParameters) Validate() error {
20168 invalidParams := request.ErrInvalidParams{Context: "SelectParameters"}
20169 if s.Expression == nil {
20170 invalidParams.Add(request.NewErrParamRequired("Expression"))
20171 }
20172 if s.ExpressionType == nil {
20173 invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
20174 }
20175 if s.InputSerialization == nil {
20176 invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
20177 }
20178 if s.OutputSerialization == nil {
20179 invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
20180 }
20181
20182 if invalidParams.Len() > 0 {
20183 return invalidParams
20184 }
20185 return nil
20186}
20187
20188// SetExpression sets the Expression field's value.
20189func (s *SelectParameters) SetExpression(v string) *SelectParameters {
20190 s.Expression = &v
20191 return s
20192}
20193
20194// SetExpressionType sets the ExpressionType field's value.
20195func (s *SelectParameters) SetExpressionType(v string) *SelectParameters {
20196 s.ExpressionType = &v
20197 return s
20198}
20199
20200// SetInputSerialization sets the InputSerialization field's value.
20201func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters {
20202 s.InputSerialization = v
20203 return s
20204}
20205
20206// SetOutputSerialization sets the OutputSerialization field's value.
20207func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters {
20208 s.OutputSerialization = v
20209 return s
20210}
20211
20212// Describes the default server-side encryption to apply to new objects in the
20213// bucket. If a PUT Object request does not specify any server-side encryption,
20214// this default encryption will be applied.
20215type ServerSideEncryptionByDefault struct {
20216 _ struct{} `type:"structure"`
20217
20218 // KMS master key ID to use for the default encryption. This parameter is allowed
20219 // if SSEAlgorithm is aws:kms.
20220 KMSMasterKeyID *string `type:"string"`
20221
20222 // Server-side encryption algorithm to use for the default encryption.
20223 //
20224 // SSEAlgorithm is a required field
20225 SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"`
20226}
20227
20228// String returns the string representation
20229func (s ServerSideEncryptionByDefault) String() string {
20230 return awsutil.Prettify(s)
20231}
20232
20233// GoString returns the string representation
20234func (s ServerSideEncryptionByDefault) GoString() string {
20235 return s.String()
20236}
20237
20238// Validate inspects the fields of the type to determine if they are valid.
20239func (s *ServerSideEncryptionByDefault) Validate() error {
20240 invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"}
20241 if s.SSEAlgorithm == nil {
20242 invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm"))
20243 }
20244
20245 if invalidParams.Len() > 0 {
20246 return invalidParams
20247 }
20248 return nil
20249}
20250
20251// SetKMSMasterKeyID sets the KMSMasterKeyID field's value.
20252func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault {
20253 s.KMSMasterKeyID = &v
20254 return s
20255}
20256
20257// SetSSEAlgorithm sets the SSEAlgorithm field's value.
20258func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault {
20259 s.SSEAlgorithm = &v
20260 return s
20261}
20262
20263// Container for server-side encryption configuration rules. Currently S3 supports
20264// one rule only.
20265type ServerSideEncryptionConfiguration struct {
20266 _ struct{} `type:"structure"`
20267
20268 // Container for information about a particular server-side encryption configuration
20269 // rule.
20270 //
20271 // Rules is a required field
20272 Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
20273}
20274
20275// String returns the string representation
20276func (s ServerSideEncryptionConfiguration) String() string {
20277 return awsutil.Prettify(s)
20278}
20279
20280// GoString returns the string representation
20281func (s ServerSideEncryptionConfiguration) GoString() string {
20282 return s.String()
20283}
20284
20285// Validate inspects the fields of the type to determine if they are valid.
20286func (s *ServerSideEncryptionConfiguration) Validate() error {
20287 invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"}
20288 if s.Rules == nil {
20289 invalidParams.Add(request.NewErrParamRequired("Rules"))
20290 }
20291 if s.Rules != nil {
20292 for i, v := range s.Rules {
20293 if v == nil {
20294 continue
20295 }
20296 if err := v.Validate(); err != nil {
20297 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
20298 }
20299 }
20300 }
20301
20302 if invalidParams.Len() > 0 {
20303 return invalidParams
20304 }
20305 return nil
20306}
20307
20308// SetRules sets the Rules field's value.
20309func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration {
20310 s.Rules = v
20311 return s
20312}
20313
20314// Container for information about a particular server-side encryption configuration
20315// rule.
20316type ServerSideEncryptionRule struct {
20317 _ struct{} `type:"structure"`
20318
20319 // Describes the default server-side encryption to apply to new objects in the
20320  // bucket. If a PUT Object request does not specify any server-side encryption,
20321 // this default encryption will be applied.
20322 ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"`
20323}
20324
20325// String returns the string representation
20326func (s ServerSideEncryptionRule) String() string {
20327 return awsutil.Prettify(s)
20328}
20329
20330// GoString returns the string representation
20331func (s ServerSideEncryptionRule) GoString() string {
20332 return s.String()
20333}
20334
20335// Validate inspects the fields of the type to determine if they are valid.
20336func (s *ServerSideEncryptionRule) Validate() error {
20337 invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"}
20338 if s.ApplyServerSideEncryptionByDefault != nil {
20339 if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil {
20340 invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams))
20341 }
20342 }
20343
20344 if invalidParams.Len() > 0 {
20345 return invalidParams
20346 }
20347 return nil
20348}
20349
20350// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value.
20351func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule {
20352 s.ApplyServerSideEncryptionByDefault = v
20353 return s
20354}
20355
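// A minimal sketch wiring the three server-side encryption types above into a
// single default-encryption rule using AES256; such a configuration is typically
// attached to a PutBucketEncryption request defined elsewhere in this file, and
// aws.String is the pointer helper from the aws package.
//
//	cfg := &s3.ServerSideEncryptionConfiguration{
//		Rules: []*s3.ServerSideEncryptionRule{{
//			ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
//				SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
//			},
//		}},
//	}
//	if err := cfg.Validate(); err != nil {
//		// a missing Rules slice or SSEAlgorithm is reported here
//	}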
20356// Container for filters that define which source objects should be replicated.
20357type SourceSelectionCriteria struct {
20358 _ struct{} `type:"structure"`
20359
20360 // Container for filter information of selection of KMS Encrypted S3 objects.
20361 SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
20362}
20363
20364// String returns the string representation
20365func (s SourceSelectionCriteria) String() string {
20366 return awsutil.Prettify(s)
20367}
20368
20369// GoString returns the string representation
20370func (s SourceSelectionCriteria) GoString() string {
20371 return s.String()
20372}
20373
20374// Validate inspects the fields of the type to determine if they are valid.
20375func (s *SourceSelectionCriteria) Validate() error {
20376 invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"}
20377 if s.SseKmsEncryptedObjects != nil {
20378 if err := s.SseKmsEncryptedObjects.Validate(); err != nil {
20379 invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams))
20380 }
20381 }
20382
20383 if invalidParams.Len() > 0 {
20384 return invalidParams
20385 }
20386 return nil
20387}
20388
20389// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value.
20390func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria {
20391 s.SseKmsEncryptedObjects = v
20392 return s
20393}
20394
20395// Container for filter information of selection of KMS Encrypted S3 objects.
20396type SseKmsEncryptedObjects struct {
20397 _ struct{} `type:"structure"`
20398
20399 // The replication for KMS encrypted S3 objects is disabled if status is not
20400 // Enabled.
20401 //
20402 // Status is a required field
20403 Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"`
20404}
20405
20406// String returns the string representation
20407func (s SseKmsEncryptedObjects) String() string {
20408 return awsutil.Prettify(s)
20409}
20410
20411// GoString returns the string representation
20412func (s SseKmsEncryptedObjects) GoString() string {
20413 return s.String()
20414}
20415
20416// Validate inspects the fields of the type to determine if they are valid.
20417func (s *SseKmsEncryptedObjects) Validate() error {
20418 invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"}
20419 if s.Status == nil {
20420 invalidParams.Add(request.NewErrParamRequired("Status"))
20421 }
20422
20423 if invalidParams.Len() > 0 {
20424 return invalidParams
20425 }
20426 return nil
20427}
20428
20429// SetStatus sets the Status field's value.
20430func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects {
20431 s.Status = &v
20432 return s
20433}
20434
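// A minimal sketch of the replication filter types above, opting a replication
// rule into copying KMS-encrypted objects; the enclosing ReplicationRule type is
// assumed from elsewhere in this file.
//
//	criteria := (&s3.SourceSelectionCriteria{}).SetSseKmsEncryptedObjects(
//		(&s3.SseKmsEncryptedObjects{}).SetStatus(s3.SseKmsEncryptedObjectsStatusEnabled),
//	)
//	if err := criteria.Validate(); err != nil {
//		// Status is required on SseKmsEncryptedObjects
//	}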
20435type Stats struct {
20436 _ struct{} `type:"structure"`
20437
20438 // Total number of uncompressed object bytes processed.
20439 BytesProcessed *int64 `type:"long"`
20440
20441 // Total number of bytes of records payload data returned.
20442 BytesReturned *int64 `type:"long"`
20443
20444 // Total number of object bytes scanned.
20445 BytesScanned *int64 `type:"long"`
20446}
20447
20448// String returns the string representation
20449func (s Stats) String() string {
20450 return awsutil.Prettify(s)
20451}
20452
20453// GoString returns the string representation
20454func (s Stats) GoString() string {
20455 return s.String()
20456}
20457
20458// SetBytesProcessed sets the BytesProcessed field's value.
20459func (s *Stats) SetBytesProcessed(v int64) *Stats {
20460 s.BytesProcessed = &v
20461 return s
20462}
20463
20464// SetBytesReturned sets the BytesReturned field's value.
20465func (s *Stats) SetBytesReturned(v int64) *Stats {
20466 s.BytesReturned = &v
20467 return s
20468}
20469
20470// SetBytesScanned sets the BytesScanned field's value.
20471func (s *Stats) SetBytesScanned(v int64) *Stats {
20472 s.BytesScanned = &v
20473 return s
20474}
20475
20476type StatsEvent struct {
20477 _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
20478
20479 // The Stats event details.
20480 Details *Stats `locationName:"Details" type:"structure"`
20481}
20482
20483// String returns the string representation
20484func (s StatsEvent) String() string {
20485 return awsutil.Prettify(s)
20486}
20487
20488// GoString returns the string representation
20489func (s StatsEvent) GoString() string {
20490 return s.String()
20491}
20492
20493// SetDetails sets the Details field's value.
20494func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
20495 s.Details = v
20496 return s
20497}
20498
19499// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
20500func (s *StatsEvent) eventSelectObjectContentEventStream() {}
20501
20502// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
20503// This method is only used internally within the SDK's EventStream handling.
20504func (s *StatsEvent) UnmarshalEvent(
20505 payloadUnmarshaler protocol.PayloadUnmarshaler,
20506 msg eventstream.Message,
20507) error {
20508 if err := payloadUnmarshaler.UnmarshalPayload(
20509 bytes.NewReader(msg.Payload), s,
20510 ); err != nil {
20511 return err
20512 }
20513 return nil
20514}
20515
17804type StorageClassAnalysis struct { 20516type StorageClassAnalysis struct {
17805 _ struct{} `type:"structure"` 20517 _ struct{} `type:"structure"`
17806 20518
@@ -17840,7 +20552,6 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport)
17840 return s 20552 return s
17841} 20553}
17842 20554
17843// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
17844type StorageClassAnalysisDataExport struct { 20555type StorageClassAnalysisDataExport struct {
17845 _ struct{} `type:"structure"` 20556 _ struct{} `type:"structure"`
17846 20557
@@ -17898,7 +20609,6 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora
17898 return s 20609 return s
17899} 20610}
17900 20611
17901// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
17902type Tag struct { 20612type Tag struct {
17903 _ struct{} `type:"structure"` 20613 _ struct{} `type:"structure"`
17904 20614
@@ -17954,7 +20664,6 @@ func (s *Tag) SetValue(v string) *Tag {
17954 return s 20664 return s
17955} 20665}
17956 20666
17957// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
17958type Tagging struct { 20667type Tagging struct {
17959 _ struct{} `type:"structure"` 20668 _ struct{} `type:"structure"`
17960 20669
@@ -18001,11 +20710,10 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
18001 return s 20710 return s
18002} 20711}
18003 20712
18004// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
18005type TargetGrant struct { 20713type TargetGrant struct {
18006 _ struct{} `type:"structure"` 20714 _ struct{} `type:"structure"`
18007 20715
18008 Grantee *Grantee `type:"structure"` 20716 Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
18009 20717
18010 // Logging permissions assigned to the Grantee for the bucket. 20718 // Logging permissions assigned to the Grantee for the bucket.
18011 Permission *string `type:"string" enum:"BucketLogsPermission"` 20719 Permission *string `type:"string" enum:"BucketLogsPermission"`
@@ -18050,7 +20758,6 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant {
18050 20758
18051// Container for specifying the configuration when you want Amazon S3 to publish 20759// Container for specifying the configuration when you want Amazon S3 to publish
18052// events to an Amazon Simple Notification Service (Amazon SNS) topic. 20760// events to an Amazon Simple Notification Service (Amazon SNS) topic.
18053// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
18054type TopicConfiguration struct { 20761type TopicConfiguration struct {
18055 _ struct{} `type:"structure"` 20762 _ struct{} `type:"structure"`
18056 20763
@@ -18059,6 +20766,7 @@ type TopicConfiguration struct {
18059 20766
18060 // Container for object key name filtering rules. For information about key 20767 // Container for object key name filtering rules. For information about key
18061 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) 20768 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
20769 // in the Amazon Simple Storage Service Developer Guide.
18062 Filter *NotificationConfigurationFilter `type:"structure"` 20770 Filter *NotificationConfigurationFilter `type:"structure"`
18063 20771
18064 // Optional unique identifier for configurations in a notification configuration. 20772 // Optional unique identifier for configurations in a notification configuration.
@@ -18122,7 +20830,6 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
18122 return s 20830 return s
18123} 20831}
18124 20832
18125// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
18126type TopicConfigurationDeprecated struct { 20833type TopicConfigurationDeprecated struct {
18127 _ struct{} `type:"structure"` 20834 _ struct{} `type:"structure"`
18128 20835
@@ -18174,7 +20881,6 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep
18174 return s 20881 return s
18175} 20882}
18176 20883
18177// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition
18178type Transition struct { 20884type Transition struct {
18179 _ struct{} `type:"structure"` 20885 _ struct{} `type:"structure"`
18180 20886
@@ -18218,7 +20924,6 @@ func (s *Transition) SetStorageClass(v string) *Transition {
18218 return s 20924 return s
18219} 20925}
18220 20926
18221// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest
18222type UploadPartCopyInput struct { 20927type UploadPartCopyInput struct {
18223 _ struct{} `type:"structure"` 20928 _ struct{} `type:"structure"`
18224 20929
@@ -18235,14 +20940,14 @@ type UploadPartCopyInput struct {
18235 CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` 20940 CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
18236 20941
18237 // Copies the object if it has been modified since the specified time. 20942 // Copies the object if it has been modified since the specified time.
18238 CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` 20943 CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
18239 20944
18240 // Copies the object if its entity tag (ETag) is different than the specified 20945 // Copies the object if its entity tag (ETag) is different than the specified
18241 // ETag. 20946 // ETag.
18242 CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` 20947 CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
18243 20948
18244 // Copies the object if it hasn't been modified since the specified time. 20949 // Copies the object if it hasn't been modified since the specified time.
18245 CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` 20950 CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
18246 20951
18247 // The range of bytes to copy from the source object. The range value must use 20952 // The range of bytes to copy from the source object. The range value must use
18248 // the form bytes=first-last, where the first and last are the zero-based byte 20953 // the form bytes=first-last, where the first and last are the zero-based byte
@@ -18345,6 +21050,13 @@ func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
18345 return s 21050 return s
18346} 21051}
18347 21052
21053func (s *UploadPartCopyInput) getBucket() (v string) {
21054 if s.Bucket == nil {
21055 return v
21056 }
21057 return *s.Bucket
21058}
21059
18348// SetCopySource sets the CopySource field's value. 21060// SetCopySource sets the CopySource field's value.
18349func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { 21061func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
18350 s.CopySource = &v 21062 s.CopySource = &v
@@ -18393,6 +21105,13 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartC
18393 return s 21105 return s
18394} 21106}
18395 21107
21108func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) {
21109 if s.CopySourceSSECustomerKey == nil {
21110 return v
21111 }
21112 return *s.CopySourceSSECustomerKey
21113}
21114
18396// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. 21115// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
18397func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { 21116func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
18398 s.CopySourceSSECustomerKeyMD5 = &v 21117 s.CopySourceSSECustomerKeyMD5 = &v
@@ -18429,6 +21148,13 @@ func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
18429 return s 21148 return s
18430} 21149}
18431 21150
21151func (s *UploadPartCopyInput) getSSECustomerKey() (v string) {
21152 if s.SSECustomerKey == nil {
21153 return v
21154 }
21155 return *s.SSECustomerKey
21156}
21157
18432// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 21158// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
18433func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { 21159func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
18434 s.SSECustomerKeyMD5 = &v 21160 s.SSECustomerKeyMD5 = &v
@@ -18441,7 +21167,6 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
18441 return s 21167 return s
18442} 21168}
18443 21169
18444// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput
18445type UploadPartCopyOutput struct { 21170type UploadPartCopyOutput struct {
18446 _ struct{} `type:"structure" payload:"CopyPartResult"` 21171 _ struct{} `type:"structure" payload:"CopyPartResult"`
18447 21172
@@ -18526,7 +21251,6 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy
18526 return s 21251 return s
18527} 21252}
18528 21253
18529// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
18530type UploadPartInput struct { 21254type UploadPartInput struct {
18531 _ struct{} `type:"structure" payload:"Body"` 21255 _ struct{} `type:"structure" payload:"Body"`
18532 21256
@@ -18542,6 +21266,9 @@ type UploadPartInput struct {
18542 // body cannot be determined automatically. 21266 // body cannot be determined automatically.
18543 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` 21267 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
18544 21268
21269 // The base64-encoded 128-bit MD5 digest of the part data.
21270 ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
21271
18545 // Object key for which the multipart upload was initiated. 21272 // Object key for which the multipart upload was initiated.
18546 // 21273 //
18547 // Key is a required field 21274 // Key is a required field
@@ -18628,12 +21355,25 @@ func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
18628 return s 21355 return s
18629} 21356}
18630 21357
21358func (s *UploadPartInput) getBucket() (v string) {
21359 if s.Bucket == nil {
21360 return v
21361 }
21362 return *s.Bucket
21363}
21364
18631// SetContentLength sets the ContentLength field's value. 21365// SetContentLength sets the ContentLength field's value.
18632func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { 21366func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
18633 s.ContentLength = &v 21367 s.ContentLength = &v
18634 return s 21368 return s
18635} 21369}
18636 21370
21371// SetContentMD5 sets the ContentMD5 field's value.
21372func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput {
21373 s.ContentMD5 = &v
21374 return s
21375}
21376
18637// SetKey sets the Key field's value. 21377// SetKey sets the Key field's value.
18638func (s *UploadPartInput) SetKey(v string) *UploadPartInput { 21378func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
18639 s.Key = &v 21379 s.Key = &v
@@ -18664,6 +21404,13 @@ func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
18664 return s 21404 return s
18665} 21405}
18666 21406
21407func (s *UploadPartInput) getSSECustomerKey() (v string) {
21408 if s.SSECustomerKey == nil {
21409 return v
21410 }
21411 return *s.SSECustomerKey
21412}
21413
18667// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 21414// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
18668func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { 21415func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
18669 s.SSECustomerKeyMD5 = &v 21416 s.SSECustomerKeyMD5 = &v
@@ -18676,7 +21423,6 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
18676 return s 21423 return s
18677} 21424}
18678 21425
18679// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput
18680type UploadPartOutput struct { 21426type UploadPartOutput struct {
18681 _ struct{} `type:"structure"` 21427 _ struct{} `type:"structure"`
18682 21428
@@ -18752,7 +21498,6 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
18752 return s 21498 return s
18753} 21499}
18754 21500
18755// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration
18756type VersioningConfiguration struct { 21501type VersioningConfiguration struct {
18757 _ struct{} `type:"structure"` 21502 _ struct{} `type:"structure"`
18758 21503
@@ -18787,7 +21532,6 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
18787 return s 21532 return s
18788} 21533}
18789 21534
18790// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration
18791type WebsiteConfiguration struct { 21535type WebsiteConfiguration struct {
18792 _ struct{} `type:"structure"` 21536 _ struct{} `type:"structure"`
18793 21537
@@ -18950,6 +21694,17 @@ const (
18950 BucketVersioningStatusSuspended = "Suspended" 21694 BucketVersioningStatusSuspended = "Suspended"
18951) 21695)
18952 21696
21697const (
21698 // CompressionTypeNone is a CompressionType enum value
21699 CompressionTypeNone = "NONE"
21700
21701 // CompressionTypeGzip is a CompressionType enum value
21702 CompressionTypeGzip = "GZIP"
21703
21704 // CompressionTypeBzip2 is a CompressionType enum value
21705 CompressionTypeBzip2 = "BZIP2"
21706)
21707
18953// Requests Amazon S3 to encode the object keys in the response and specifies 21708// Requests Amazon S3 to encode the object keys in the response and specifies
18954// the encoding method to use. An object key may contain any Unicode character; 21709// the encoding method to use. An object key may contain any Unicode character;
18955// however, XML 1.0 parser cannot parse some characters, such as characters 21710// however, XML 1.0 parser cannot parse some characters, such as characters
@@ -19000,6 +21755,22 @@ const (
19000) 21755)
19001 21756
19002const ( 21757const (
21758 // ExpressionTypeSql is a ExpressionType enum value
21759 ExpressionTypeSql = "SQL"
21760)
21761
21762const (
21763 // FileHeaderInfoUse is a FileHeaderInfo enum value
21764 FileHeaderInfoUse = "USE"
21765
21766 // FileHeaderInfoIgnore is a FileHeaderInfo enum value
21767 FileHeaderInfoIgnore = "IGNORE"
21768
21769 // FileHeaderInfoNone is a FileHeaderInfo enum value
21770 FileHeaderInfoNone = "NONE"
21771)
21772
21773const (
19003 // FilterRuleNamePrefix is a FilterRuleName enum value 21774 // FilterRuleNamePrefix is a FilterRuleName enum value
19004 FilterRuleNamePrefix = "prefix" 21775 FilterRuleNamePrefix = "prefix"
19005 21776
@@ -19010,6 +21781,9 @@ const (
19010const ( 21781const (
19011 // InventoryFormatCsv is a InventoryFormat enum value 21782 // InventoryFormatCsv is a InventoryFormat enum value
19012 InventoryFormatCsv = "CSV" 21783 InventoryFormatCsv = "CSV"
21784
21785 // InventoryFormatOrc is a InventoryFormat enum value
21786 InventoryFormatOrc = "ORC"
19013) 21787)
19014 21788
19015const ( 21789const (
@@ -19046,6 +21820,17 @@ const (
19046 21820
19047 // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value 21821 // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value
19048 InventoryOptionalFieldReplicationStatus = "ReplicationStatus" 21822 InventoryOptionalFieldReplicationStatus = "ReplicationStatus"
21823
21824 // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value
21825 InventoryOptionalFieldEncryptionStatus = "EncryptionStatus"
21826)
21827
21828const (
21829 // JSONTypeDocument is a JSONType enum value
21830 JSONTypeDocument = "DOCUMENT"
21831
21832 // JSONTypeLines is a JSONType enum value
21833 JSONTypeLines = "LINES"
19049) 21834)
19050 21835
19051const ( 21836const (
@@ -19104,6 +21889,12 @@ const (
19104 21889
19105 // ObjectStorageClassGlacier is a ObjectStorageClass enum value 21890 // ObjectStorageClassGlacier is a ObjectStorageClass enum value
19106 ObjectStorageClassGlacier = "GLACIER" 21891 ObjectStorageClassGlacier = "GLACIER"
21892
21893 // ObjectStorageClassStandardIa is a ObjectStorageClass enum value
21894 ObjectStorageClassStandardIa = "STANDARD_IA"
21895
21896 // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value
21897 ObjectStorageClassOnezoneIa = "ONEZONE_IA"
19107) 21898)
19108 21899
19109const ( 21900const (
@@ -19112,6 +21903,11 @@ const (
19112) 21903)
19113 21904
19114const ( 21905const (
21906 // OwnerOverrideDestination is a OwnerOverride enum value
21907 OwnerOverrideDestination = "Destination"
21908)
21909
21910const (
19115 // PayerRequester is a Payer enum value 21911 // PayerRequester is a Payer enum value
19116 PayerRequester = "Requester" 21912 PayerRequester = "Requester"
19117 21913
@@ -19145,6 +21941,14 @@ const (
19145) 21941)
19146 21942
19147const ( 21943const (
21944 // QuoteFieldsAlways is a QuoteFields enum value
21945 QuoteFieldsAlways = "ALWAYS"
21946
21947 // QuoteFieldsAsneeded is a QuoteFields enum value
21948 QuoteFieldsAsneeded = "ASNEEDED"
21949)
21950
21951const (
19148 // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value 21952 // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
19149 ReplicationRuleStatusEnabled = "Enabled" 21953 ReplicationRuleStatusEnabled = "Enabled"
19150 21954
@@ -19183,6 +21987,11 @@ const (
19183) 21987)
19184 21988
19185const ( 21989const (
21990 // RestoreRequestTypeSelect is a RestoreRequestType enum value
21991 RestoreRequestTypeSelect = "SELECT"
21992)
21993
21994const (
19186 // ServerSideEncryptionAes256 is a ServerSideEncryption enum value 21995 // ServerSideEncryptionAes256 is a ServerSideEncryption enum value
19187 ServerSideEncryptionAes256 = "AES256" 21996 ServerSideEncryptionAes256 = "AES256"
19188 21997
@@ -19191,6 +22000,14 @@ const (
19191) 22000)
19192 22001
19193const ( 22002const (
22003 // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value
22004 SseKmsEncryptedObjectsStatusEnabled = "Enabled"
22005
22006 // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value
22007 SseKmsEncryptedObjectsStatusDisabled = "Disabled"
22008)
22009
22010const (
19194 // StorageClassStandard is a StorageClass enum value 22011 // StorageClassStandard is a StorageClass enum value
19195 StorageClassStandard = "STANDARD" 22012 StorageClassStandard = "STANDARD"
19196 22013
@@ -19199,6 +22016,9 @@ const (
19199 22016
19200 // StorageClassStandardIa is a StorageClass enum value 22017 // StorageClassStandardIa is a StorageClass enum value
19201 StorageClassStandardIa = "STANDARD_IA" 22018 StorageClassStandardIa = "STANDARD_IA"
22019
22020 // StorageClassOnezoneIa is a StorageClass enum value
22021 StorageClassOnezoneIa = "ONEZONE_IA"
19202) 22022)
19203 22023
19204const ( 22024const (
@@ -19231,6 +22051,9 @@ const (
19231 22051
19232 // TransitionStorageClassStandardIa is a TransitionStorageClass enum value 22052 // TransitionStorageClassStandardIa is a TransitionStorageClass enum value
19233 TransitionStorageClassStandardIa = "STANDARD_IA" 22053 TransitionStorageClassStandardIa = "STANDARD_IA"
22054
22055 // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value
22056 TransitionStorageClassOnezoneIa = "ONEZONE_IA"
19234) 22057)
19235 22058
19236const ( 22059const (
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
new file mode 100644
index 0000000..5c8ce5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,249 @@
1package s3
2
3import (
4 "bytes"
5 "crypto/md5"
6 "crypto/sha256"
7 "encoding/base64"
8 "encoding/hex"
9 "fmt"
10 "hash"
11 "io"
12
13 "github.com/aws/aws-sdk-go/aws"
14 "github.com/aws/aws-sdk-go/aws/awserr"
15 "github.com/aws/aws-sdk-go/aws/request"
16 "github.com/aws/aws-sdk-go/internal/sdkio"
17)
18
19const (
20 contentMD5Header = "Content-Md5"
21 contentSha256Header = "X-Amz-Content-Sha256"
22 amzTeHeader = "X-Amz-Te"
23 amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
24
25 appendMD5TxEncoding = "append-md5"
26)
27
28// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
29// require it.
30func contentMD5(r *request.Request) {
31 h := md5.New()
32
33 if !aws.IsReaderSeekable(r.Body) {
34 if r.Config.Logger != nil {
35 r.Config.Logger.Log(fmt.Sprintf(
36 "Unable to compute Content-MD5 for unseekable body, S3.%s",
37 r.Operation.Name))
38 }
39 return
40 }
41
42 if _, err := copySeekableBody(h, r.Body); err != nil {
43 r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
44 return
45 }
46
47 // encode the md5 checksum in base64 and set the request header.
48 v := base64.StdEncoding.EncodeToString(h.Sum(nil))
49 r.HTTPRequest.Header.Set(contentMD5Header, v)
50}
51
52// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
53// request. If the body is not seekable or S3DisableContentMD5Validation is set,
54// this handler will be ignored.
55func computeBodyHashes(r *request.Request) {
56 if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
57 return
58 }
59 if r.IsPresigned() {
60 return
61 }
62 if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
63 return
64 }
65
66 var md5Hash, sha256Hash hash.Hash
67 hashers := make([]io.Writer, 0, 2)
68
69 	// Determine upfront which hashes can be set without overriding
70 	// user-provided header data.
71 if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
72 md5Hash = md5.New()
73 hashers = append(hashers, md5Hash)
74 }
75
76 if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
77 sha256Hash = sha256.New()
78 hashers = append(hashers, sha256Hash)
79 }
80
81 // Create the destination writer based on the hashes that are not already
82 // provided by the user.
83 var dst io.Writer
84 switch len(hashers) {
85 case 0:
86 return
87 case 1:
88 dst = hashers[0]
89 default:
90 dst = io.MultiWriter(hashers...)
91 }
92
93 if _, err := copySeekableBody(dst, r.Body); err != nil {
94 r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
95 return
96 }
97
98 // For the hashes created, set the associated headers that the user did not
99 // already provide.
100 if md5Hash != nil {
101 sum := make([]byte, md5.Size)
102 encoded := make([]byte, md5Base64EncLen)
103
104 base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
105 r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
106 }
107
108 if sha256Hash != nil {
109 encoded := make([]byte, sha256HexEncLen)
110 sum := make([]byte, sha256.Size)
111
112 hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
113 r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
114 }
115}
116
117const (
118 md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
119 sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
120)
121
122func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
123 curPos, err := src.Seek(0, sdkio.SeekCurrent)
124 if err != nil {
125 return 0, err
126 }
127
128 	// Hash the body, then seek back to the original position so the body is
129 	// reset for transmission. Copy errors may be assumed to be from the
130 	// body.
131 n, err := io.Copy(dst, src)
132 if err != nil {
133 return n, err
134 }
135
136 _, err = src.Seek(curPos, sdkio.SeekStart)
137 if err != nil {
138 return n, err
139 }
140
141 return n, nil
142}
143
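// A minimal, standalone sketch of the pattern contentMD5 and copySeekableBody
// implement above: hash a seekable body, then restore its read position so the
// same body can still be transmitted. It uses only the standard library and is
// illustrative rather than part of the SDK.
//
//	func bodyMD5Base64(body io.ReadSeeker) (string, error) {
//		cur, err := body.Seek(0, io.SeekCurrent) // remember the current position
//		if err != nil {
//			return "", err
//		}
//		h := md5.New()
//		if _, err := io.Copy(h, body); err != nil { // hash the remaining bytes
//			return "", err
//		}
//		if _, err := body.Seek(cur, io.SeekStart); err != nil { // rewind for transmission
//			return "", err
//		}
//		return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
//	}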
144// Adds the x-amz-te: append-md5 header to the request. This requests that the
145// service respond with a trailing MD5 checksum.
146//
147// Will not ask for append MD5 if disabled, the request is presigned,
148// or the API operation does not support content MD5 validation.
149func askForTxEncodingAppendMD5(r *request.Request) {
150 if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
151 return
152 }
153 if r.IsPresigned() {
154 return
155 }
156 r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
157}
158
159func useMD5ValidationReader(r *request.Request) {
160 if r.Error != nil {
161 return
162 }
163
164 if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
165 return
166 }
167
168 var bodyReader *io.ReadCloser
169 var contentLen int64
170 switch tv := r.Data.(type) {
171 case *GetObjectOutput:
172 bodyReader = &tv.Body
173 contentLen = aws.Int64Value(tv.ContentLength)
174 		// Update ContentLength to exclude the trailing MD5 checksum.
175 tv.ContentLength = aws.Int64(contentLen - md5.Size)
176 tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
177 default:
178 r.Error = awserr.New("ChecksumValidationError",
179 fmt.Sprintf("%s: %s header received on unsupported API, %s",
180 amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
181 ), nil)
182 return
183 }
184
185 if contentLen < md5.Size {
186 r.Error = awserr.New("ChecksumValidationError",
187 fmt.Sprintf("invalid Content-Length %d for %s %s",
188 contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
189 ), nil)
190 return
191 }
192
193 // Wrap and swap the response body reader with the validation reader.
194 *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
195}
196
197type md5ValidationReader struct {
198 rawReader io.ReadCloser
199 payload io.Reader
200 hash hash.Hash
201
202 payloadLen int64
203 read int64
204}
205
206func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
207 h := md5.New()
208 return &md5ValidationReader{
209 rawReader: reader,
210 payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
211 hash: h,
212 payloadLen: payloadLen,
213 }
214}
215
216func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
217 n, err = v.payload.Read(p)
218 if err != nil && err != io.EOF {
219 return n, err
220 }
221
222 v.read += int64(n)
223
224 if err == io.EOF {
225 if v.read != v.payloadLen {
226 return n, io.ErrUnexpectedEOF
227 }
228 expectSum := make([]byte, md5.Size)
229 actualSum := make([]byte, md5.Size)
230 if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
231 return n, sumReadErr
232 }
233 actualSum = v.hash.Sum(actualSum[0:0])
234 if !bytes.Equal(expectSum, actualSum) {
235 return n, awserr.New("InvalidChecksum",
236 fmt.Sprintf("expected MD5 checksum %s, got %s",
237 hex.EncodeToString(expectSum),
238 hex.EncodeToString(actualSum),
239 ),
240 nil)
241 }
242 }
243
244 return n, err
245}
246
247func (v *md5ValidationReader) Close() error {
248 return v.rawReader.Close()
249}
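// A minimal sketch of how the md5ValidationReader above behaves: an "append-md5"
// encoded body is the payload followed by its 16-byte MD5 digest, and the reader
// strips and verifies that trailer while the payload is read; the ioutil/bytes
// usage here is illustrative only.
//
//	payload := []byte("hello, world")
//	sum := md5.Sum(payload)
//	wire := append(payload, sum[:]...) // payload plus trailing checksum, as sent by S3
//
//	r := newMD5ValidationReader(ioutil.NopCloser(bytes.NewReader(wire)), int64(len(payload)))
//	got, err := ioutil.ReadAll(r)
//	// err is nil and got equals payload; a corrupted trailer would instead surface
//	// an InvalidChecksum error from Read.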
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
deleted file mode 100644
index 9fc5df9..0000000
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
+++ /dev/null
@@ -1,36 +0,0 @@
1package s3
2
3import (
4 "crypto/md5"
5 "encoding/base64"
6 "io"
7
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/request"
10)
11
12// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
13// require it.
14func contentMD5(r *request.Request) {
15 h := md5.New()
16
17 // hash the body. seek back to the first position after reading to reset
18 // the body for transmission. copy errors may be assumed to be from the
19 // body.
20 _, err := io.Copy(h, r.Body)
21 if err != nil {
22 r.Error = awserr.New("ContentMD5", "failed to read body", err)
23 return
24 }
25 _, err = r.Body.Seek(0, 0)
26 if err != nil {
27 r.Error = awserr.New("ContentMD5", "failed to seek body", err)
28 return
29 }
30
31 // encode the md5 checksum in base64 and set the request header.
32 sum := h.Sum(nil)
33 sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
34 base64.StdEncoding.Encode(sum64, sum)
35 r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
36}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
index 8463347..a55beab 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -42,5 +42,29 @@ func defaultInitRequestFn(r *request.Request) {
42 r.Handlers.Validate.PushFront(populateLocationConstraint) 42 r.Handlers.Validate.PushFront(populateLocationConstraint)
43 case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: 43 case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
44 r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) 44 r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
45 case opPutObject, opUploadPart:
46 r.Handlers.Build.PushBack(computeBodyHashes)
47 // Disabled until #1837 root issue is resolved.
48 // case opGetObject:
49 // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
50 // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
45 } 51 }
46} 52}
53
54// bucketGetter is an accessor interface to grab the "Bucket" field from
55// an S3 type.
56type bucketGetter interface {
57 getBucket() string
58}
59
60// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
61// field from an S3 type.
62type sseCustomerKeyGetter interface {
63 getSSECustomerKey() string
64}
65
66// copySourceSSECustomerKeyGetter is an accessor interface to grab the
67// "CopySourceSSECustomerKey" field from an S3 type.
68type copySourceSSECustomerKeyGetter interface {
69 getCopySourceSSECustomerKey() string
70}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
index f045fd0..0def022 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -10,69 +10,17 @@
10// 10//
11// Using the Client 11// Using the Client
12// 12//
13// To use the client for Amazon Simple Storage Service you will first need 13// To contact Amazon Simple Storage Service with the SDK use the New function to create
14// to create a new instance of it. 14// a new service client. With that client you can make API requests to the service.
15// These clients are safe to use concurrently.
15// 16//
16// When creating a client for an AWS service you'll first need to have a Session 17// See the SDK's documentation for more information on how to use the SDK.
17// already created. The Session provides configuration that can be shared
18// between multiple service clients. Additional configuration can be applied to
19// the Session and service's client when they are constructed. The aws package's
20// Config type contains several fields such as Region for the AWS Region the
21// client should make API requests too. The optional Config value can be provided
22// as the variadic argument for Sessions and client creation.
23//
24// Once the service's client is created you can use it to make API requests the
25// AWS service. These clients are safe to use concurrently.
26//
27// // Create a session to share configuration, and load external configuration.
28// sess := session.Must(session.NewSession())
29//
30// // Create the service's client with the session.
31// svc := s3.New(sess)
32//
33// See the SDK's documentation for more information on how to use service clients.
34// https://docs.aws.amazon.com/sdk-for-go/api/ 18// https://docs.aws.amazon.com/sdk-for-go/api/
35// 19//
36// See aws package's Config type for more information on configuration options. 20// See aws.Config documentation for more information on configuring SDK clients.
37// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config 21// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
38// 22//
39// See the Amazon Simple Storage Service client S3 for more 23// See the Amazon Simple Storage Service client S3 for more
40// information on creating the service's client. 24// information on creating client for this service.
41// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New 25// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
42//
43// Once the client is created you can make an API request to the service.
44// Each API method takes a input parameter, and returns the service response
45// and an error.
46//
47// The API method will document which error codes the service can be returned
48// by the operation if the service models the API operation's errors. These
49// errors will also be available as const strings prefixed with "ErrCode".
50//
51// result, err := svc.AbortMultipartUpload(params)
52// if err != nil {
53// // Cast err to awserr.Error to handle specific error codes.
54// aerr, ok := err.(awserr.Error)
55// if ok && aerr.Code() == <error code to check for> {
56// // Specific error code handling
57// }
58// return err
59// }
60//
61// fmt.Println("AbortMultipartUpload result:")
62// fmt.Println(result)
63//
64// Using the Client with Context
65//
66// The service's client also provides methods to make API requests with a Context
67// value. This allows you to control the timeout, and cancellation of pending
68// requests. These methods also take request Option as variadic parameter to apply
69// additional configuration to the API request.
70//
71// ctx := context.Background()
72//
73// result, err := svc.AbortMultipartUploadWithContext(ctx, params)
74//
75// See the request package documentation for more information on using Context pattern
76// with the SDK.
77// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
78package s3 26package s3
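// A minimal sketch of the client creation flow the shortened doc.go comment now
// points to; session.Must, session.NewSession, and s3.New are the standard SDK
// entry points, and the region value is a hypothetical placeholder.
//
//	sess := session.Must(session.NewSession(&aws.Config{
//		Region: aws.String("us-west-2"), // hypothetical region
//	}))
//	svc := s3.New(sess)
//	// svc is safe to use concurrently across goroutines.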
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
index b794a63..39b912c 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -35,7 +35,7 @@
35// 35//
36// The s3manager package's Downloader provides concurrent downloading of Objects 36// The s3manager package's Downloader provides concurrent downloading of Objects
37// from S3. The Downloader will write S3 Object content with an io.WriterAt. 37// from S3. The Downloader will write S3 Object content with an io.WriterAt.
38// Once the Downloader instance is created you can call Upload concurrently from 38// Once the Downloader instance is created you can call Download concurrently from
39// multiple goroutines safely. 39// multiple goroutines safely.
40// 40//
41// // The session the S3 Downloader will use 41// // The session the S3 Downloader will use
@@ -56,7 +56,7 @@
56// Key: aws.String(myString), 56// Key: aws.String(myString),
57// }) 57// })
58// if err != nil { 58// if err != nil {
59// return fmt.Errorf("failed to upload file, %v", err) 59// return fmt.Errorf("failed to download file, %v", err)
60// } 60// }
61// fmt.Printf("file downloaded, %d bytes\n", n) 61// fmt.Printf("file downloaded, %d bytes\n", n)
62// 62//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index ec3ffe4..a7fbc2d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -8,7 +8,6 @@ import (
8 8
9 "github.com/aws/aws-sdk-go/aws" 9 "github.com/aws/aws-sdk-go/aws"
10 "github.com/aws/aws-sdk-go/aws/awserr" 10 "github.com/aws/aws-sdk-go/aws/awserr"
11 "github.com/aws/aws-sdk-go/aws/awsutil"
12 "github.com/aws/aws-sdk-go/aws/request" 11 "github.com/aws/aws-sdk-go/aws/request"
13) 12)
14 13
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
113// Attempts to retrieve the bucket name from the request input parameters. 112// Attempts to retrieve the bucket name from the request input parameters.
114// If no bucket is found, or the field is empty "", false will be returned. 113// If no bucket is found, or the field is empty "", false will be returned.
115func bucketNameFromReqParams(params interface{}) (string, bool) { 114func bucketNameFromReqParams(params interface{}) (string, bool) {
116 b, _ := awsutil.ValuesAtPath(params, "Bucket") 115 if iface, ok := params.(bucketGetter); ok {
117 if len(b) == 0 { 116 b := iface.getBucket()
118 return "", false 117 return b, len(b) > 0
119 }
120
121 if bucket, ok := b[0].(*string); ok {
122 if bucketStr := aws.StringValue(bucket); bucketStr != "" {
123 return bucketStr, true
124 }
125 } 118 }
126 119
127 return "", false 120 return "", false
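The refactor above replaces reflection (awsutil.ValuesAtPath) with a plain interface assertion on the request parameters. The following self-contained sketch illustrates that pattern; the bucketGetter interface and sampleInput type here are hypothetical stand-ins for the SDK's unexported interface and its generated input structs.

package main

import "fmt"

// bucketGetter is a stand-in for the unexported getter interface the
// generated input types are expected to satisfy.
type bucketGetter interface {
	getBucket() string
}

// sampleInput mimics a generated input struct exposing its Bucket field
// through the getter.
type sampleInput struct {
	Bucket *string
}

func (s *sampleInput) getBucket() string {
	if s.Bucket == nil {
		return ""
	}
	return *s.Bucket
}

// bucketNameFromParams mirrors the shape of the refactored helper: a single
// type assertion instead of walking the struct with reflection.
func bucketNameFromParams(params interface{}) (string, bool) {
	if iface, ok := params.(bucketGetter); ok {
		b := iface.getBucket()
		return b, len(b) > 0
	}
	return "", false
}

func main() {
	name := "my-bucket"
	b, ok := bucketNameFromParams(&sampleInput{Bucket: &name})
	fmt.Println(b, ok) // my-bucket true
}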
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 614e477..20de53f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -29,8 +29,9 @@ var initRequest func(*request.Request)
29 29
30// Service information constants 30// Service information constants
31const ( 31const (
32 ServiceName = "s3" // Service endpoint prefix API calls made to. 32 ServiceName = "s3" // Name of service.
33 EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. 33 EndpointsID = ServiceName // ID to lookup a service endpoint with.
34 ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
34) 35)
35 36
36// New creates a new instance of the S3 client with a session. 37// New creates a new instance of the S3 client with a session.
@@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
55 cfg, 56 cfg,
56 metadata.ClientInfo{ 57 metadata.ClientInfo{
57 ServiceName: ServiceName, 58 ServiceName: ServiceName,
59 ServiceID: ServiceID,
58 SigningName: signingName, 60 SigningName: signingName,
59 SigningRegion: signingRegion, 61 SigningRegion: signingRegion,
60 Endpoint: endpoint, 62 Endpoint: endpoint,
@@ -71,6 +73,8 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
71 svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) 73 svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
72 svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) 74 svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
73 75
76 svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
77
74 // Run custom client initialization if present 78 // Run custom client initialization if present
75 if initClient != nil { 79 if initClient != nil {
76 initClient(svc.Client) 80 initClient(svc.Client)
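A small sketch of where the new ServiceID metadata surfaces, assuming an SDK version that includes the change above; it only prints the package-level service constants and the metadata carried by a constructed client.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Package-level constants, including the newly added ServiceID.
	fmt.Println(s3.ServiceName, s3.EndpointsID, s3.ServiceID)

	// The same identifier is carried on the client's metadata.
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)
	fmt.Println(svc.ClientInfo.ServiceID)
}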
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
index 268ea2f..8010c4f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -5,17 +5,27 @@ import (
5 "encoding/base64" 5 "encoding/base64"
6 6
7 "github.com/aws/aws-sdk-go/aws/awserr" 7 "github.com/aws/aws-sdk-go/aws/awserr"
8 "github.com/aws/aws-sdk-go/aws/awsutil"
9 "github.com/aws/aws-sdk-go/aws/request" 8 "github.com/aws/aws-sdk-go/aws/request"
10) 9)
11 10
12var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) 11var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
13 12
14func validateSSERequiresSSL(r *request.Request) { 13func validateSSERequiresSSL(r *request.Request) {
15 if r.HTTPRequest.URL.Scheme != "https" { 14 if r.HTTPRequest.URL.Scheme == "https" {
16 p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") 15 return
17 if len(p) > 0 { 16 }
17
18 if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
19 if len(iface.getSSECustomerKey()) > 0 {
20 r.Error = errSSERequiresSSL
21 return
22 }
23 }
24
25 if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
26 if len(iface.getCopySourceSSECustomerKey()) > 0 {
18 r.Error = errSSERequiresSSL 27 r.Error = errSSERequiresSSL
28 return
19 } 29 }
20 } 30 }
21} 31}
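The rewritten validateSSERequiresSSL enforces the same rule as before: SSE-C key material may only be sent over HTTPS. A minimal sketch of what that looks like from the caller's side, with a deliberately plain-HTTP endpoint and placeholder bucket, key, and key material; the expected failure is the ConfigError defined above.

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Forcing a plain-HTTP endpoint makes the SSE-C validation reject the request.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:     aws.String("us-east-1"),
		DisableSSL: aws.Bool(true),
	}))
	svc := s3.New(sess)

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("my-bucket"), // placeholder
		Key:                  aws.String("my-key"),    // placeholder
		Body:                 bytes.NewReader([]byte("hello")),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String("32-byte-long-customer-key-000000"), // placeholder
	})
	// Expected: ConfigError: cannot send SSE keys over HTTP.
	fmt.Println(err)
}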
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
index 5a78fd3..9f33efc 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -7,6 +7,7 @@ import (
7 7
8 "github.com/aws/aws-sdk-go/aws/awserr" 8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/request" 9 "github.com/aws/aws-sdk-go/aws/request"
10 "github.com/aws/aws-sdk-go/internal/sdkio"
10) 11)
11 12
12func copyMultipartStatusOKUnmarhsalError(r *request.Request) { 13func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
@@ -17,7 +18,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
17 } 18 }
18 body := bytes.NewReader(b) 19 body := bytes.NewReader(b)
19 r.HTTPResponse.Body = ioutil.NopCloser(body) 20 r.HTTPResponse.Body = ioutil.NopCloser(body)
20 defer body.Seek(0, 0) 21 defer body.Seek(0, sdkio.SeekStart)
21 22
22 if body.Len() == 0 { 23 if body.Len() == 0 {
23 // If there is no body don't attempt to parse the body. 24 // If there is no body don't attempt to parse the body.
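The change above swaps the magic constant 0 for a named seek-origin constant. A self-contained sketch of the same buffer-and-rewind pattern, using the standard library's io.SeekStart in place of the SDK-internal sdkio.SeekStart (which exists to support older Go releases).

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	resp := ioutil.NopCloser(strings.NewReader("<Error><Code>InternalError</Code></Error>"))

	// Buffer the body so it can be inspected and then re-read.
	b, err := ioutil.ReadAll(resp)
	if err != nil {
		panic(err)
	}
	body := bytes.NewReader(b)

	peek := make([]byte, 7)
	io.ReadFull(body, peek)
	fmt.Printf("peeked %q\n", peek)

	// Rewind; equivalent to Seek(0, 0) but self-documenting.
	body.Seek(0, io.SeekStart)
	rest, _ := ioutil.ReadAll(body)
	fmt.Println(len(rest), "bytes available again")
}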
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
index cccfa8c..2596c69 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -11,7 +11,7 @@ import (
11 11
12// WaitUntilBucketExists uses the Amazon S3 API operation 12// WaitUntilBucketExists uses the Amazon S3 API operation
13// HeadBucket to wait for a condition to be met before returning. 13// HeadBucket to wait for a condition to be met before returning.
14// If the condition is not meet within the max attempt window an error will 14// If the condition is not met within the max attempt window, an error will
15// be returned. 15// be returned.
16func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { 16func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
17 return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) 17 return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
@@ -72,7 +72,7 @@ func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucket
72 72
73// WaitUntilBucketNotExists uses the Amazon S3 API operation 73// WaitUntilBucketNotExists uses the Amazon S3 API operation
74// HeadBucket to wait for a condition to be met before returning. 74// HeadBucket to wait for a condition to be met before returning.
75// If the condition is not meet within the max attempt window an error will 75// If the condition is not met within the max attempt window, an error will
76// be returned. 76// be returned.
77func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { 77func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
78 return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) 78 return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
@@ -118,7 +118,7 @@ func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBuc
118 118
119// WaitUntilObjectExists uses the Amazon S3 API operation 119// WaitUntilObjectExists uses the Amazon S3 API operation
120// HeadObject to wait for a condition to be met before returning. 120// HeadObject to wait for a condition to be met before returning.
121// If the condition is not meet within the max attempt window an error will 121// If the condition is not met within the max attempt window, an error will
122// be returned. 122// be returned.
123func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { 123func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
124 return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) 124 return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
@@ -169,7 +169,7 @@ func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObject
169 169
170// WaitUntilObjectNotExists uses the Amazon S3 API operation 170// WaitUntilObjectNotExists uses the Amazon S3 API operation
171// HeadObject to wait for a condition to be met before returning. 171// HeadObject to wait for a condition to be met before returning.
172// If the condition is not meet within the max attempt window an error will 172// If the condition is not met within the max attempt window, an error will
173// be returned. 173// be returned.
174func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { 174func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
175 return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) 175 return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
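A minimal sketch of the waiter API whose doc comments are corrected above; bucket and key names are placeholders, and the WithContext variant shows how to bound the total wait.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Block until the bucket exists, or fail after the max attempt window.
	if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String("my-bucket"), // placeholder
	}); err != nil {
		fmt.Println("bucket never became visible:", err)
		return
	}

	// The WithContext variant lets the caller bound the total wait time.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	err := svc.WaitUntilObjectExistsWithContext(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("my-key"),    // placeholder
	})
	fmt.Println(err)
}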
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index e5c105f..6f89a79 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -14,19 +14,18 @@ const opAssumeRole = "AssumeRole"
14 14
15// AssumeRoleRequest generates a "aws/request.Request" representing the 15// AssumeRoleRequest generates a "aws/request.Request" representing the
16// client's request for the AssumeRole operation. The "output" return 16// client's request for the AssumeRole operation. The "output" return
17// value can be used to capture response data after the request's "Send" method 17// value will be populated with the request's response once the request completes
18// is called. 18// successfully.
19// 19//
20// See AssumeRole for usage and error information. 20// Use "Send" method on the returned Request to send the API call to the service.
21// the "output" return value is not valid until after Send returns without error.
21// 22//
22// Creating a request object using this method should be used when you want to inject 23// See AssumeRole for more information on using the AssumeRole
23// custom logic into the request's lifecycle using a custom handler, or if you want to 24// API call, and error handling.
24// access properties on the request object before or after sending the request. If 25//
25// you just want the service response, call the AssumeRole method directly 26// This method is useful when you want to inject custom logic or configuration
26// instead. 27// into the SDK's request lifecycle. Such as custom headers, or retry logic.
27// 28//
28// Note: You must call the "Send" method on the returned request object in order
29// to execute the request.
30// 29//
31// // Example sending a request using the AssumeRoleRequest method. 30// // Example sending a request using the AssumeRoleRequest method.
32// req, resp := client.AssumeRoleRequest(params) 31// req, resp := client.AssumeRoleRequest(params)
@@ -36,7 +35,7 @@ const opAssumeRole = "AssumeRole"
36// fmt.Println(resp) 35// fmt.Println(resp)
37// } 36// }
38// 37//
39// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole 38// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
40func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { 39func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
41 op := &request.Operation{ 40 op := &request.Operation{
42 Name: opAssumeRole, 41 Name: opAssumeRole,
@@ -89,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
89// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) 88// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
90// in the IAM User Guide. 89// in the IAM User Guide.
91// 90//
92// The temporary security credentials are valid for the duration that you specified 91// By default, the temporary security credentials created by AssumeRole last
93// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a 92// for one hour. However, you can use the optional DurationSeconds parameter
94// maximum of 3600 seconds (1 hour). The default is 1 hour. 93// to specify the duration of your session. You can provide a value from 900
94// seconds (15 minutes) up to the maximum session duration setting for the role.
95// This setting can have a value from 1 hour to 12 hours. To learn how to view
96// the maximum value for your role, see View the Maximum Session Duration Setting
97// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
98// in the IAM User Guide. The maximum session duration limit applies when you
99// use the AssumeRole* API operations or the assume-role* CLI operations but
100// does not apply when you use those operations to create a console URL. For
101// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
102// in the IAM User Guide.
95// 103//
96// The temporary security credentials created by AssumeRole can be used to make 104// The temporary security credentials created by AssumeRole can be used to make
97// API calls to any AWS service with the following exception: you cannot call 105// API calls to any AWS service with the following exception: you cannot call
@@ -122,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
122// the user to call AssumeRole on the ARN of the role in the other account. 130// the user to call AssumeRole on the ARN of the role in the other account.
123// If the user is in the same account as the role, then you can either attach 131// If the user is in the same account as the role, then you can either attach
124// a policy to the user (identical to the previous different account user), 132// a policy to the user (identical to the previous different account user),
125// or you can add the user as a principal directly in the role's trust policy 133// or you can add the user as a principal directly in the role's trust policy.
134// In this case, the trust policy acts as the only resource-based policy in
135// IAM, and users in the same account as the role do not need explicit permission
136// to assume the role. For more information about trust policies and resource-based
137// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
138// in the IAM User Guide.
126// 139//
127// Using MFA with AssumeRole 140// Using MFA with AssumeRole
128// 141//
@@ -169,7 +182,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
169// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 182// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
170// in the IAM User Guide. 183// in the IAM User Guide.
171// 184//
172// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole 185// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
173func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { 186func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
174 req, out := c.AssumeRoleRequest(input) 187 req, out := c.AssumeRoleRequest(input)
175 return out, req.Send() 188 return out, req.Send()
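A minimal sketch of the DurationSeconds behaviour described above, with a placeholder role ARN; the 4-hour request only succeeds if the role's maximum session duration setting allows it.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo-role"), // placeholder
		RoleSessionName: aws.String("demo-session"),
		// 4 hours; fails if the role's maximum session duration is lower.
		DurationSeconds: aws.Int64(4 * 3600),
	})
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("credentials expire at:", aws.TimeValue(out.Credentials.Expiration))
}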
@@ -195,19 +208,18 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
195 208
196// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the 209// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
197// client's request for the AssumeRoleWithSAML operation. The "output" return 210// client's request for the AssumeRoleWithSAML operation. The "output" return
198// value can be used to capture response data after the request's "Send" method 211// value will be populated with the request's response once the request completes
199// is called. 212// successfully.
200// 213//
201// See AssumeRoleWithSAML for usage and error information. 214// Use "Send" method on the returned Request to send the API call to the service.
215// the "output" return value is not valid until after Send returns without error.
202// 216//
203// Creating a request object using this method should be used when you want to inject 217// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
204// custom logic into the request's lifecycle using a custom handler, or if you want to 218// API call, and error handling.
205// access properties on the request object before or after sending the request. If 219//
206// you just want the service response, call the AssumeRoleWithSAML method directly 220// This method is useful when you want to inject custom logic or configuration
207// instead. 221// into the SDK's request lifecycle. Such as custom headers, or retry logic.
208// 222//
209// Note: You must call the "Send" method on the returned request object in order
210// to execute the request.
211// 223//
212// // Example sending a request using the AssumeRoleWithSAMLRequest method. 224// // Example sending a request using the AssumeRoleWithSAMLRequest method.
213// req, resp := client.AssumeRoleWithSAMLRequest(params) 225// req, resp := client.AssumeRoleWithSAMLRequest(params)
@@ -217,7 +229,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
217// fmt.Println(resp) 229// fmt.Println(resp)
218// } 230// }
219// 231//
220// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML 232// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
221func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { 233func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
222 op := &request.Operation{ 234 op := &request.Operation{
223 Name: opAssumeRoleWithSAML, 235 Name: opAssumeRoleWithSAML,
@@ -249,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
249// an access key ID, a secret access key, and a security token. Applications 261// an access key ID, a secret access key, and a security token. Applications
250// can use these temporary security credentials to sign calls to AWS services. 262// can use these temporary security credentials to sign calls to AWS services.
251// 263//
252// The temporary security credentials are valid for the duration that you specified 264// By default, the temporary security credentials created by AssumeRoleWithSAML
253// when calling AssumeRole, or until the time specified in the SAML authentication 265// last for one hour. However, you can use the optional DurationSeconds parameter
254// response's SessionNotOnOrAfter value, whichever is shorter. The duration 266// to specify the duration of your session. Your role session lasts for the
255// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). 267// duration that you specify, or until the time specified in the SAML authentication
256// The default is 1 hour. 268// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
269// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
270// duration setting for the role. This setting can have a value from 1 hour
271// to 12 hours. To learn how to view the maximum value for your role, see View
272// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
273// in the IAM User Guide. The maximum session duration limit applies when you
274// use the AssumeRole* API operations or the assume-role* CLI operations but
275// does not apply when you use those operations to create a console URL. For
276// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
277// in the IAM User Guide.
257// 278//
258// The temporary security credentials created by AssumeRoleWithSAML can be used 279// The temporary security credentials created by AssumeRoleWithSAML can be used
259// to make API calls to any AWS service with the following exception: you cannot 280// to make API calls to any AWS service with the following exception: you cannot
@@ -343,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
343// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 364// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
344// in the IAM User Guide. 365// in the IAM User Guide.
345// 366//
346// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML 367// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
347func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { 368func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
348 req, out := c.AssumeRoleWithSAMLRequest(input) 369 req, out := c.AssumeRoleWithSAMLRequest(input)
349 return out, req.Send() 370 return out, req.Send()
@@ -369,19 +390,18 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
369 390
370// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the 391// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
371// client's request for the AssumeRoleWithWebIdentity operation. The "output" return 392// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
372// value can be used to capture response data after the request's "Send" method 393// value will be populated with the request's response once the request completes
373// is called. 394// successfully.
374// 395//
375// See AssumeRoleWithWebIdentity for usage and error information. 396// Use "Send" method on the returned Request to send the API call to the service.
397// the "output" return value is not valid until after Send returns without error.
376// 398//
377// Creating a request object using this method should be used when you want to inject 399// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
378// custom logic into the request's lifecycle using a custom handler, or if you want to 400// API call, and error handling.
379// access properties on the request object before or after sending the request. If 401//
380// you just want the service response, call the AssumeRoleWithWebIdentity method directly 402// This method is useful when you want to inject custom logic or configuration
381// instead. 403// into the SDK's request lifecycle. Such as custom headers, or retry logic.
382// 404//
383// Note: You must call the "Send" method on the returned request object in order
384// to execute the request.
385// 405//
386// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 406// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
387// req, resp := client.AssumeRoleWithWebIdentityRequest(params) 407// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
@@ -391,7 +411,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
391// fmt.Println(resp) 411// fmt.Println(resp)
392// } 412// }
393// 413//
394// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity 414// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
395func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { 415func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
396 op := &request.Operation{ 416 op := &request.Operation{
397 Name: opAssumeRoleWithWebIdentity, 417 Name: opAssumeRoleWithWebIdentity,
@@ -441,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
441// key ID, a secret access key, and a security token. Applications can use these 461// key ID, a secret access key, and a security token. Applications can use these
442// temporary security credentials to sign calls to AWS service APIs. 462// temporary security credentials to sign calls to AWS service APIs.
443// 463//
444// The credentials are valid for the duration that you specified when calling 464// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
445// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to 465// last for one hour. However, you can use the optional DurationSeconds parameter
446// a maximum of 3600 seconds (1 hour). The default is 1 hour. 466// to specify the duration of your session. You can provide a value from 900
467// seconds (15 minutes) up to the maximum session duration setting for the role.
468// This setting can have a value from 1 hour to 12 hours. To learn how to view
469// the maximum value for your role, see View the Maximum Session Duration Setting
470// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
471// in the IAM User Guide. The maximum session duration limit applies when you
472// use the AssumeRole* API operations or the assume-role* CLI operations but
473// does not apply when you use those operations to create a console URL. For
474// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
475// in the IAM User Guide.
447// 476//
448// The temporary security credentials created by AssumeRoleWithWebIdentity can 477// The temporary security credentials created by AssumeRoleWithWebIdentity can
449// be used to make API calls to any AWS service with the following exception: 478// be used to make API calls to any AWS service with the following exception:
@@ -495,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
495// the information from these providers to get and use temporary security 524// the information from these providers to get and use temporary security
496// credentials. 525// credentials.
497// 526//
498// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). 527// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
499// This article discusses web identity federation and shows an example of 528// This article discusses web identity federation and shows an example of
500// how to use web identity federation to get access to content in Amazon 529// how to use web identity federation to get access to content in Amazon
501// S3. 530// S3.
@@ -546,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
546// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 575// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
547// in the IAM User Guide. 576// in the IAM User Guide.
548// 577//
549// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity 578// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
550func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { 579func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
551 req, out := c.AssumeRoleWithWebIdentityRequest(input) 580 req, out := c.AssumeRoleWithWebIdentityRequest(input)
552 return out, req.Send() 581 return out, req.Send()
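A minimal sketch of AssumeRoleWithWebIdentity as described above: the call exchanges a web identity token (a placeholder string here) for temporary credentials and can be made without long-term AWS credentials, so anonymous credentials are used.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// No signing credentials are required for this operation.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: credentials.AnonymousCredentials,
	}))
	svc := sts.New(sess)

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder
		RoleSessionName:  aws.String("app-session"),
		WebIdentityToken: aws.String("token-from-identity-provider"), // placeholder
		DurationSeconds:  aws.Int64(3600),
	})
	if err != nil {
		fmt.Println("assume role with web identity failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.SubjectFromWebIdentityToken))
}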
@@ -572,19 +601,18 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
572 601
573// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the 602// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
574// client's request for the DecodeAuthorizationMessage operation. The "output" return 603// client's request for the DecodeAuthorizationMessage operation. The "output" return
575// value can be used to capture response data after the request's "Send" method 604// value will be populated with the request's response once the request completes
576// is called. 605// successfully.
577// 606//
578// See DecodeAuthorizationMessage for usage and error information. 607// Use "Send" method on the returned Request to send the API call to the service.
608// the "output" return value is not valid until after Send returns without error.
579// 609//
580// Creating a request object using this method should be used when you want to inject 610// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
581// custom logic into the request's lifecycle using a custom handler, or if you want to 611// API call, and error handling.
582// access properties on the request object before or after sending the request. If 612//
583// you just want the service response, call the DecodeAuthorizationMessage method directly 613// This method is useful when you want to inject custom logic or configuration
584// instead. 614// into the SDK's request lifecycle. Such as custom headers, or retry logic.
585// 615//
586// Note: You must call the "Send" method on the returned request object in order
587// to execute the request.
588// 616//
589// // Example sending a request using the DecodeAuthorizationMessageRequest method. 617// // Example sending a request using the DecodeAuthorizationMessageRequest method.
590// req, resp := client.DecodeAuthorizationMessageRequest(params) 618// req, resp := client.DecodeAuthorizationMessageRequest(params)
@@ -594,7 +622,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
594// fmt.Println(resp) 622// fmt.Println(resp)
595// } 623// }
596// 624//
597// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage 625// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
598func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { 626func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
599 op := &request.Operation{ 627 op := &request.Operation{
600 Name: opDecodeAuthorizationMessage, 628 Name: opDecodeAuthorizationMessage,
@@ -659,7 +687,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
659// invalid. This can happen if the token contains invalid characters, such as 687// invalid. This can happen if the token contains invalid characters, such as
660// linebreaks. 688// linebreaks.
661// 689//
662// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage 690// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
663func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { 691func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
664 req, out := c.DecodeAuthorizationMessageRequest(input) 692 req, out := c.DecodeAuthorizationMessageRequest(input)
665 return out, req.Send() 693 return out, req.Send()
@@ -685,19 +713,18 @@ const opGetCallerIdentity = "GetCallerIdentity"
685 713
686// GetCallerIdentityRequest generates a "aws/request.Request" representing the 714// GetCallerIdentityRequest generates a "aws/request.Request" representing the
687// client's request for the GetCallerIdentity operation. The "output" return 715// client's request for the GetCallerIdentity operation. The "output" return
688// value can be used to capture response data after the request's "Send" method 716// value will be populated with the request's response once the request completes
689// is called. 717// successfully.
718//
719// Use "Send" method on the returned Request to send the API call to the service.
720// the "output" return value is not valid until after Send returns without error.
690// 721//
691// See GetCallerIdentity for usage and error information. 722// See GetCallerIdentity for more information on using the GetCallerIdentity
723// API call, and error handling.
692// 724//
693// Creating a request object using this method should be used when you want to inject 725// This method is useful when you want to inject custom logic or configuration
694// custom logic into the request's lifecycle using a custom handler, or if you want to 726// into the SDK's request lifecycle. Such as custom headers, or retry logic.
695// access properties on the request object before or after sending the request. If
696// you just want the service response, call the GetCallerIdentity method directly
697// instead.
698// 727//
699// Note: You must call the "Send" method on the returned request object in order
700// to execute the request.
701// 728//
702// // Example sending a request using the GetCallerIdentityRequest method. 729// // Example sending a request using the GetCallerIdentityRequest method.
703// req, resp := client.GetCallerIdentityRequest(params) 730// req, resp := client.GetCallerIdentityRequest(params)
@@ -707,7 +734,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
707// fmt.Println(resp) 734// fmt.Println(resp)
708// } 735// }
709// 736//
710// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity 737// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
711func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { 738func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
712 op := &request.Operation{ 739 op := &request.Operation{
713 Name: opGetCallerIdentity, 740 Name: opGetCallerIdentity,
@@ -735,7 +762,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
735// 762//
736// See the AWS API reference guide for AWS Security Token Service's 763// See the AWS API reference guide for AWS Security Token Service's
737// API operation GetCallerIdentity for usage and error information. 764// API operation GetCallerIdentity for usage and error information.
738// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity 765// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
739func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { 766func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
740 req, out := c.GetCallerIdentityRequest(input) 767 req, out := c.GetCallerIdentityRequest(input)
741 return out, req.Send() 768 return out, req.Send()
@@ -761,19 +788,18 @@ const opGetFederationToken = "GetFederationToken"
761 788
762// GetFederationTokenRequest generates a "aws/request.Request" representing the 789// GetFederationTokenRequest generates a "aws/request.Request" representing the
763// client's request for the GetFederationToken operation. The "output" return 790// client's request for the GetFederationToken operation. The "output" return
764// value can be used to capture response data after the request's "Send" method 791// value will be populated with the request's response once the request completes
765// is called. 792// successfully.
766// 793//
767// See GetFederationToken for usage and error information. 794// Use "Send" method on the returned Request to send the API call to the service.
795// the "output" return value is not valid until after Send returns without error.
768// 796//
769// Creating a request object using this method should be used when you want to inject 797// See GetFederationToken for more information on using the GetFederationToken
770// custom logic into the request's lifecycle using a custom handler, or if you want to 798// API call, and error handling.
771// access properties on the request object before or after sending the request. If 799//
772// you just want the service response, call the GetFederationToken method directly 800// This method is useful when you want to inject custom logic or configuration
773// instead. 801// into the SDK's request lifecycle. Such as custom headers, or retry logic.
774// 802//
775// Note: You must call the "Send" method on the returned request object in order
776// to execute the request.
777// 803//
778// // Example sending a request using the GetFederationTokenRequest method. 804// // Example sending a request using the GetFederationTokenRequest method.
779// req, resp := client.GetFederationTokenRequest(params) 805// req, resp := client.GetFederationTokenRequest(params)
@@ -783,7 +809,7 @@ const opGetFederationToken = "GetFederationToken"
783// fmt.Println(resp) 809// fmt.Println(resp)
784// } 810// }
785// 811//
786// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken 812// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
787func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { 813func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
788 op := &request.Operation{ 814 op := &request.Operation{
789 Name: opGetFederationToken, 815 Name: opGetFederationToken,
@@ -905,7 +931,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
905// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 931// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
906// in the IAM User Guide. 932// in the IAM User Guide.
907// 933//
908// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken 934// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
909func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { 935func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
910 req, out := c.GetFederationTokenRequest(input) 936 req, out := c.GetFederationTokenRequest(input)
911 return out, req.Send() 937 return out, req.Send()
@@ -931,19 +957,18 @@ const opGetSessionToken = "GetSessionToken"
931 957
932// GetSessionTokenRequest generates a "aws/request.Request" representing the 958// GetSessionTokenRequest generates a "aws/request.Request" representing the
933// client's request for the GetSessionToken operation. The "output" return 959// client's request for the GetSessionToken operation. The "output" return
934// value can be used to capture response data after the request's "Send" method 960// value will be populated with the request's response once the request completes
935// is called. 961// successfully.
962//
963// Use "Send" method on the returned Request to send the API call to the service.
964// the "output" return value is not valid until after Send returns without error.
936// 965//
937// See GetSessionToken for usage and error information. 966// See GetSessionToken for more information on using the GetSessionToken
967// API call, and error handling.
938// 968//
939// Creating a request object using this method should be used when you want to inject 969// This method is useful when you want to inject custom logic or configuration
940// custom logic into the request's lifecycle using a custom handler, or if you want to 970// into the SDK's request lifecycle. Such as custom headers, or retry logic.
941// access properties on the request object before or after sending the request. If
942// you just want the service response, call the GetSessionToken method directly
943// instead.
944// 971//
945// Note: You must call the "Send" method on the returned request object in order
946// to execute the request.
947// 972//
948// // Example sending a request using the GetSessionTokenRequest method. 973// // Example sending a request using the GetSessionTokenRequest method.
949// req, resp := client.GetSessionTokenRequest(params) 974// req, resp := client.GetSessionTokenRequest(params)
@@ -953,7 +978,7 @@ const opGetSessionToken = "GetSessionToken"
953// fmt.Println(resp) 978// fmt.Println(resp)
954// } 979// }
955// 980//
956// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken 981// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
957func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { 982func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
958 op := &request.Operation{ 983 op := &request.Operation{
959 Name: opGetSessionToken, 984 Name: opGetSessionToken,
@@ -1034,7 +1059,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
1034// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) 1059// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
1035// in the IAM User Guide. 1060// in the IAM User Guide.
1036// 1061//
1037// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken 1062// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
1038func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { 1063func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
1039 req, out := c.GetSessionTokenRequest(input) 1064 req, out := c.GetSessionTokenRequest(input)
1040 return out, req.Send() 1065 return out, req.Send()
@@ -1056,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken
1056 return out, req.Send() 1081 return out, req.Send()
1057} 1082}
1058 1083
1059// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
1060type AssumeRoleInput struct { 1084type AssumeRoleInput struct {
1061 _ struct{} `type:"structure"` 1085 _ struct{} `type:"structure"`
1062 1086
1063 // The duration, in seconds, of the role session. The value can range from 900 1087 // The duration, in seconds, of the role session. The value can range from 900
1064 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set 1088 // seconds (15 minutes) up to the maximum session duration setting for the role.
1065 // to 3600 seconds. 1089 // This setting can have a value from 1 hour to 12 hours. If you specify a value
1090 // higher than this setting, the operation fails. For example, if you specify
1091 // a session duration of 12 hours, but your administrator set the maximum session
1092 // duration to 6 hours, your operation fails. To learn how to view the maximum
1093 // value for your role, see View the Maximum Session Duration Setting for a
1094 // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
1095 // in the IAM User Guide.
1066 // 1096 //
1067 // This is separate from the duration of a console session that you might request 1097 // By default, the value is set to 3600 seconds.
1068 // using the returned credentials. The request to the federation endpoint for 1098 //
1069 // a console sign-in token takes a SessionDuration parameter that specifies 1099 // The DurationSeconds parameter is separate from the duration of a console
1070 // the maximum length of the console session, separately from the DurationSeconds 1100 // session that you might request using the returned credentials. The request
1071 // parameter on this API. For more information, see Creating a URL that Enables 1101 // to the federation endpoint for a console sign-in token takes a SessionDuration
1072 // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) 1102 // parameter that specifies the maximum length of the console session. For more
1103 // information, see Creating a URL that Enables Federated Users to Access the
1104 // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1073 // in the IAM User Guide. 1105 // in the IAM User Guide.
1074 DurationSeconds *int64 `min:"900" type:"integer"` 1106 DurationSeconds *int64 `min:"900" type:"integer"`
1075 1107
@@ -1248,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
1248 1280
1249// Contains the response to a successful AssumeRole request, including temporary 1281// Contains the response to a successful AssumeRole request, including temporary
1250// AWS credentials that can be used to make AWS requests. 1282// AWS credentials that can be used to make AWS requests.
1251// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
1252type AssumeRoleOutput struct { 1283type AssumeRoleOutput struct {
1253 _ struct{} `type:"structure"` 1284 _ struct{} `type:"structure"`
1254 1285
@@ -1302,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
1302 return s 1333 return s
1303} 1334}
1304 1335
1305// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
1306type AssumeRoleWithSAMLInput struct { 1336type AssumeRoleWithSAMLInput struct {
1307 _ struct{} `type:"structure"` 1337 _ struct{} `type:"structure"`
1308 1338
1309 // The duration, in seconds, of the role session. The value can range from 900 1339 // The duration, in seconds, of the role session. Your role session lasts for
1310 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set 1340 // the duration that you specify for the DurationSeconds parameter, or until
1311 // to 3600 seconds. An expiration can also be specified in the SAML authentication 1341 // the time specified in the SAML authentication response's SessionNotOnOrAfter
1312 // response's SessionNotOnOrAfter value. The actual expiration time is whichever 1342 // value, whichever is shorter. You can provide a DurationSeconds value from
1313 // value is shorter. 1343 // 900 seconds (15 minutes) up to the maximum session duration setting for the
1344 // role. This setting can have a value from 1 hour to 12 hours. If you specify
1345 // a value higher than this setting, the operation fails. For example, if you
1346 // specify a session duration of 12 hours, but your administrator set the maximum
1347 // session duration to 6 hours, your operation fails. To learn how to view the
1348 // maximum value for your role, see View the Maximum Session Duration Setting
1349 // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
1350 // in the IAM User Guide.
1351 //
1352 // By default, the value is set to 3600 seconds.
1314 // 1353 //
1315 // This is separate from the duration of a console session that you might request 1354 // The DurationSeconds parameter is separate from the duration of a console
1316 // using the returned credentials. The request to the federation endpoint for 1355 // session that you might request using the returned credentials. The request
1317 // a console sign-in token takes a SessionDuration parameter that specifies 1356 // to the federation endpoint for a console sign-in token takes a SessionDuration
1318 // the maximum length of the console session, separately from the DurationSeconds 1357 // parameter that specifies the maximum length of the console session. For more
1319 // parameter on this API. For more information, see Enabling SAML 2.0 Federated 1358 // information, see Creating a URL that Enables Federated Users to Access the
1320 // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) 1359 // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1321 // in the IAM User Guide. 1360 // in the IAM User Guide.
1322 DurationSeconds *int64 `min:"900" type:"integer"` 1361 DurationSeconds *int64 `min:"900" type:"integer"`
1323 1362
@@ -1443,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML
1443 1482
1444// Contains the response to a successful AssumeRoleWithSAML request, including 1483// Contains the response to a successful AssumeRoleWithSAML request, including
1445// temporary AWS credentials that can be used to make AWS requests. 1484// temporary AWS credentials that can be used to make AWS requests.
1446// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
1447type AssumeRoleWithSAMLOutput struct { 1485type AssumeRoleWithSAMLOutput struct {
1448 _ struct{} `type:"structure"` 1486 _ struct{} `type:"structure"`
1449 1487
@@ -1555,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO
1555 return s 1593 return s
1556} 1594}
1557 1595
1558// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
1559type AssumeRoleWithWebIdentityInput struct { 1596type AssumeRoleWithWebIdentityInput struct {
1560 _ struct{} `type:"structure"` 1597 _ struct{} `type:"structure"`
1561 1598
1562 // The duration, in seconds, of the role session. The value can range from 900 1599 // The duration, in seconds, of the role session. The value can range from 900
1563 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set 1600 // seconds (15 minutes) up to the maximum session duration setting for the role.
1564 // to 3600 seconds. 1601 // This setting can have a value from 1 hour to 12 hours. If you specify a value
1602 // higher than this setting, the operation fails. For example, if you specify
1603 // a session duration of 12 hours, but your administrator set the maximum session
1604 // duration to 6 hours, your operation fails. To learn how to view the maximum
1605 // value for your role, see View the Maximum Session Duration Setting for a
1606 // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
1607 // in the IAM User Guide.
1608 //
1609 // By default, the value is set to 3600 seconds.
1565 // 1610 //
1566 // This is separate from the duration of a console session that you might request 1611 // The DurationSeconds parameter is separate from the duration of a console
1567 // using the returned credentials. The request to the federation endpoint for 1612 // session that you might request using the returned credentials. The request
1568 // a console sign-in token takes a SessionDuration parameter that specifies 1613 // to the federation endpoint for a console sign-in token takes a SessionDuration
1569 // the maximum length of the console session, separately from the DurationSeconds 1614 // parameter that specifies the maximum length of the console session. For more
1570 // parameter on this API. For more information, see Creating a URL that Enables 1615 // information, see Creating a URL that Enables Federated Users to Access the
1571 // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) 1616 // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1572 // in the IAM User Guide. 1617 // in the IAM User Guide.
1573 DurationSeconds *int64 `min:"900" type:"integer"` 1618 DurationSeconds *int64 `min:"900" type:"integer"`
1574 1619
@@ -1718,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo
1718 1763
1719// Contains the response to a successful AssumeRoleWithWebIdentity request, 1764// Contains the response to a successful AssumeRoleWithWebIdentity request,
1720// including temporary AWS credentials that can be used to make AWS requests. 1765// including temporary AWS credentials that can be used to make AWS requests.
1721// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
1722type AssumeRoleWithWebIdentityOutput struct { 1766type AssumeRoleWithWebIdentityOutput struct {
1723 _ struct{} `type:"structure"` 1767 _ struct{} `type:"structure"`
1724 1768
@@ -1811,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin
1811 1855
1812// The identifiers for the temporary security credentials that the operation 1856// The identifiers for the temporary security credentials that the operation
1813// returns. 1857// returns.
1814// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
1815type AssumedRoleUser struct { 1858type AssumedRoleUser struct {
1816 _ struct{} `type:"structure"` 1859 _ struct{} `type:"structure"`
1817 1860
@@ -1854,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
1854} 1897}
1855 1898
1856// AWS credentials for API authentication. 1899// AWS credentials for API authentication.
1857// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
1858type Credentials struct { 1900type Credentials struct {
1859 _ struct{} `type:"structure"` 1901 _ struct{} `type:"structure"`
1860 1902
@@ -1866,7 +1908,7 @@ type Credentials struct {
1866 // The date on which the current credentials expire. 1908 // The date on which the current credentials expire.
1867 // 1909 //
1868 // Expiration is a required field 1910 // Expiration is a required field
1869 Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` 1911 Expiration *time.Time `type:"timestamp" required:"true"`
1870 1912
1871 // The secret access key that can be used to sign requests. 1913 // The secret access key that can be used to sign requests.
1872 // 1914 //
@@ -1913,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials {
1913 return s 1955 return s
1914} 1956}
1915 1957
1916// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
1917type DecodeAuthorizationMessageInput struct { 1958type DecodeAuthorizationMessageInput struct {
1918 _ struct{} `type:"structure"` 1959 _ struct{} `type:"structure"`
1919 1960
@@ -1958,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut
1958// A document that contains additional information about the authorization status 1999// A document that contains additional information about the authorization status
1959// of a request from an encoded message that is returned in response to an AWS 2000// of a request from an encoded message that is returned in response to an AWS
1960// request. 2001// request.
1961// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
1962type DecodeAuthorizationMessageOutput struct { 2002type DecodeAuthorizationMessageOutput struct {
1963 _ struct{} `type:"structure"` 2003 _ struct{} `type:"structure"`
1964 2004
@@ -1983,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu
1983} 2023}
1984 2024
1985// Identifiers for the federated user that is associated with the credentials. 2025// Identifiers for the federated user that is associated with the credentials.
1986// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
1987type FederatedUser struct { 2026type FederatedUser struct {
1988 _ struct{} `type:"structure"` 2027 _ struct{} `type:"structure"`
1989 2028
@@ -2024,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
2024 return s 2063 return s
2025} 2064}
2026 2065
2027// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
2028type GetCallerIdentityInput struct { 2066type GetCallerIdentityInput struct {
2029 _ struct{} `type:"structure"` 2067 _ struct{} `type:"structure"`
2030} 2068}
@@ -2041,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string {
2041 2079
2042// Contains the response to a successful GetCallerIdentity request, including 2080// Contains the response to a successful GetCallerIdentity request, including
2043// information about the entity making the request. 2081// information about the entity making the request.
2044// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
2045type GetCallerIdentityOutput struct { 2082type GetCallerIdentityOutput struct {
2046 _ struct{} `type:"structure"` 2083 _ struct{} `type:"structure"`
2047 2084
@@ -2087,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
2087 return s 2124 return s
2088} 2125}
2089 2126
2090// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
2091type GetFederationTokenInput struct { 2127type GetFederationTokenInput struct {
2092 _ struct{} `type:"structure"` 2128 _ struct{} `type:"structure"`
2093 2129
@@ -2196,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
2196 2232
2197// Contains the response to a successful GetFederationToken request, including 2233// Contains the response to a successful GetFederationToken request, including
2198// temporary AWS credentials that can be used to make AWS requests. 2234// temporary AWS credentials that can be used to make AWS requests.
2199// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
2200type GetFederationTokenOutput struct { 2235type GetFederationTokenOutput struct {
2201 _ struct{} `type:"structure"` 2236 _ struct{} `type:"structure"`
2202 2237
@@ -2249,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo
2249 return s 2284 return s
2250} 2285}
2251 2286
2252// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
2253type GetSessionTokenInput struct { 2287type GetSessionTokenInput struct {
2254 _ struct{} `type:"structure"` 2288 _ struct{} `type:"structure"`
2255 2289
@@ -2334,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
2334 2368
2335// Contains the response to a successful GetSessionToken request, including 2369// Contains the response to a successful GetSessionToken request, including
2336// temporary AWS credentials that can be used to make AWS requests. 2370// temporary AWS credentials that can be used to make AWS requests.
2337// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
2338type GetSessionTokenOutput struct { 2371type GetSessionTokenOutput struct {
2339 _ struct{} `type:"structure"` 2372 _ struct{} `type:"structure"`
2340 2373
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
index d2af518..ef681ab 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -56,69 +56,17 @@
56// 56//
57// Using the Client 57// Using the Client
58// 58//
59// To use the client for AWS Security Token Service you will first need 59// To contact AWS Security Token Service with the SDK use the New function to create
60// to create a new instance of it. 60// a new service client. With that client you can make API requests to the service.
61// These clients are safe to use concurrently.
61// 62//
62// When creating a client for an AWS service you'll first need to have a Session 63// See the SDK's documentation for more information on how to use the SDK.
63// already created. The Session provides configuration that can be shared
64// between multiple service clients. Additional configuration can be applied to
65// the Session and service's client when they are constructed. The aws package's
66// Config type contains several fields such as Region for the AWS Region the
67// client should make API requests to. The optional Config value can be provided
68// as the variadic argument for Sessions and client creation.
69//
70// Once the service's client is created you can use it to make API requests to the
71// AWS service. These clients are safe to use concurrently.
72//
73// // Create a session to share configuration, and load external configuration.
74// sess := session.Must(session.NewSession())
75//
76// // Create the service's client with the session.
77// svc := sts.New(sess)
78//
79// See the SDK's documentation for more information on how to use service clients.
80// https://docs.aws.amazon.com/sdk-for-go/api/ 64// https://docs.aws.amazon.com/sdk-for-go/api/
81// 65//
82// See aws package's Config type for more information on configuration options. 66// See aws.Config documentation for more information on configuring SDK clients.
83// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config 67// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
84// 68//
85// See the AWS Security Token Service client STS for more 69// See the AWS Security Token Service client STS for more
86// information on creating the service's client. 70// information on creating a client for this service.
87// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New 71// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
88//
89// Once the client is created you can make an API request to the service.
90// Each API method takes an input parameter, and returns the service response
91// and an error.
92//
93// The API method will document which error codes can be returned by the service
94// for the operation if the service models the API operation's errors. These
95// errors will also be available as const strings prefixed with "ErrCode".
96//
97// result, err := svc.AssumeRole(params)
98// if err != nil {
99// // Cast err to awserr.Error to handle specific error codes.
100// aerr, ok := err.(awserr.Error)
101// if ok && aerr.Code() == <error code to check for> {
102// // Specific error code handling
103// }
104// return err
105// }
106//
107// fmt.Println("AssumeRole result:")
108// fmt.Println(result)
109//
110// Using the Client with Context
111//
112// The service's client also provides methods to make API requests with a Context
113// value. This allows you to control the timeout, and cancellation of pending
114// requests. These methods also take request Option as variadic parameter to apply
115// additional configuration to the API request.
116//
117// ctx := context.Background()
118//
119// result, err := svc.AssumeRoleWithContext(ctx, params)
120//
121// See the request package documentation for more information on using Context pattern
122// with the SDK.
123// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
124package sts 72package sts
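The rewritten doc.go above now only summarizes client creation, so here is a minimal sketch of the flow it describes, assembled from the removed example (it assumes the usual aws-sdk-go session and sts imports; GetCallerIdentity is used simply because it needs no parameters):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Create a session to share configuration and load external configuration.
	sess := session.Must(session.NewSession())

	// Create the service's client with the session; clients are safe to use concurrently.
	svc := sts.New(sess)

	// Any STS API call follows the same pattern; GetCallerIdentity takes no required input.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Arn))
}
```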
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
index 1ee5839..185c914 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -29,8 +29,9 @@ var initRequest func(*request.Request)
29 29
30// Service information constants 30// Service information constants
31const ( 31const (
32 ServiceName = "sts" // Service endpoint prefix API calls made to. 32 ServiceName = "sts" // Name of service.
33 EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. 33 EndpointsID = ServiceName // ID to lookup a service endpoint with.
34 ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
34) 35)
35 36
36// New creates a new instance of the STS client with a session. 37// New creates a new instance of the STS client with a session.
@@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
55 cfg, 56 cfg,
56 metadata.ClientInfo{ 57 metadata.ClientInfo{
57 ServiceName: ServiceName, 58 ServiceName: ServiceName,
59 ServiceID: ServiceID,
58 SigningName: signingName, 60 SigningName: signingName,
59 SigningRegion: signingRegion, 61 SigningRegion: signingRegion,
60 Endpoint: endpoint, 62 Endpoint: endpoint,
diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore
new file mode 100644
index 0000000..9e13114
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/.gitignore
@@ -0,0 +1,2 @@
1example/example
2example/example.exe
diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS
new file mode 100644
index 0000000..ff177f6
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS
@@ -0,0 +1,201 @@
1 Apache License
2 Version 2.0, January 2004
3 http://www.apache.org/licenses/
4
5TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
71. Definitions.
8
9 "License" shall mean the terms and conditions for use, reproduction,
10 and distribution as defined by Sections 1 through 9 of this document.
11
12 "Licensor" shall mean the copyright owner or entity authorized by
13 the copyright owner that is granting the License.
14
15 "Legal Entity" shall mean the union of the acting entity and all
16 other entities that control, are controlled by, or are under common
17 control with that entity. For the purposes of this definition,
18 "control" means (i) the power, direct or indirect, to cause the
19 direction or management of such entity, whether by contract or
20 otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 outstanding shares, or (iii) beneficial ownership of such entity.
22
23 "You" (or "Your") shall mean an individual or Legal Entity
24 exercising permissions granted by this License.
25
26 "Source" form shall mean the preferred form for making modifications,
27 including but not limited to software source code, documentation
28 source, and configuration files.
29
30 "Object" form shall mean any form resulting from mechanical
31 transformation or translation of a Source form, including but
32 not limited to compiled object code, generated documentation,
33 and conversions to other media types.
34
35 "Work" shall mean the work of authorship, whether in Source or
36 Object form, made available under the License, as indicated by a
37 copyright notice that is included in or attached to the work
38 (an example is provided in the Appendix below).
39
40 "Derivative Works" shall mean any work, whether in Source or Object
41 form, that is based on (or derived from) the Work and for which the
42 editorial revisions, annotations, elaborations, or other modifications
43 represent, as a whole, an original work of authorship. For the purposes
44 of this License, Derivative Works shall not include works that remain
45 separable from, or merely link (or bind by name) to the interfaces of,
46 the Work and Derivative Works thereof.
47
48 "Contribution" shall mean any work of authorship, including
49 the original version of the Work and any modifications or additions
50 to that Work or Derivative Works thereof, that is intentionally
51 submitted to Licensor for inclusion in the Work by the copyright owner
52 or by an individual or Legal Entity authorized to submit on behalf of
53 the copyright owner. For the purposes of this definition, "submitted"
54 means any form of electronic, verbal, or written communication sent
55 to the Licensor or its representatives, including but not limited to
56 communication on electronic mailing lists, source code control systems,
57 and issue tracking systems that are managed by, or on behalf of, the
58 Licensor for the purpose of discussing and improving the Work, but
59 excluding communication that is conspicuously marked or otherwise
60 designated in writing by the copyright owner as "Not a Contribution."
61
62 "Contributor" shall mean Licensor and any individual or Legal Entity
63 on behalf of whom a Contribution has been received by Licensor and
64 subsequently incorporated within the Work.
65
662. Grant of Copyright License. Subject to the terms and conditions of
67 this License, each Contributor hereby grants to You a perpetual,
68 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 copyright license to reproduce, prepare Derivative Works of,
70 publicly display, publicly perform, sublicense, and distribute the
71 Work and such Derivative Works in Source or Object form.
72
733. Grant of Patent License. Subject to the terms and conditions of
74 this License, each Contributor hereby grants to You a perpetual,
75 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 (except as stated in this section) patent license to make, have made,
77 use, offer to sell, sell, import, and otherwise transfer the Work,
78 where such license applies only to those patent claims licensable
79 by such Contributor that are necessarily infringed by their
80 Contribution(s) alone or by combination of their Contribution(s)
81 with the Work to which such Contribution(s) was submitted. If You
82 institute patent litigation against any entity (including a
83 cross-claim or counterclaim in a lawsuit) alleging that the Work
84 or a Contribution incorporated within the Work constitutes direct
85 or contributory patent infringement, then any patent licenses
86 granted to You under this License for that Work shall terminate
87 as of the date such litigation is filed.
88
894. Redistribution. You may reproduce and distribute copies of the
90 Work or Derivative Works thereof in any medium, with or without
91 modifications, and in Source or Object form, provided that You
92 meet the following conditions:
93
94 (a) You must give any other recipients of the Work or
95 Derivative Works a copy of this License; and
96
97 (b) You must cause any modified files to carry prominent notices
98 stating that You changed the files; and
99
100 (c) You must retain, in the Source form of any Derivative Works
101 that You distribute, all copyright, patent, trademark, and
102 attribution notices from the Source form of the Work,
103 excluding those notices that do not pertain to any part of
104 the Derivative Works; and
105
106 (d) If the Work includes a "NOTICE" text file as part of its
107 distribution, then any Derivative Works that You distribute must
108 include a readable copy of the attribution notices contained
109 within such NOTICE file, excluding those notices that do not
110 pertain to any part of the Derivative Works, in at least one
111 of the following places: within a NOTICE text file distributed
112 as part of the Derivative Works; within the Source form or
113 documentation, if provided along with the Derivative Works; or,
114 within a display generated by the Derivative Works, if and
115 wherever such third-party notices normally appear. The contents
116 of the NOTICE file are for informational purposes only and
117 do not modify the License. You may add Your own attribution
118 notices within Derivative Works that You distribute, alongside
119 or as an addendum to the NOTICE text from the Work, provided
120 that such additional attribution notices cannot be construed
121 as modifying the License.
122
123 You may add Your own copyright statement to Your modifications and
124 may provide additional or different license terms and conditions
125 for use, reproduction, or distribution of Your modifications, or
126 for any such Derivative Works as a whole, provided Your use,
127 reproduction, and distribution of the Work otherwise complies with
128 the conditions stated in this License.
129
1305. Submission of Contributions. Unless You explicitly state otherwise,
131 any Contribution intentionally submitted for inclusion in the Work
132 by You to the Licensor shall be under the terms and conditions of
133 this License, without any additional terms or conditions.
134 Notwithstanding the above, nothing herein shall supersede or modify
135 the terms of any separate license agreement you may have executed
136 with Licensor regarding such Contributions.
137
1386. Trademarks. This License does not grant permission to use the trade
139 names, trademarks, service marks, or product names of the Licensor,
140 except as required for reasonable and customary use in describing the
141 origin of the Work and reproducing the content of the NOTICE file.
142
1437. Disclaimer of Warranty. Unless required by applicable law or
144 agreed to in writing, Licensor provides the Work (and each
145 Contributor provides its Contributions) on an "AS IS" BASIS,
146 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 implied, including, without limitation, any warranties or conditions
148 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 PARTICULAR PURPOSE. You are solely responsible for determining the
150 appropriateness of using or redistributing the Work and assume any
151 risks associated with Your exercise of permissions under this License.
152
1538. Limitation of Liability. In no event and under no legal theory,
154 whether in tort (including negligence), contract, or otherwise,
155 unless required by applicable law (such as deliberate and grossly
156 negligent acts) or agreed to in writing, shall any Contributor be
157 liable to You for damages, including any direct, indirect, special,
158 incidental, or consequential damages of any character arising as a
159 result of this License or out of the use or inability to use the
160 Work (including but not limited to damages for loss of goodwill,
161 work stoppage, computer failure or malfunction, or any and all
162 other commercial damages or losses), even if such Contributor
163 has been advised of the possibility of such damages.
164
1659. Accepting Warranty or Additional Liability. While redistributing
166 the Work or Derivative Works thereof, You may choose to offer,
167 and charge a fee for, acceptance of support, warranty, indemnity,
168 or other liability obligations and/or rights consistent with this
169 License. However, in accepting such obligations, You may act only
170 on Your own behalf and on Your sole responsibility, not on behalf
171 of any other Contributor, and only if You agree to indemnify,
172 defend, and hold each Contributor harmless for any liability
173 incurred by, or claims asserted against, such Contributor by reason
174 of your accepting any such warranty or additional liability.
175
176END OF TERMS AND CONDITIONS
177
178APPENDIX: How to apply the Apache License to your work.
179
180 To apply the Apache License to your work, attach the following
181 boilerplate notice, with the fields enclosed by brackets "[]"
182 replaced with your own identifying information. (Don't include
183 the brackets!) The text should be enclosed in the appropriate
184 comment syntax for the file format. We also recommend that a
185 file or class name and description of purpose be included on the
186 same "printed page" as the copyright notice for easier
187 identification within third-party archives.
188
189Copyright [2013] [the CloudFoundry Authors]
190
191Licensed under the Apache License, Version 2.0 (the "License");
192you may not use this file except in compliance with the License.
193You may obtain a copy of the License at
194
195 http://www.apache.org/licenses/LICENSE-2.0
196
197Unless required by applicable law or agreed to in writing, software
198distributed under the License is distributed on an "AS IS" BASIS,
199WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200See the License for the specific language governing permissions and
201limitations under the License.
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md
new file mode 100644
index 0000000..fceda75
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/Readme.md
@@ -0,0 +1,30 @@
1# Speakeasy
2
3This package provides cross-platform Go (#golang) helpers for taking user input
4from the terminal while not echoing the input back (similar to `getpasswd`). The
5package uses syscalls to avoid any dependence on cgo, and is therefore
6compatible with cross-compiling.
7
8[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc]
9
10## Unicode
11
12Multi-byte unicode characters work successfully on Mac OS X. On Windows,
13however, this may be problematic (as is UTF in general on Windows). Other
14platforms have not been tested.
15
16## License
17
18The code herein was not written by me, but was compiled from two separate open
19source packages. Unix portions were imported from [gopass][gopass], while
20Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s
21[Windows terminal helpers][cf-ui-windows].
22
23The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly
24from the source (though I attempted to fill in the correct owner in the
25boilerplate copyright notice).
26
27[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI"
28[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers"
29[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org"
30[gopass]: https://code.google.com/p/gopass "gopass"
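A minimal usage sketch for the package introduced above; it assumes the program is attached to a real terminal so the echo-toggling syscalls can succeed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask writes the prompt to stdout, disables terminal echo, reads a line,
	// then restores echo before returning.
	password, err := speakeasy.Ask("Password: ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d characters\n", len(password))
}
```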
diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go
new file mode 100644
index 0000000..71c1dd1
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go
@@ -0,0 +1,49 @@
1package speakeasy
2
3import (
4 "fmt"
5 "io"
6 "os"
7 "strings"
8)
9
10// Ask the user to enter a password with input hidden. prompt is a string to
11// display before the user's input. Returns the provided password, or an error
12// if the command failed.
13func Ask(prompt string) (password string, err error) {
14 return FAsk(os.Stdout, prompt)
15}
16
17// FAsk is the same as Ask, except it is possible to specify the file to write
18// the prompt to. If 'nil' is passed as the writer, no prompt will be written.
19func FAsk(wr io.Writer, prompt string) (password string, err error) {
20 if wr != nil && prompt != "" {
21 fmt.Fprint(wr, prompt) // Display the prompt.
22 }
23 password, err = getPassword()
24
25 // Carriage return after the user input.
26 if wr != nil {
27 fmt.Fprintln(wr, "")
28 }
29 return
30}
31
32func readline() (value string, err error) {
33 var valb []byte
34 var n int
35 b := make([]byte, 1)
36 for {
37 // read one byte at a time so we don't accidentally read extra bytes
38 n, err = os.Stdin.Read(b)
39 if err != nil && err != io.EOF {
40 return "", err
41 }
42 if n == 0 || b[0] == '\n' {
43 break
44 }
45 valb = append(valb, b[0])
46 }
47
48 return strings.TrimSuffix(string(valb), "\r"), nil
49}
diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go
new file mode 100644
index 0000000..d99fda1
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go
@@ -0,0 +1,93 @@
1// based on https://code.google.com/p/gopass
2// Author: johnsiilver@gmail.com (John Doak)
3//
4// Original code is based on code by RogerV in the golang-nuts thread:
5// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247
6
7// +build darwin dragonfly freebsd linux netbsd openbsd solaris
8
9package speakeasy
10
11import (
12 "fmt"
13 "os"
14 "os/signal"
15 "strings"
16 "syscall"
17)
18
19const sttyArg0 = "/bin/stty"
20
21var (
22 sttyArgvEOff = []string{"stty", "-echo"}
23 sttyArgvEOn = []string{"stty", "echo"}
24)
25
26// getPassword gets input hidden from the terminal from a user. This is
27// accomplished by turning off terminal echo, reading input from the user and
28// finally turning on terminal echo.
29func getPassword() (password string, err error) {
30 sig := make(chan os.Signal, 10)
31 brk := make(chan bool)
32
33 // File descriptors for stdin, stdout, and stderr.
34 fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}
35
36 // Setup notifications of termination signals to channel sig, create a process to
37 // watch for these signals so we can turn back on echo if need be.
38 signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT,
39 syscall.SIGTERM)
40 go catchSignal(fd, sig, brk)
41
42 // Turn off the terminal echo.
43 pid, err := echoOff(fd)
44 if err != nil {
45 return "", err
46 }
47
48 // Turn on the terminal echo and stop listening for signals.
49 defer signal.Stop(sig)
50 defer close(brk)
51 defer echoOn(fd)
52
53 syscall.Wait4(pid, nil, 0, nil)
54
55 line, err := readline()
56 if err == nil {
57 password = strings.TrimSpace(line)
58 } else {
59 err = fmt.Errorf("failed during password entry: %s", err)
60 }
61
62 return password, err
63}
64
65// echoOff turns off the terminal echo.
66func echoOff(fd []uintptr) (int, error) {
67 pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd})
68 if err != nil {
69 return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err)
70 }
71 return pid, nil
72}
73
74// echoOn turns back on the terminal echo.
75func echoOn(fd []uintptr) {
76 // Turn on the terminal echo.
77 pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd})
78 if e == nil {
79 syscall.Wait4(pid, nil, 0, nil)
80 }
81}
82
83// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn
84// terminal echo back on before the program ends. Otherwise the user is left
85// with echo off on their terminal.
86func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) {
87 select {
88 case <-sig:
89 echoOn(fd)
90 os.Exit(-1)
91 case <-brk:
92 }
93}
diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go
new file mode 100644
index 0000000..c2093a8
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go
@@ -0,0 +1,41 @@
1// +build windows
2
3package speakeasy
4
5import (
6 "syscall"
7)
8
9// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT:
10// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
11const ENABLE_ECHO_INPUT = 0x0004
12
13func getPassword() (password string, err error) {
14 var oldMode uint32
15
16 err = syscall.GetConsoleMode(syscall.Stdin, &oldMode)
17 if err != nil {
18 return
19 }
20
21 var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)
22
23 err = setConsoleMode(syscall.Stdin, newMode)
24 defer setConsoleMode(syscall.Stdin, oldMode)
25 if err != nil {
26 return
27 }
28
29 return readline()
30}
31
32func setConsoleMode(console syscall.Handle, mode uint32) (err error) {
33 dll := syscall.MustLoadDLL("kernel32")
34 proc := dll.MustFindProc("SetConsoleMode")
35 r, _, err := proc.Call(uintptr(console), uintptr(mode))
36
37 if r == 0 {
38 return err
39 }
40 return nil
41}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index c836416..bc52e96 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -2,7 +2,7 @@ ISC License
2 2
3Copyright (c) 2012-2016 Dave Collins <dave@davec.name> 3Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
4 4
5Permission to use, copy, modify, and distribute this software for any 5Permission to use, copy, modify, and/or distribute this software for any
6purpose with or without fee is hereby granted, provided that the above 6purpose with or without fee is hereby granted, provided that the above
7copyright notice and this permission notice appear in all copies. 7copyright notice and this permission notice appear in all copies.
8 8
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 8a4a658..7929947 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -16,7 +16,9 @@
16// when the code is not running on Google App Engine, compiled by GopherJS, and 16// when the code is not running on Google App Engine, compiled by GopherJS, and
17// "-tags safe" is not added to the go build command line. The "disableunsafe" 17// "-tags safe" is not added to the go build command line. The "disableunsafe"
18// tag is deprecated and thus should not be used. 18// tag is deprecated and thus should not be used.
19// +build !js,!appengine,!safe,!disableunsafe 19// Go versions prior to 1.4 are disabled because they use a different layout
20// for interfaces which make the implementation of unsafeReflectValue more complex.
21// +build !js,!appengine,!safe,!disableunsafe,go1.4
20 22
21package spew 23package spew
22 24
@@ -34,80 +36,49 @@ const (
34 ptrSize = unsafe.Sizeof((*byte)(nil)) 36 ptrSize = unsafe.Sizeof((*byte)(nil))
35) 37)
36 38
39type flag uintptr
40
37var ( 41var (
38 // offsetPtr, offsetScalar, and offsetFlag are the offsets for the 42 // flagRO indicates whether the value field of a reflect.Value
39 // internal reflect.Value fields. These values are valid before golang 43 // is read-only.
40 // commit ecccf07e7f9d which changed the format. The are also valid 44 flagRO flag
41 // after commit 82f48826c6c7 which changed the format again to mirror 45
42 // the original format. Code in the init function updates these offsets 46 // flagAddr indicates whether the address of the reflect.Value's
43 // as necessary. 47 // value may be taken.
44 offsetPtr = uintptr(ptrSize) 48 flagAddr flag
45 offsetScalar = uintptr(0)
46 offsetFlag = uintptr(ptrSize * 2)
47
48 // flagKindWidth and flagKindShift indicate various bits that the
49 // reflect package uses internally to track kind information.
50 //
51 // flagRO indicates whether or not the value field of a reflect.Value is
52 // read-only.
53 //
54 // flagIndir indicates whether the value field of a reflect.Value is
55 // the actual data or a pointer to the data.
56 //
57 // These values are valid before golang commit 90a7c3c86944 which
58 // changed their positions. Code in the init function updates these
59 // flags as necessary.
60 flagKindWidth = uintptr(5)
61 flagKindShift = uintptr(flagKindWidth - 1)
62 flagRO = uintptr(1 << 0)
63 flagIndir = uintptr(1 << 1)
64) 49)
65 50
66func init() { 51// flagKindMask holds the bits that make up the kind
67 // Older versions of reflect.Value stored small integers directly in the 52// part of the flags field. In all the supported versions,
68 // ptr field (which is named val in the older versions). Versions 53// it is in the lower 5 bits.
69 // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named 54const flagKindMask = flag(0x1f)
70 // scalar for this purpose which unfortunately came before the flag
71 // field, so the offset of the flag field is different for those
72 // versions.
73 //
74 // This code constructs a new reflect.Value from a known small integer
75 // and checks if the size of the reflect.Value struct indicates it has
76 // the scalar field. When it does, the offsets are updated accordingly.
77 vv := reflect.ValueOf(0xf00)
78 if unsafe.Sizeof(vv) == (ptrSize * 4) {
79 offsetScalar = ptrSize * 2
80 offsetFlag = ptrSize * 3
81 }
82 55
83 // Commit 90a7c3c86944 changed the flag positions such that the low 56// Different versions of Go have used different
84 // order bits are the kind. This code extracts the kind from the flags 57// bit layouts for the flags type. This table
85 // field and ensures it's the correct type. When it's not, the flag 58// records the known combinations.
86 // order has been changed to the newer format, so the flags are updated 59var okFlags = []struct {
87 // accordingly. 60 ro, addr flag
88 upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) 61}{{
89 upfv := *(*uintptr)(upf) 62 // From Go 1.4 to 1.5
90 flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) 63 ro: 1 << 5,
91 if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { 64 addr: 1 << 7,
92 flagKindShift = 0 65}, {
93 flagRO = 1 << 5 66 // Up to Go tip.
94 flagIndir = 1 << 6 67 ro: 1<<5 | 1<<6,
95 68 addr: 1 << 8,
96 // Commit adf9b30e5594 modified the flags to separate the 69}}
97 // flagRO flag into two bits which specifies whether or not the 70
98 // field is embedded. This causes flagIndir to move over a bit 71var flagValOffset = func() uintptr {
99 // and means that flagRO is the combination of either of the 72 field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
100 // original flagRO bit and the new bit. 73 if !ok {
101 // 74 panic("reflect.Value has no flag field")
102 // This code detects the change by extracting what used to be
103 // the indirect bit to ensure it's set. When it's not, the flag
104 // order has been changed to the newer format, so the flags are
105 // updated accordingly.
106 if upfv&flagIndir == 0 {
107 flagRO = 3 << 5
108 flagIndir = 1 << 7
109 }
110 } 75 }
76 return field.Offset
77}()
78
79// flagField returns a pointer to the flag field of a reflect.Value.
80func flagField(v *reflect.Value) *flag {
81 return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
111} 82}
112 83
113// unsafeReflectValue converts the passed reflect.Value into a one that bypasses 84// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
119// This allows us to check for implementations of the Stringer and error 90// This allows us to check for implementations of the Stringer and error
120// interfaces to be used for pretty printing ordinarily unaddressable and 91// interfaces to be used for pretty printing ordinarily unaddressable and
121// inaccessible values such as unexported struct fields. 92// inaccessible values such as unexported struct fields.
122func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { 93func unsafeReflectValue(v reflect.Value) reflect.Value {
123 indirects := 1 94 if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
124 vt := v.Type() 95 return v
125 upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
126 rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
127 if rvf&flagIndir != 0 {
128 vt = reflect.PtrTo(v.Type())
129 indirects++
130 } else if offsetScalar != 0 {
131 // The value is in the scalar field when it's not one of the
132 // reference types.
133 switch vt.Kind() {
134 case reflect.Uintptr:
135 case reflect.Chan:
136 case reflect.Func:
137 case reflect.Map:
138 case reflect.Ptr:
139 case reflect.UnsafePointer:
140 default:
141 upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
142 offsetScalar)
143 }
144 } 96 }
97 flagFieldPtr := flagField(&v)
98 *flagFieldPtr &^= flagRO
99 *flagFieldPtr |= flagAddr
100 return v
101}
145 102
146 pv := reflect.NewAt(vt, upv) 103// Sanity checks against future reflect package changes
147 rv = pv 104// to the type or semantics of the Value.flag field.
148 for i := 0; i < indirects; i++ { 105func init() {
149 rv = rv.Elem() 106 field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
107 if !ok {
108 panic("reflect.Value has no flag field")
109 }
110 if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
111 panic("reflect.Value flag field has changed kind")
112 }
113 type t0 int
114 var t struct {
115 A t0
116 // t0 will have flagEmbedRO set.
117 t0
118 // a will have flagStickyRO set
119 a t0
120 }
121 vA := reflect.ValueOf(t).FieldByName("A")
122 va := reflect.ValueOf(t).FieldByName("a")
123 vt0 := reflect.ValueOf(t).FieldByName("t0")
124
125 // Infer flagRO from the difference between the flags
126 // for the (otherwise identical) fields in t.
127 flagPublic := *flagField(&vA)
128 flagWithRO := *flagField(&va) | *flagField(&vt0)
129 flagRO = flagPublic ^ flagWithRO
130
131 // Infer flagAddr from the difference between a value
132 // taken from a pointer and not.
133 vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
134 flagNoPtr := *flagField(&vA)
135 flagPtr := *flagField(&vPtrA)
136 flagAddr = flagNoPtr ^ flagPtr
137
138 // Check that the inferred flags tally with one of the known versions.
139 for _, f := range okFlags {
140 if flagRO == f.ro && flagAddr == f.addr {
141 return
142 }
150 } 143 }
151 return rv 144 panic("reflect.Value read-only flag has changed semantics")
152} 145}
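For context on what this bypass enables, a small illustrative sketch (the outer/inner types are invented for the example): on builds where the unsafe path is active, spew can invoke methods such as String on values reached through unexported fields, which the safe build skips.

```go
package main

import (
	"github.com/davecgh/go-spew/spew"
)

// inner has a String method that spew can only reach on an unexported field
// because unsafeReflectValue clears the read-only flag on the reflect.Value.
type inner struct{ id int }

func (i inner) String() string { return "inner#42" }

type outer struct {
	hidden inner // unexported: reflect marks values reached through it read-only
}

func main() {
	// On builds without the "safe"/appengine/js tags, spew.Dump prints
	// outer.hidden in full, including the result of its String method.
	spew.Dump(outer{hidden: inner{id: 42}})
}
```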
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index 1fe3cf3..205c28d 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -16,7 +16,7 @@
16// when the code is running on Google App Engine, compiled by GopherJS, or 16// when the code is running on Google App Engine, compiled by GopherJS, or
17// "-tags safe" is added to the go build command line. The "disableunsafe" 17// "-tags safe" is added to the go build command line. The "disableunsafe"
18// tag is deprecated and thus should not be used. 18// tag is deprecated and thus should not be used.
19// +build js appengine safe disableunsafe 19// +build js appengine safe disableunsafe !go1.4
20 20
21package spew 21package spew
22 22
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 7c519ff..1be8ce9 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
180 w.Write(closeParenBytes) 180 w.Write(closeParenBytes)
181} 181}
182 182
183// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' 183// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
184// prefix to Writer w. 184// prefix to Writer w.
185func printHexPtr(w io.Writer, p uintptr) { 185func printHexPtr(w io.Writer, p uintptr) {
186 // Null pointer. 186 // Null pointer.
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index df1d582..f78d89f 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -35,16 +35,16 @@ var (
35 35
36 // cCharRE is a regular expression that matches a cgo char. 36 // cCharRE is a regular expression that matches a cgo char.
37 // It is used to detect character arrays to hexdump them. 37 // It is used to detect character arrays to hexdump them.
38 cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") 38 cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
39 39
40 // cUnsignedCharRE is a regular expression that matches a cgo unsigned 40 // cUnsignedCharRE is a regular expression that matches a cgo unsigned
41 // char. It is used to detect unsigned character arrays to hexdump 41 // char. It is used to detect unsigned character arrays to hexdump
42 // them. 42 // them.
43 cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") 43 cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
44 44
45 // cUint8tCharRE is a regular expression that matches a cgo uint8_t. 45 // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
46 // It is used to detect uint8_t arrays to hexdump them. 46 // It is used to detect uint8_t arrays to hexdump them.
47 cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") 47 cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
48) 48)
49 49
50// dumpState contains information about the state of a dump operation. 50// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
143 // Display dereferenced value. 143 // Display dereferenced value.
144 d.w.Write(openParenBytes) 144 d.w.Write(openParenBytes)
145 switch { 145 switch {
146 case nilFound == true: 146 case nilFound:
147 d.w.Write(nilAngleBytes) 147 d.w.Write(nilAngleBytes)
148 148
149 case cycleFound == true: 149 case cycleFound:
150 d.w.Write(circularBytes) 150 d.w.Write(circularBytes)
151 151
152 default: 152 default:
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index c49875b..b04edb7 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
182 182
183 // Display dereferenced value. 183 // Display dereferenced value.
184 switch { 184 switch {
185 case nilFound == true: 185 case nilFound:
186 f.fs.Write(nilAngleBytes) 186 f.fs.Write(nilAngleBytes)
187 187
188 case cycleFound == true: 188 case cycleFound:
189 f.fs.Write(circularShortBytes) 189 f.fs.Write(circularShortBytes)
190 190
191 default: 191 default:
diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml
index 0064ba1..65c872b 100644
--- a/vendor/github.com/go-ini/ini/.travis.yml
+++ b/vendor/github.com/go-ini/ini/.travis.yml
@@ -1,14 +1,15 @@
1sudo: false 1sudo: false
2language: go 2language: go
3
4go: 3go:
5 - 1.4 4 - 1.4.x
6 - 1.5 5 - 1.5.x
7 - 1.6 6 - 1.6.x
8 - tip 7 - 1.7.x
8 - master
9 9
10script: 10script:
11 - go get -v github.com/smartystreets/goconvey 11 - go get golang.org/x/tools/cmd/cover
12 - go get github.com/smartystreets/goconvey
12 - go test -v -cover -race 13 - go test -v -cover -race
13 14
14notifications: 15notifications:
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
index 22a4234..8594742 100644
--- a/vendor/github.com/go-ini/ini/README.md
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -1,4 +1,4 @@
1INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) 1INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge)
2=== 2===
3 3
4![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) 4![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
@@ -106,6 +106,12 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
106 106
107The value of these keys is always `true`, and when you save to a file it will be kept in the same format as it was read. 107The value of these keys is always `true`, and when you save to a file it will be kept in the same format as it was read.
108 108
109To generate such keys in your program, you could use `NewBooleanKey`:
110
111```go
112key, err := sec.NewBooleanKey("skip-host-cache")
113```
114
109#### Comment 115#### Comment
110 116
111Take care that following format will be treated as comment: 117Take care that following format will be treated as comment:
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
index 3b4fb66..163432d 100644
--- a/vendor/github.com/go-ini/ini/README_ZH.md
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -99,6 +99,12 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
99 99
100The value of these keys is always `true`, and when saving to a file only the key name is written. 100The value of these keys is always `true`, and when saving to a file only the key name is written.
101 101
102If you want to generate this kind of key programmatically, you can use `NewBooleanKey`:
103
104```go
105key, err := sec.NewBooleanKey("skip-host-cache")
106```
107
102#### About Comments 108#### About Comments
103 109
104Content in the following cases is treated as a comment: 110Content in the following cases is treated as a comment:
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
index 77e0dbd..68d73aa 100644
--- a/vendor/github.com/go-ini/ini/ini.go
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -37,7 +37,7 @@ const (
37 37
38 // Maximum allowed depth when recursively substituting variable names. 38 // Maximum allowed depth when recursively substituting variable names.
39 _DEPTH_VALUES = 99 39 _DEPTH_VALUES = 99
40 _VERSION = "1.23.1" 40 _VERSION = "1.25.4"
41) 41)
42 42
43// Version returns current package version literal. 43// Version returns current package version literal.
@@ -176,6 +176,8 @@ type LoadOptions struct {
176 // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. 176 // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
177 // This type of keys are mostly used in my.cnf. 177 // This type of keys are mostly used in my.cnf.
178 AllowBooleanKeys bool 178 AllowBooleanKeys bool
179 // AllowShadows indicates whether to keep track of keys with the same name under the same section.
180 AllowShadows bool
179 // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise 181 // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
180 // conform to key/value pairs. Specify the names of those blocks here. 182 // conform to key/value pairs. Specify the names of those blocks here.
181 UnparseableSections []string 183 UnparseableSections []string
@@ -219,6 +221,12 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
219 return LoadSources(LoadOptions{Insensitive: true}, source, others...) 221 return LoadSources(LoadOptions{Insensitive: true}, source, others...)
220} 222}
221 223
224// ShadowLoad has exactly the same functionality as the Load function,
225// except that it allows having shadow keys.
226func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
227 return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
228}
229
222// Empty returns an empty file object. 230// Empty returns an empty file object.
223func Empty() *File { 231func Empty() *File {
224 // Ignore error here, we sure our data is good. 232 // Ignore error here, we sure our data is good.
@@ -441,6 +449,7 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
441 } 449 }
442 alignSpaces := bytes.Repeat([]byte(" "), alignLength) 450 alignSpaces := bytes.Repeat([]byte(" "), alignLength)
443 451
452 KEY_LIST:
444 for _, kname := range sec.keyList { 453 for _, kname := range sec.keyList {
445 key := sec.Key(kname) 454 key := sec.Key(kname)
446 if len(key.Comment) > 0 { 455 if len(key.Comment) > 0 {
@@ -467,28 +476,33 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
467 case strings.Contains(kname, "`"): 476 case strings.Contains(kname, "`"):
468 kname = `"""` + kname + `"""` 477 kname = `"""` + kname + `"""`
469 } 478 }
470 if _, err = buf.WriteString(kname); err != nil {
471 return 0, err
472 }
473 479
474 if key.isBooleanType { 480 for _, val := range key.ValueWithShadows() {
475 continue 481 if _, err = buf.WriteString(kname); err != nil {
476 } 482 return 0, err
483 }
477 484
478 // Write out alignment spaces before "=" sign 485 if key.isBooleanType {
479 if PrettyFormat { 486 if kname != sec.keyList[len(sec.keyList)-1] {
480 buf.Write(alignSpaces[:alignLength-len(kname)]) 487 buf.WriteString(LineBreak)
481 } 488 }
489 continue KEY_LIST
490 }
482 491
483 val := key.value 492 // Write out alignment spaces before "=" sign
484 // In case key value contains "\n", "`", "\"", "#" or ";" 493 if PrettyFormat {
485 if strings.ContainsAny(val, "\n`") { 494 buf.Write(alignSpaces[:alignLength-len(kname)])
486 val = `"""` + val + `"""` 495 }
487 } else if strings.ContainsAny(val, "#;") { 496
488 val = "`" + val + "`" 497 // In case key value contains "\n", "`", "\"", "#" or ";"
489 } 498 if strings.ContainsAny(val, "\n`") {
490 if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { 499 val = `"""` + val + `"""`
491 return 0, err 500 } else if strings.ContainsAny(val, "#;") {
501 val = "`" + val + "`"
502 }
503 if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
504 return 0, err
505 }
492 } 506 }
493 } 507 }
494 508
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
index 9738c55..852696f 100644
--- a/vendor/github.com/go-ini/ini/key.go
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -15,6 +15,7 @@
15package ini 15package ini
16 16
17import ( 17import (
18 "errors"
18 "fmt" 19 "fmt"
19 "strconv" 20 "strconv"
20 "strings" 21 "strings"
@@ -29,9 +30,42 @@ type Key struct {
29 isAutoIncrement bool 30 isAutoIncrement bool
30 isBooleanType bool 31 isBooleanType bool
31 32
33 isShadow bool
34 shadows []*Key
35
32 Comment string 36 Comment string
33} 37}
34 38
39// newKey simply returns a key object with the given values.
40func newKey(s *Section, name, val string) *Key {
41 return &Key{
42 s: s,
43 name: name,
44 value: val,
45 }
46}
47
48func (k *Key) addShadow(val string) error {
49 if k.isShadow {
50 return errors.New("cannot add shadow to another shadow key")
51 } else if k.isAutoIncrement || k.isBooleanType {
52 return errors.New("cannot add shadow to auto-increment or boolean key")
53 }
54
55 shadow := newKey(k.s, k.name, val)
56 shadow.isShadow = true
57 k.shadows = append(k.shadows, shadow)
58 return nil
59}
60
61// AddShadow adds a new shadow key to itself.
62func (k *Key) AddShadow(val string) error {
63 if !k.s.f.options.AllowShadows {
64 return errors.New("shadow key is not allowed")
65 }
66 return k.addShadow(val)
67}
68
35// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv 69// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
36type ValueMapper func(string) string 70type ValueMapper func(string) string
37 71
@@ -45,16 +79,29 @@ func (k *Key) Value() string {
45 return k.value 79 return k.value
46} 80}
47 81
48// String returns string representation of value. 82// ValueWithShadows returns raw values of key and its shadows if any.
49func (k *Key) String() string { 83func (k *Key) ValueWithShadows() []string {
50 val := k.value 84 if len(k.shadows) == 0 {
85 return []string{k.value}
86 }
87 vals := make([]string, len(k.shadows)+1)
88 vals[0] = k.value
89 for i := range k.shadows {
90 vals[i+1] = k.shadows[i].value
91 }
92 return vals
93}
94
95// transformValue takes a raw value and transforms it into its final string.
96func (k *Key) transformValue(val string) string {
51 if k.s.f.ValueMapper != nil { 97 if k.s.f.ValueMapper != nil {
52 val = k.s.f.ValueMapper(val) 98 val = k.s.f.ValueMapper(val)
53 } 99 }
54 if strings.Index(val, "%") == -1 { 100
101 // Fail fast if no indicator char is found for a recursive value
102 if !strings.Contains(val, "%") {
55 return val 103 return val
56 } 104 }
57
58 for i := 0; i < _DEPTH_VALUES; i++ { 105 for i := 0; i < _DEPTH_VALUES; i++ {
59 vr := varPattern.FindString(val) 106 vr := varPattern.FindString(val)
60 if len(vr) == 0 { 107 if len(vr) == 0 {
@@ -78,6 +125,11 @@ func (k *Key) String() string {
78 return val 125 return val
79} 126}
80 127
128// String returns string representation of value.
129func (k *Key) String() string {
130 return k.transformValue(k.value)
131}
132
81// Validate accepts a validate function which can 133// Validate accepts a validate function which can
82// return modified result as key value. 134// return modified result as key value.
83func (k *Key) Validate(fn func(string) string) string { 135func (k *Key) Validate(fn func(string) string) string {
@@ -394,11 +446,31 @@ func (k *Key) Strings(delim string) []string {
394 446
395 vals := strings.Split(str, delim) 447 vals := strings.Split(str, delim)
396 for i := range vals { 448 for i := range vals {
449 // vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
397 vals[i] = strings.TrimSpace(vals[i]) 450 vals[i] = strings.TrimSpace(vals[i])
398 } 451 }
399 return vals 452 return vals
400} 453}
401 454
455// StringsWithShadows returns list of string divided by given delimiter.
456// Shadows will also be appended if any.
457func (k *Key) StringsWithShadows(delim string) []string {
458 vals := k.ValueWithShadows()
459 results := make([]string, 0, len(vals)*2)
460 for i := range vals {
461 if len(vals[i]) == 0 {
462 continue
463 }
464
465 results = append(results, strings.Split(vals[i], delim)...)
466 }
467
468 for i := range results {
469 results[i] = k.transformValue(strings.TrimSpace(results[i]))
470 }
471 return results
472}
473
402// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. 474// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
403func (k *Key) Float64s(delim string) []float64 { 475func (k *Key) Float64s(delim string) []float64 {
404 vals, _ := k.getFloat64s(delim, true, false) 476 vals, _ := k.getFloat64s(delim, true, false)
@@ -407,13 +479,13 @@ func (k *Key) Float64s(delim string) []float64 {
407 479
408// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. 480// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
409func (k *Key) Ints(delim string) []int { 481func (k *Key) Ints(delim string) []int {
410 vals, _ := k.getInts(delim, true, false) 482 vals, _ := k.parseInts(k.Strings(delim), true, false)
411 return vals 483 return vals
412} 484}
413 485
414// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. 486// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
 func (k *Key) Int64s(delim string) []int64 {
-	vals, _ := k.getInt64s(delim, true, false)
+	vals, _ := k.parseInt64s(k.Strings(delim), true, false)
 	return vals
 }
 
@@ -452,14 +524,14 @@ func (k *Key) ValidFloat64s(delim string) []float64 {
 // ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
 // not be included to result list.
 func (k *Key) ValidInts(delim string) []int {
-	vals, _ := k.getInts(delim, false, false)
+	vals, _ := k.parseInts(k.Strings(delim), false, false)
 	return vals
 }
 
 // ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
 // then it will not be included to result list.
 func (k *Key) ValidInt64s(delim string) []int64 {
-	vals, _ := k.getInt64s(delim, false, false)
+	vals, _ := k.parseInt64s(k.Strings(delim), false, false)
 	return vals
 }
 
@@ -495,12 +567,12 @@ func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
 
 // StrictInts returns list of int divided by given delimiter or error on first invalid input.
 func (k *Key) StrictInts(delim string) ([]int, error) {
-	return k.getInts(delim, false, true)
+	return k.parseInts(k.Strings(delim), false, true)
 }
 
 // StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
 func (k *Key) StrictInt64s(delim string) ([]int64, error) {
-	return k.getInt64s(delim, false, true)
+	return k.parseInt64s(k.Strings(delim), false, true)
 }
 
 // StrictUints returns list of uint divided by given delimiter or error on first invalid input.
@@ -541,9 +613,8 @@ func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]flo
 	return vals, nil
 }
 
-// getInts returns list of int divided by given delimiter.
-func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
-	strs := k.Strings(delim)
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
 	vals := make([]int, 0, len(strs))
 	for _, str := range strs {
 		val, err := strconv.Atoi(str)
@@ -557,9 +628,8 @@ func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, er
 	return vals, nil
 }
 
-// getInt64s returns list of int64 divided by given delimiter.
-func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
-	strs := k.Strings(delim)
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
 	vals := make([]int64, 0, len(strs))
 	for _, str := range strs {
 		val, err := strconv.ParseInt(str, 10, 64)
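The key.go change above turns the old delimiter-aware helpers into pure slice transformers, so Ints, ValidInts, and StrictInts now differ only in the (addInvalid, returnOnInvalid) flags they pass to parseInts. A minimal sketch of the three behaviours, assuming the vendored go-ini import path and an illustrative config value:

    package main

    import (
        "fmt"

        "github.com/go-ini/ini"
    )

    func main() {
        cfg, err := ini.Load([]byte("[server]\nports = 80,abc,443\n"))
        if err != nil {
            panic(err)
        }
        key := cfg.Section("server").Key("ports")

        // Ints passes addInvalid=true, so the unparsable entry is kept as a zero value.
        fmt.Println(key.Ints(","))
        // ValidInts passes addInvalid=false, so the unparsable entry is dropped.
        fmt.Println(key.ValidInts(","))
        // StrictInts passes returnOnInvalid=true, so the first bad entry aborts with an error.
        vals, err := key.StrictInts(",")
        fmt.Println(vals, err)
    }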
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
index b0aabe3..673ef80 100644
--- a/vendor/github.com/go-ini/ini/parser.go
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -318,11 +318,14 @@ func (f *File) parse(reader io.Reader) (err error) {
 		if err != nil {
 			// Treat as boolean key when desired, and whole line is key name.
 			if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
-				key, err := section.NewKey(string(line), "true")
+				kname, err := p.readValue(line, f.options.IgnoreContinuation)
+				if err != nil {
+					return err
+				}
+				key, err := section.NewBooleanKey(kname)
 				if err != nil {
 					return err
 				}
-				key.isBooleanType = true
 				key.Comment = strings.TrimSpace(p.comment.String())
 				p.comment.Reset()
 				continue
@@ -338,17 +341,16 @@
 			p.count++
 		}
 
-		key, err := section.NewKey(kname, "")
+		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
 		if err != nil {
 			return err
 		}
-		key.isAutoIncrement = isAutoIncr
 
-		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
+		key, err := section.NewKey(kname, value)
 		if err != nil {
 			return err
 		}
-		key.SetValue(value)
+		key.isAutoIncrement = isAutoIncr
 		key.Comment = strings.TrimSpace(p.comment.String())
 		p.comment.Reset()
 	}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
index 45d2f3b..c9fa27e 100644
--- a/vendor/github.com/go-ini/ini/section.go
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -68,20 +68,33 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
 	}
 
 	if inSlice(name, s.keyList) {
-		s.keys[name].value = val
+		if s.f.options.AllowShadows {
+			if err := s.keys[name].addShadow(val); err != nil {
+				return nil, err
+			}
+		} else {
+			s.keys[name].value = val
+		}
 		return s.keys[name], nil
 	}
 
 	s.keyList = append(s.keyList, name)
-	s.keys[name] = &Key{
-		s:     s,
-		name:  name,
-		value: val,
-	}
+	s.keys[name] = newKey(s, name, val)
 	s.keysHash[name] = val
 	return s.keys[name], nil
 }
 
+// NewBooleanKey creates a new boolean type key to given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+	key, err := s.NewKey(name, "true")
+	if err != nil {
+		return nil, err
+	}
+
+	key.isBooleanType = true
+	return key, nil
+}
+
 // GetKey returns key in section by given name.
 func (s *Section) GetKey(name string) (*Key, error) {
 	// FIXME: change to section level lock?
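NewBooleanKey and the AllowShadows branch above combine with the parser.go change: duplicate key names accumulate shadow values instead of overwriting, and a bare key line becomes a boolean key. A short sketch, assuming the LoadSources/LoadOptions API of this go-ini version (the option names are taken from the fields referenced in the hunks above):

    package main

    import (
        "fmt"

        "github.com/go-ini/ini"
    )

    func main() {
        src := []byte("[git]\nhook = pre-commit\nhook = post-receive\nskip-ci\n")

        cfg, err := ini.LoadSources(ini.LoadOptions{
            AllowShadows:     true, // repeated "hook" keys become shadow values via addShadow
            AllowBooleanKeys: true, // the bare "skip-ci" line is created through NewBooleanKey
        }, src)
        if err != nil {
            panic(err)
        }

        sec := cfg.Section("git")
        fmt.Println(sec.Key("hook").StringsWithShadows(",")) // [pre-commit post-receive]
        fmt.Println(sec.Key("skip-ci").String())             // "true"
    }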
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
index 5ef38d8..509c682 100644
--- a/vendor/github.com/go-ini/ini/struct.go
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -78,8 +78,14 @@ func parseDelim(actual string) string {
 var reflectTime = reflect.TypeOf(time.Now()).Kind()
 
 // setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
-	strs := key.Strings(delim)
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+	var strs []string
+	if allowShadow {
+		strs = key.StringsWithShadows(delim)
+	} else {
+		strs = key.Strings(delim)
+	}
+
 	numVals := len(strs)
 	if numVals == 0 {
 		return nil
@@ -92,9 +98,9 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
 	case reflect.String:
 		vals = strs
 	case reflect.Int:
-		vals = key.Ints(delim)
+		vals, _ = key.parseInts(strs, true, false)
 	case reflect.Int64:
-		vals = key.Int64s(delim)
+		vals, _ = key.parseInt64s(strs, true, false)
 	case reflect.Uint:
 		vals = key.Uints(delim)
 	case reflect.Uint64:
@@ -133,7 +139,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
 // setWithProperType sets proper value to field based on its type,
 // but it does not return error for failing parsing,
 // because we want to use default value that is already assigned to strcut.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
 	switch t.Kind() {
 	case reflect.String:
 		if len(key.String()) == 0 {
@@ -187,13 +193,25 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
 		}
 		field.Set(reflect.ValueOf(timeVal))
 	case reflect.Slice:
-		return setSliceWithProperType(key, field, delim)
+		return setSliceWithProperType(key, field, delim, allowShadow)
 	default:
 		return fmt.Errorf("unsupported type '%s'", t)
 	}
 	return nil
 }
 
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+	opts := strings.SplitN(tag, ",", 3)
+	rawName = opts[0]
+	if len(opts) > 1 {
+		omitEmpty = opts[1] == "omitempty"
+	}
+	if len(opts) > 2 {
+		allowShadow = opts[2] == "allowshadow"
+	}
+	return rawName, omitEmpty, allowShadow
+}
+
 func (s *Section) mapTo(val reflect.Value) error {
 	if val.Kind() == reflect.Ptr {
 		val = val.Elem()
@@ -209,8 +227,8 @@ func (s *Section) mapTo(val reflect.Value) error {
 			continue
 		}
 
-		opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
-		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		rawName, _, allowShadow := parseTagOptions(tag)
+		fieldName := s.parseFieldName(tpField.Name, rawName)
 		if len(fieldName) == 0 || !field.CanSet() {
 			continue
 		}
@@ -231,7 +249,8 @@ func (s *Section) mapTo(val reflect.Value) error {
 		}
 
 		if key, err := s.GetKey(fieldName); err == nil {
-			if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			delim := parseDelim(tpField.Tag.Get("delim"))
+			if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
 				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
 			}
 		}
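On the struct-mapping side, parseTagOptions extends the ini field tag to "name,omitempty,allowshadow", and setWithProperType threads allowShadow down to the slice case. A sketch of mapping shadowed values into a slice field, assuming the same go-ini options API as in the previous example; the config and type are illustrative only:

    package main

    import (
        "fmt"

        "github.com/go-ini/ini"
    )

    // GitConfig is illustrative. parseTagOptions only looks at the third tag
    // option for "allowshadow", so the empty second slot skips "omitempty".
    type GitConfig struct {
        Hooks []string `ini:"hook,,allowshadow"`
    }

    func main() {
        src := []byte("[git]\nhook = pre-commit\nhook = post-receive\n")
        cfg, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true}, src)
        if err != nil {
            panic(err)
        }

        var gc GitConfig
        if err := cfg.Section("git").MapTo(&gc); err != nil {
            panic(err)
        }
        fmt.Println(gc.Hooks) // [pre-commit post-receive]
    }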
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
1# This source code refers to The Go Authors for copyright purposes.
2# The master list of authors is in the main Go distribution,
3# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
1# This source code was written by the Go contributors.
2# The master list of contributors is in the main Go distribution,
3# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
1Copyright 2010 The Go Authors. All rights reserved.
2
3Redistribution and use in source and binary forms, with or without
4modification, are permitted provided that the following conditions are
5met:
6
7 * Redistributions of source code must retain the above copyright
8notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10copyright notice, this list of conditions and the following disclaimer
11in the documentation and/or other materials provided with the
12distribution.
13 * Neither the name of Google Inc. nor the names of its
14contributors may be used to endorse or promote products derived from
15this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2011 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32// Protocol buffer deep copy and merge.
33// TODO: RawMessage.
34
35package proto
36
37import (
38 "fmt"
39 "log"
40 "reflect"
41 "strings"
42)
43
44// Clone returns a deep copy of a protocol buffer.
45func Clone(src Message) Message {
46 in := reflect.ValueOf(src)
47 if in.IsNil() {
48 return src
49 }
50 out := reflect.New(in.Type().Elem())
51 dst := out.Interface().(Message)
52 Merge(dst, src)
53 return dst
54}
55
56// Merger is the interface representing objects that can merge messages of the same type.
57type Merger interface {
58 // Merge merges src into this message.
59 // Required and optional fields that are set in src will be set to that value in dst.
60 // Elements of repeated fields will be appended.
61 //
62 // Merge may panic if called with a different argument type than the receiver.
63 Merge(src Message)
64}
65
66// generatedMerger is the custom merge method that generated protos will have.
67// We must add this method since a generate Merge method will conflict with
68// many existing protos that have a Merge data field already defined.
69type generatedMerger interface {
70 XXX_Merge(src Message)
71}
72
73// Merge merges src into dst.
74// Required and optional fields that are set in src will be set to that value in dst.
75// Elements of repeated fields will be appended.
76// Merge panics if src and dst are not the same type, or if dst is nil.
77func Merge(dst, src Message) {
78 if m, ok := dst.(Merger); ok {
79 m.Merge(src)
80 return
81 }
82
83 in := reflect.ValueOf(src)
84 out := reflect.ValueOf(dst)
85 if out.IsNil() {
86 panic("proto: nil destination")
87 }
88 if in.Type() != out.Type() {
89 panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
90 }
91 if in.IsNil() {
92 return // Merge from nil src is a noop
93 }
94 if m, ok := dst.(generatedMerger); ok {
95 m.XXX_Merge(src)
96 return
97 }
98 mergeStruct(out.Elem(), in.Elem())
99}
100
101func mergeStruct(out, in reflect.Value) {
102 sprop := GetProperties(in.Type())
103 for i := 0; i < in.NumField(); i++ {
104 f := in.Type().Field(i)
105 if strings.HasPrefix(f.Name, "XXX_") {
106 continue
107 }
108 mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
109 }
110
111 if emIn, err := extendable(in.Addr().Interface()); err == nil {
112 emOut, _ := extendable(out.Addr().Interface())
113 mIn, muIn := emIn.extensionsRead()
114 if mIn != nil {
115 mOut := emOut.extensionsWrite()
116 muIn.Lock()
117 mergeExtension(mOut, mIn)
118 muIn.Unlock()
119 }
120 }
121
122 uf := in.FieldByName("XXX_unrecognized")
123 if !uf.IsValid() {
124 return
125 }
126 uin := uf.Bytes()
127 if len(uin) > 0 {
128 out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
129 }
130}
131
132// mergeAny performs a merge between two values of the same type.
133// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
134// prop is set if this is a struct field (it may be nil).
135func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
136 if in.Type() == protoMessageType {
137 if !in.IsNil() {
138 if out.IsNil() {
139 out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
140 } else {
141 Merge(out.Interface().(Message), in.Interface().(Message))
142 }
143 }
144 return
145 }
146 switch in.Kind() {
147 case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
148 reflect.String, reflect.Uint32, reflect.Uint64:
149 if !viaPtr && isProto3Zero(in) {
150 return
151 }
152 out.Set(in)
153 case reflect.Interface:
154 // Probably a oneof field; copy non-nil values.
155 if in.IsNil() {
156 return
157 }
158 // Allocate destination if it is not set, or set to a different type.
159 // Otherwise we will merge as normal.
160 if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
161 out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
162 }
163 mergeAny(out.Elem(), in.Elem(), false, nil)
164 case reflect.Map:
165 if in.Len() == 0 {
166 return
167 }
168 if out.IsNil() {
169 out.Set(reflect.MakeMap(in.Type()))
170 }
171 // For maps with value types of *T or []byte we need to deep copy each value.
172 elemKind := in.Type().Elem().Kind()
173 for _, key := range in.MapKeys() {
174 var val reflect.Value
175 switch elemKind {
176 case reflect.Ptr:
177 val = reflect.New(in.Type().Elem().Elem())
178 mergeAny(val, in.MapIndex(key), false, nil)
179 case reflect.Slice:
180 val = in.MapIndex(key)
181 val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
182 default:
183 val = in.MapIndex(key)
184 }
185 out.SetMapIndex(key, val)
186 }
187 case reflect.Ptr:
188 if in.IsNil() {
189 return
190 }
191 if out.IsNil() {
192 out.Set(reflect.New(in.Elem().Type()))
193 }
194 mergeAny(out.Elem(), in.Elem(), true, nil)
195 case reflect.Slice:
196 if in.IsNil() {
197 return
198 }
199 if in.Type().Elem().Kind() == reflect.Uint8 {
200 // []byte is a scalar bytes field, not a repeated field.
201
202 // Edge case: if this is in a proto3 message, a zero length
203 // bytes field is considered the zero value, and should not
204 // be merged.
205 if prop != nil && prop.proto3 && in.Len() == 0 {
206 return
207 }
208
209 // Make a deep copy.
210 // Append to []byte{} instead of []byte(nil) so that we never end up
211 // with a nil result.
212 out.SetBytes(append([]byte{}, in.Bytes()...))
213 return
214 }
215 n := in.Len()
216 if out.IsNil() {
217 out.Set(reflect.MakeSlice(in.Type(), 0, n))
218 }
219 switch in.Type().Elem().Kind() {
220 case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
221 reflect.String, reflect.Uint32, reflect.Uint64:
222 out.Set(reflect.AppendSlice(out, in))
223 default:
224 for i := 0; i < n; i++ {
225 x := reflect.Indirect(reflect.New(in.Type().Elem()))
226 mergeAny(x, in.Index(i), false, nil)
227 out.Set(reflect.Append(out, x))
228 }
229 }
230 case reflect.Struct:
231 mergeStruct(out, in)
232 default:
233 // unknown type, so not a protocol buffer
234 log.Printf("proto: don't know how to copy %v", in)
235 }
236}
237
238func mergeExtension(out, in map[int32]Extension) {
239 for extNum, eIn := range in {
240 eOut := Extension{desc: eIn.desc}
241 if eIn.value != nil {
242 v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
243 mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
244 eOut.value = v.Interface()
245 }
246 if eIn.enc != nil {
247 eOut.enc = make([]byte, len(eIn.enc))
248 copy(eOut.enc, eIn.enc)
249 }
250
251 out[extNum] = eOut
252 }
253}
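Clone and Merge above are the usual entry points for copying messages. A minimal sketch of the difference between them; pb.Example with a Name scalar and a repeated Tags field is a hypothetical generated type, only the proto calls come from this file:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "example.com/project/example" // hypothetical generated package
    )

    func main() {
        src := &pb.Example{Name: proto.String("a"), Tags: []string{"x"}}

        // Clone allocates a fresh message and deep-copies every set field.
        dup := proto.Clone(src).(*pb.Example)

        // Merge overwrites set scalar fields in dst and appends repeated fields.
        dst := &pb.Example{Tags: []string{"y"}}
        proto.Merge(dst, src)

        fmt.Println(dup.Tags) // [x]
        fmt.Println(dst.Tags) // [y x]
    }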
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..d9aa3c4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,428 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34/*
35 * Routines for decoding protocol buffer data to construct in-memory representations.
36 */
37
38import (
39 "errors"
40 "fmt"
41 "io"
42)
43
44// errOverflow is returned when an integer is too large to be represented.
45var errOverflow = errors.New("proto: integer overflow")
46
47// ErrInternalBadWireType is returned by generated code when an incorrect
48// wire type is encountered. It does not get returned to user code.
49var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
50
51// DecodeVarint reads a varint-encoded integer from the slice.
52// It returns the integer and the number of bytes consumed, or
53// zero if there is not enough.
54// This is the format for the
55// int32, int64, uint32, uint64, bool, and enum
56// protocol buffer types.
57func DecodeVarint(buf []byte) (x uint64, n int) {
58 for shift := uint(0); shift < 64; shift += 7 {
59 if n >= len(buf) {
60 return 0, 0
61 }
62 b := uint64(buf[n])
63 n++
64 x |= (b & 0x7F) << shift
65 if (b & 0x80) == 0 {
66 return x, n
67 }
68 }
69
70 // The number is too large to represent in a 64-bit value.
71 return 0, 0
72}
73
74func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
75 i := p.index
76 l := len(p.buf)
77
78 for shift := uint(0); shift < 64; shift += 7 {
79 if i >= l {
80 err = io.ErrUnexpectedEOF
81 return
82 }
83 b := p.buf[i]
84 i++
85 x |= (uint64(b) & 0x7F) << shift
86 if b < 0x80 {
87 p.index = i
88 return
89 }
90 }
91
92 // The number is too large to represent in a 64-bit value.
93 err = errOverflow
94 return
95}
96
97// DecodeVarint reads a varint-encoded integer from the Buffer.
98// This is the format for the
99// int32, int64, uint32, uint64, bool, and enum
100// protocol buffer types.
101func (p *Buffer) DecodeVarint() (x uint64, err error) {
102 i := p.index
103 buf := p.buf
104
105 if i >= len(buf) {
106 return 0, io.ErrUnexpectedEOF
107 } else if buf[i] < 0x80 {
108 p.index++
109 return uint64(buf[i]), nil
110 } else if len(buf)-i < 10 {
111 return p.decodeVarintSlow()
112 }
113
114 var b uint64
115 // we already checked the first byte
116 x = uint64(buf[i]) - 0x80
117 i++
118
119 b = uint64(buf[i])
120 i++
121 x += b << 7
122 if b&0x80 == 0 {
123 goto done
124 }
125 x -= 0x80 << 7
126
127 b = uint64(buf[i])
128 i++
129 x += b << 14
130 if b&0x80 == 0 {
131 goto done
132 }
133 x -= 0x80 << 14
134
135 b = uint64(buf[i])
136 i++
137 x += b << 21
138 if b&0x80 == 0 {
139 goto done
140 }
141 x -= 0x80 << 21
142
143 b = uint64(buf[i])
144 i++
145 x += b << 28
146 if b&0x80 == 0 {
147 goto done
148 }
149 x -= 0x80 << 28
150
151 b = uint64(buf[i])
152 i++
153 x += b << 35
154 if b&0x80 == 0 {
155 goto done
156 }
157 x -= 0x80 << 35
158
159 b = uint64(buf[i])
160 i++
161 x += b << 42
162 if b&0x80 == 0 {
163 goto done
164 }
165 x -= 0x80 << 42
166
167 b = uint64(buf[i])
168 i++
169 x += b << 49
170 if b&0x80 == 0 {
171 goto done
172 }
173 x -= 0x80 << 49
174
175 b = uint64(buf[i])
176 i++
177 x += b << 56
178 if b&0x80 == 0 {
179 goto done
180 }
181 x -= 0x80 << 56
182
183 b = uint64(buf[i])
184 i++
185 x += b << 63
186 if b&0x80 == 0 {
187 goto done
188 }
189 // x -= 0x80 << 63 // Always zero.
190
191 return 0, errOverflow
192
193done:
194 p.index = i
195 return x, nil
196}
197
198// DecodeFixed64 reads a 64-bit integer from the Buffer.
199// This is the format for the
200// fixed64, sfixed64, and double protocol buffer types.
201func (p *Buffer) DecodeFixed64() (x uint64, err error) {
202 // x, err already 0
203 i := p.index + 8
204 if i < 0 || i > len(p.buf) {
205 err = io.ErrUnexpectedEOF
206 return
207 }
208 p.index = i
209
210 x = uint64(p.buf[i-8])
211 x |= uint64(p.buf[i-7]) << 8
212 x |= uint64(p.buf[i-6]) << 16
213 x |= uint64(p.buf[i-5]) << 24
214 x |= uint64(p.buf[i-4]) << 32
215 x |= uint64(p.buf[i-3]) << 40
216 x |= uint64(p.buf[i-2]) << 48
217 x |= uint64(p.buf[i-1]) << 56
218 return
219}
220
221// DecodeFixed32 reads a 32-bit integer from the Buffer.
222// This is the format for the
223// fixed32, sfixed32, and float protocol buffer types.
224func (p *Buffer) DecodeFixed32() (x uint64, err error) {
225 // x, err already 0
226 i := p.index + 4
227 if i < 0 || i > len(p.buf) {
228 err = io.ErrUnexpectedEOF
229 return
230 }
231 p.index = i
232
233 x = uint64(p.buf[i-4])
234 x |= uint64(p.buf[i-3]) << 8
235 x |= uint64(p.buf[i-2]) << 16
236 x |= uint64(p.buf[i-1]) << 24
237 return
238}
239
240// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
241// from the Buffer.
242// This is the format used for the sint64 protocol buffer type.
243func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
244 x, err = p.DecodeVarint()
245 if err != nil {
246 return
247 }
248 x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
249 return
250}
251
252// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
253// from the Buffer.
254// This is the format used for the sint32 protocol buffer type.
255func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
256 x, err = p.DecodeVarint()
257 if err != nil {
258 return
259 }
260 x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
261 return
262}
263
264// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
265// This is the format used for the bytes protocol buffer
266// type and for embedded messages.
267func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
268 n, err := p.DecodeVarint()
269 if err != nil {
270 return nil, err
271 }
272
273 nb := int(n)
274 if nb < 0 {
275 return nil, fmt.Errorf("proto: bad byte length %d", nb)
276 }
277 end := p.index + nb
278 if end < p.index || end > len(p.buf) {
279 return nil, io.ErrUnexpectedEOF
280 }
281
282 if !alloc {
283 // todo: check if can get more uses of alloc=false
284 buf = p.buf[p.index:end]
285 p.index += nb
286 return
287 }
288
289 buf = make([]byte, nb)
290 copy(buf, p.buf[p.index:])
291 p.index += nb
292 return
293}
294
295// DecodeStringBytes reads an encoded string from the Buffer.
296// This is the format used for the proto2 string type.
297func (p *Buffer) DecodeStringBytes() (s string, err error) {
298 buf, err := p.DecodeRawBytes(false)
299 if err != nil {
300 return
301 }
302 return string(buf), nil
303}
304
305// Unmarshaler is the interface representing objects that can
306// unmarshal themselves. The argument points to data that may be
307// overwritten, so implementations should not keep references to the
308// buffer.
309// Unmarshal implementations should not clear the receiver.
310// Any unmarshaled data should be merged into the receiver.
311// Callers of Unmarshal that do not want to retain existing data
312// should Reset the receiver before calling Unmarshal.
313type Unmarshaler interface {
314 Unmarshal([]byte) error
315}
316
317// newUnmarshaler is the interface representing objects that can
318// unmarshal themselves. The semantics are identical to Unmarshaler.
319//
320// This exists to support protoc-gen-go generated messages.
321// The proto package will stop type-asserting to this interface in the future.
322//
323// DO NOT DEPEND ON THIS.
324type newUnmarshaler interface {
325 XXX_Unmarshal([]byte) error
326}
327
328// Unmarshal parses the protocol buffer representation in buf and places the
329// decoded result in pb. If the struct underlying pb does not match
330// the data in buf, the results can be unpredictable.
331//
332// Unmarshal resets pb before starting to unmarshal, so any
333// existing data in pb is always removed. Use UnmarshalMerge
334// to preserve and append to existing data.
335func Unmarshal(buf []byte, pb Message) error {
336 pb.Reset()
337 if u, ok := pb.(newUnmarshaler); ok {
338 return u.XXX_Unmarshal(buf)
339 }
340 if u, ok := pb.(Unmarshaler); ok {
341 return u.Unmarshal(buf)
342 }
343 return NewBuffer(buf).Unmarshal(pb)
344}
345
346// UnmarshalMerge parses the protocol buffer representation in buf and
347// writes the decoded result to pb. If the struct underlying pb does not match
348// the data in buf, the results can be unpredictable.
349//
350// UnmarshalMerge merges into existing data in pb.
351// Most code should use Unmarshal instead.
352func UnmarshalMerge(buf []byte, pb Message) error {
353 if u, ok := pb.(newUnmarshaler); ok {
354 return u.XXX_Unmarshal(buf)
355 }
356 if u, ok := pb.(Unmarshaler); ok {
357 // NOTE: The history of proto have unfortunately been inconsistent
358 // whether Unmarshaler should or should not implicitly clear itself.
359 // Some implementations do, most do not.
360 // Thus, calling this here may or may not do what people want.
361 //
362 // See https://github.com/golang/protobuf/issues/424
363 return u.Unmarshal(buf)
364 }
365 return NewBuffer(buf).Unmarshal(pb)
366}
367
368// DecodeMessage reads a count-delimited message from the Buffer.
369func (p *Buffer) DecodeMessage(pb Message) error {
370 enc, err := p.DecodeRawBytes(false)
371 if err != nil {
372 return err
373 }
374 return NewBuffer(enc).Unmarshal(pb)
375}
376
377// DecodeGroup reads a tag-delimited group from the Buffer.
378// StartGroup tag is already consumed. This function consumes
379// EndGroup tag.
380func (p *Buffer) DecodeGroup(pb Message) error {
381 b := p.buf[p.index:]
382 x, y := findEndGroup(b)
383 if x < 0 {
384 return io.ErrUnexpectedEOF
385 }
386 err := Unmarshal(b[:x], pb)
387 p.index += y
388 return err
389}
390
391// Unmarshal parses the protocol buffer representation in the
392// Buffer and places the decoded result in pb. If the struct
393// underlying pb does not match the data in the buffer, the results can be
394// unpredictable.
395//
396// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
397func (p *Buffer) Unmarshal(pb Message) error {
398 // If the object can unmarshal itself, let it.
399 if u, ok := pb.(newUnmarshaler); ok {
400 err := u.XXX_Unmarshal(p.buf[p.index:])
401 p.index = len(p.buf)
402 return err
403 }
404 if u, ok := pb.(Unmarshaler); ok {
405 // NOTE: The history of proto have unfortunately been inconsistent
406 // whether Unmarshaler should or should not implicitly clear itself.
407 // Some implementations do, most do not.
408 // Thus, calling this here may or may not do what people want.
409 //
410 // See https://github.com/golang/protobuf/issues/424
411 err := u.Unmarshal(p.buf[p.index:])
412 p.index = len(p.buf)
413 return err
414 }
415
416 // Slow workaround for messages that aren't Unmarshalers.
417 // This includes some hand-coded .pb.go files and
418 // bootstrap protos.
419 // TODO: fix all of those and then add Unmarshal to
420 // the Message interface. Then:
421 // The cast above and code below can be deleted.
422 // The old unmarshaler can be deleted.
423 // Clients can call Unmarshal directly (can already do that, actually).
424 var info InternalMessageInfo
425 err := info.Unmarshal(pb, p.buf[p.index:])
426 p.index = len(p.buf)
427 return err
428}
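The Buffer decoders above mirror the encoders in encode.go; a small round-trip sketch using only exported methods from this package:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        enc := proto.NewBuffer(nil)
        _ = enc.EncodeVarint(300)          // 300 encodes as 0xAC 0x02
        _ = enc.EncodeZigzag64(^uint64(0)) // the sint64 value -1 zigzag-encodes to 1

        dec := proto.NewBuffer(enc.Bytes())
        v, _ := dec.DecodeVarint()
        z, _ := dec.DecodeZigzag64()
        fmt.Println(v)        // 300
        fmt.Println(int64(z)) // -1, recovered from the zigzag form
    }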
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2017 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34import (
35 "fmt"
36 "reflect"
37 "strings"
38 "sync"
39 "sync/atomic"
40)
41
42type generatedDiscarder interface {
43 XXX_DiscardUnknown()
44}
45
46// DiscardUnknown recursively discards all unknown fields from this message
47// and all embedded messages.
48//
49// When unmarshaling a message with unrecognized fields, the tags and values
50// of such fields are preserved in the Message. This allows a later call to
51// marshal to be able to produce a message that continues to have those
52// unrecognized fields. To avoid this, DiscardUnknown is used to
53// explicitly clear the unknown fields after unmarshaling.
54//
55// For proto2 messages, the unknown fields of message extensions are only
56// discarded from messages that have been accessed via GetExtension.
57func DiscardUnknown(m Message) {
58 if m, ok := m.(generatedDiscarder); ok {
59 m.XXX_DiscardUnknown()
60 return
61 }
62 // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
63 // but the master branch has no implementation for InternalMessageInfo,
64 // so it would be more work to replicate that approach.
65 discardLegacy(m)
66}
67
68// DiscardUnknown recursively discards all unknown fields.
69func (a *InternalMessageInfo) DiscardUnknown(m Message) {
70 di := atomicLoadDiscardInfo(&a.discard)
71 if di == nil {
72 di = getDiscardInfo(reflect.TypeOf(m).Elem())
73 atomicStoreDiscardInfo(&a.discard, di)
74 }
75 di.discard(toPointer(&m))
76}
77
78type discardInfo struct {
79 typ reflect.Type
80
81 initialized int32 // 0: only typ is valid, 1: everything is valid
82 lock sync.Mutex
83
84 fields []discardFieldInfo
85 unrecognized field
86}
87
88type discardFieldInfo struct {
89 field field // Offset of field, guaranteed to be valid
90 discard func(src pointer)
91}
92
93var (
94 discardInfoMap = map[reflect.Type]*discardInfo{}
95 discardInfoLock sync.Mutex
96)
97
98func getDiscardInfo(t reflect.Type) *discardInfo {
99 discardInfoLock.Lock()
100 defer discardInfoLock.Unlock()
101 di := discardInfoMap[t]
102 if di == nil {
103 di = &discardInfo{typ: t}
104 discardInfoMap[t] = di
105 }
106 return di
107}
108
109func (di *discardInfo) discard(src pointer) {
110 if src.isNil() {
111 return // Nothing to do.
112 }
113
114 if atomic.LoadInt32(&di.initialized) == 0 {
115 di.computeDiscardInfo()
116 }
117
118 for _, fi := range di.fields {
119 sfp := src.offset(fi.field)
120 fi.discard(sfp)
121 }
122
123 // For proto2 messages, only discard unknown fields in message extensions
124 // that have been accessed via GetExtension.
125 if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
126 // Ignore lock since DiscardUnknown is not concurrency safe.
127 emm, _ := em.extensionsRead()
128 for _, mx := range emm {
129 if m, ok := mx.value.(Message); ok {
130 DiscardUnknown(m)
131 }
132 }
133 }
134
135 if di.unrecognized.IsValid() {
136 *src.offset(di.unrecognized).toBytes() = nil
137 }
138}
139
140func (di *discardInfo) computeDiscardInfo() {
141 di.lock.Lock()
142 defer di.lock.Unlock()
143 if di.initialized != 0 {
144 return
145 }
146 t := di.typ
147 n := t.NumField()
148
149 for i := 0; i < n; i++ {
150 f := t.Field(i)
151 if strings.HasPrefix(f.Name, "XXX_") {
152 continue
153 }
154
155 dfi := discardFieldInfo{field: toField(&f)}
156 tf := f.Type
157
158 // Unwrap tf to get its most basic type.
159 var isPointer, isSlice bool
160 if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
161 isSlice = true
162 tf = tf.Elem()
163 }
164 if tf.Kind() == reflect.Ptr {
165 isPointer = true
166 tf = tf.Elem()
167 }
168 if isPointer && isSlice && tf.Kind() != reflect.Struct {
169 panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
170 }
171
172 switch tf.Kind() {
173 case reflect.Struct:
174 switch {
175 case !isPointer:
176 panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
177 case isSlice: // E.g., []*pb.T
178 di := getDiscardInfo(tf)
179 dfi.discard = func(src pointer) {
180 sps := src.getPointerSlice()
181 for _, sp := range sps {
182 if !sp.isNil() {
183 di.discard(sp)
184 }
185 }
186 }
187 default: // E.g., *pb.T
188 di := getDiscardInfo(tf)
189 dfi.discard = func(src pointer) {
190 sp := src.getPointer()
191 if !sp.isNil() {
192 di.discard(sp)
193 }
194 }
195 }
196 case reflect.Map:
197 switch {
198 case isPointer || isSlice:
199 panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
200 default: // E.g., map[K]V
201 if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
202 dfi.discard = func(src pointer) {
203 sm := src.asPointerTo(tf).Elem()
204 if sm.Len() == 0 {
205 return
206 }
207 for _, key := range sm.MapKeys() {
208 val := sm.MapIndex(key)
209 DiscardUnknown(val.Interface().(Message))
210 }
211 }
212 } else {
213 dfi.discard = func(pointer) {} // Noop
214 }
215 }
216 case reflect.Interface:
217 // Must be oneof field.
218 switch {
219 case isPointer || isSlice:
220 panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
221 default: // E.g., interface{}
222 // TODO: Make this faster?
223 dfi.discard = func(src pointer) {
224 su := src.asPointerTo(tf).Elem()
225 if !su.IsNil() {
226 sv := su.Elem().Elem().Field(0)
227 if sv.Kind() == reflect.Ptr && sv.IsNil() {
228 return
229 }
230 switch sv.Type().Kind() {
231 case reflect.Ptr: // Proto struct (e.g., *T)
232 DiscardUnknown(sv.Interface().(Message))
233 }
234 }
235 }
236 }
237 default:
238 continue
239 }
240 di.fields = append(di.fields, dfi)
241 }
242
243 di.unrecognized = invalidField
244 if f, ok := t.FieldByName("XXX_unrecognized"); ok {
245 if f.Type != reflect.TypeOf([]byte{}) {
246 panic("expected XXX_unrecognized to be of type []byte")
247 }
248 di.unrecognized = toField(&f)
249 }
250
251 atomic.StoreInt32(&di.initialized, 1)
252}
253
254func discardLegacy(m Message) {
255 v := reflect.ValueOf(m)
256 if v.Kind() != reflect.Ptr || v.IsNil() {
257 return
258 }
259 v = v.Elem()
260 if v.Kind() != reflect.Struct {
261 return
262 }
263 t := v.Type()
264
265 for i := 0; i < v.NumField(); i++ {
266 f := t.Field(i)
267 if strings.HasPrefix(f.Name, "XXX_") {
268 continue
269 }
270 vf := v.Field(i)
271 tf := f.Type
272
273 // Unwrap tf to get its most basic type.
274 var isPointer, isSlice bool
275 if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
276 isSlice = true
277 tf = tf.Elem()
278 }
279 if tf.Kind() == reflect.Ptr {
280 isPointer = true
281 tf = tf.Elem()
282 }
283 if isPointer && isSlice && tf.Kind() != reflect.Struct {
284 panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
285 }
286
287 switch tf.Kind() {
288 case reflect.Struct:
289 switch {
290 case !isPointer:
291 panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
292 case isSlice: // E.g., []*pb.T
293 for j := 0; j < vf.Len(); j++ {
294 discardLegacy(vf.Index(j).Interface().(Message))
295 }
296 default: // E.g., *pb.T
297 discardLegacy(vf.Interface().(Message))
298 }
299 case reflect.Map:
300 switch {
301 case isPointer || isSlice:
302 panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
303 default: // E.g., map[K]V
304 tv := vf.Type().Elem()
305 if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
306 for _, key := range vf.MapKeys() {
307 val := vf.MapIndex(key)
308 discardLegacy(val.Interface().(Message))
309 }
310 }
311 }
312 case reflect.Interface:
313 // Must be oneof field.
314 switch {
315 case isPointer || isSlice:
316 panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
317 default: // E.g., test_proto.isCommunique_Union interface
318 if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
319 vf = vf.Elem() // E.g., *test_proto.Communique_Msg
320 if !vf.IsNil() {
321 vf = vf.Elem() // E.g., test_proto.Communique_Msg
322 vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
323 if vf.Kind() == reflect.Ptr {
324 discardLegacy(vf.Interface().(Message))
325 }
326 }
327 }
328 }
329 }
330 }
331
332 if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
333 if vf.Type() != reflect.TypeOf([]byte{}) {
334 panic("expected XXX_unrecognized to be of type []byte")
335 }
336 vf.Set(reflect.ValueOf([]byte(nil)))
337 }
338
339 // For proto2 messages, only discard unknown fields in message extensions
340 // that have been accessed via GetExtension.
341 if em, err := extendable(m); err == nil {
342 // Ignore lock since discardLegacy is not concurrency safe.
343 emm, _ := em.extensionsRead()
344 for _, mx := range emm {
345 if m, ok := mx.value.(Message); ok {
346 discardLegacy(m)
347 }
348 }
349 }
350}
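When a message is unmarshaled, tags it does not recognize are kept in XXX_unrecognized and written back out by Marshal; DiscardUnknown above strips them recursively. A sketch of the typical use, with pb.Example again standing in for a hypothetical generated type:

    package main

    import (
        "github.com/golang/protobuf/proto"
        pb "example.com/project/example" // hypothetical generated package
    )

    // keepOnlyKnownFields re-encodes raw without any fields this binary's
    // version of pb.Example does not know about.
    func keepOnlyKnownFields(raw []byte) ([]byte, error) {
        var msg pb.Example
        if err := proto.Unmarshal(raw, &msg); err != nil {
            return nil, err
        }
        proto.DiscardUnknown(&msg) // drop unrecognized tags before re-marshaling
        return proto.Marshal(&msg)
    }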
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34/*
35 * Routines for encoding data into the wire format for protocol buffers.
36 */
37
38import (
39 "errors"
40 "reflect"
41)
42
43var (
44 // errRepeatedHasNil is the error returned if Marshal is called with
45 // a struct with a repeated field containing a nil element.
46 errRepeatedHasNil = errors.New("proto: repeated field has nil element")
47
48 // errOneofHasNil is the error returned if Marshal is called with
49 // a struct with a oneof field containing a nil element.
50 errOneofHasNil = errors.New("proto: oneof field has nil value")
51
52 // ErrNil is the error returned if Marshal is called with nil.
53 ErrNil = errors.New("proto: Marshal called with nil")
54
55 // ErrTooLarge is the error returned if Marshal is called with a
56 // message that encodes to >2GB.
57 ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
58)
59
60// The fundamental encoders that put bytes on the wire.
61// Those that take integer types all accept uint64 and are
62// therefore of type valueEncoder.
63
64const maxVarintBytes = 10 // maximum length of a varint
65
66// EncodeVarint returns the varint encoding of x.
67// This is the format for the
68// int32, int64, uint32, uint64, bool, and enum
69// protocol buffer types.
70// Not used by the package itself, but helpful to clients
71// wishing to use the same encoding.
72func EncodeVarint(x uint64) []byte {
73 var buf [maxVarintBytes]byte
74 var n int
75 for n = 0; x > 127; n++ {
76 buf[n] = 0x80 | uint8(x&0x7F)
77 x >>= 7
78 }
79 buf[n] = uint8(x)
80 n++
81 return buf[0:n]
82}
83
84// EncodeVarint writes a varint-encoded integer to the Buffer.
85// This is the format for the
86// int32, int64, uint32, uint64, bool, and enum
87// protocol buffer types.
88func (p *Buffer) EncodeVarint(x uint64) error {
89 for x >= 1<<7 {
90 p.buf = append(p.buf, uint8(x&0x7f|0x80))
91 x >>= 7
92 }
93 p.buf = append(p.buf, uint8(x))
94 return nil
95}
96
97// SizeVarint returns the varint encoding size of an integer.
98func SizeVarint(x uint64) int {
99 switch {
100 case x < 1<<7:
101 return 1
102 case x < 1<<14:
103 return 2
104 case x < 1<<21:
105 return 3
106 case x < 1<<28:
107 return 4
108 case x < 1<<35:
109 return 5
110 case x < 1<<42:
111 return 6
112 case x < 1<<49:
113 return 7
114 case x < 1<<56:
115 return 8
116 case x < 1<<63:
117 return 9
118 }
119 return 10
120}
121
122// EncodeFixed64 writes a 64-bit integer to the Buffer.
123// This is the format for the
124// fixed64, sfixed64, and double protocol buffer types.
125func (p *Buffer) EncodeFixed64(x uint64) error {
126 p.buf = append(p.buf,
127 uint8(x),
128 uint8(x>>8),
129 uint8(x>>16),
130 uint8(x>>24),
131 uint8(x>>32),
132 uint8(x>>40),
133 uint8(x>>48),
134 uint8(x>>56))
135 return nil
136}
137
138// EncodeFixed32 writes a 32-bit integer to the Buffer.
139// This is the format for the
140// fixed32, sfixed32, and float protocol buffer types.
141func (p *Buffer) EncodeFixed32(x uint64) error {
142 p.buf = append(p.buf,
143 uint8(x),
144 uint8(x>>8),
145 uint8(x>>16),
146 uint8(x>>24))
147 return nil
148}
149
150// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
151// to the Buffer.
152// This is the format used for the sint64 protocol buffer type.
153func (p *Buffer) EncodeZigzag64(x uint64) error {
154 // use signed number to get arithmetic right shift.
155 return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
156}
157
158// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
159// to the Buffer.
160// This is the format used for the sint32 protocol buffer type.
161func (p *Buffer) EncodeZigzag32(x uint64) error {
162 // use signed number to get arithmetic right shift.
163 return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
164}
165
166// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
167// This is the format used for the bytes protocol buffer
168// type and for embedded messages.
169func (p *Buffer) EncodeRawBytes(b []byte) error {
170 p.EncodeVarint(uint64(len(b)))
171 p.buf = append(p.buf, b...)
172 return nil
173}
174
175// EncodeStringBytes writes an encoded string to the Buffer.
176// This is the format used for the proto2 string type.
177func (p *Buffer) EncodeStringBytes(s string) error {
178 p.EncodeVarint(uint64(len(s)))
179 p.buf = append(p.buf, s...)
180 return nil
181}
182
183// Marshaler is the interface representing objects that can marshal themselves.
184type Marshaler interface {
185 Marshal() ([]byte, error)
186}
187
188// EncodeMessage writes the protocol buffer to the Buffer,
189// prefixed by a varint-encoded length.
190func (p *Buffer) EncodeMessage(pb Message) error {
191 siz := Size(pb)
192 p.EncodeVarint(uint64(siz))
193 return p.Marshal(pb)
194}
195
196// All protocol buffer fields are nillable, but be careful.
197func isNil(v reflect.Value) bool {
198 switch v.Kind() {
199 case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
200 return v.IsNil()
201 }
202 return false
203}
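SizeVarint above is the sizing counterpart of EncodeVarint; a quick self-contained check that the two agree, using only package-level helpers from this file:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        for _, x := range []uint64{0, 127, 128, 1 << 21, 1<<63 - 1, ^uint64(0)} {
            enc := proto.EncodeVarint(x) // standalone helper, no Buffer needed
            fmt.Println(x, len(enc), len(enc) == proto.SizeVarint(x))
        }
    }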
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2011 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32// Protocol buffer comparison.
33
34package proto
35
36import (
37 "bytes"
38 "log"
39 "reflect"
40 "strings"
41)
42
43/*
44Equal returns true iff protocol buffers a and b are equal.
45The arguments must both be pointers to protocol buffer structs.
46
47Equality is defined in this way:
48 - Two messages are equal iff they are the same type,
49 corresponding fields are equal, unknown field sets
50 are equal, and extensions sets are equal.
51 - Two set scalar fields are equal iff their values are equal.
52 If the fields are of a floating-point type, remember that
53 NaN != x for all x, including NaN. If the message is defined
54 in a proto3 .proto file, fields are not "set"; specifically,
55 zero length proto3 "bytes" fields are equal (nil == {}).
56 - Two repeated fields are equal iff their lengths are the same,
57 and their corresponding elements are equal. Note a "bytes" field,
58 although represented by []byte, is not a repeated field and the
59 rule for the scalar fields described above applies.
60 - Two unset fields are equal.
61 - Two unknown field sets are equal if their current
62 encoded state is equal.
63 - Two extension sets are equal iff they have corresponding
64 elements that are pairwise equal.
65 - Two map fields are equal iff their lengths are the same,
66 and they contain the same set of elements. Zero-length map
67 fields are equal.
68 - Every other combination of things are not equal.
69
70The return value is undefined if a and b are not protocol buffers.
71*/
72func Equal(a, b Message) bool {
73 if a == nil || b == nil {
74 return a == b
75 }
76 v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
77 if v1.Type() != v2.Type() {
78 return false
79 }
80 if v1.Kind() == reflect.Ptr {
81 if v1.IsNil() {
82 return v2.IsNil()
83 }
84 if v2.IsNil() {
85 return false
86 }
87 v1, v2 = v1.Elem(), v2.Elem()
88 }
89 if v1.Kind() != reflect.Struct {
90 return false
91 }
92 return equalStruct(v1, v2)
93}
94
95// v1 and v2 are known to have the same type.
96func equalStruct(v1, v2 reflect.Value) bool {
97 sprop := GetProperties(v1.Type())
98 for i := 0; i < v1.NumField(); i++ {
99 f := v1.Type().Field(i)
100 if strings.HasPrefix(f.Name, "XXX_") {
101 continue
102 }
103 f1, f2 := v1.Field(i), v2.Field(i)
104 if f.Type.Kind() == reflect.Ptr {
105 if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
106 // both unset
107 continue
108 } else if n1 != n2 {
109 // set/unset mismatch
110 return false
111 }
112 f1, f2 = f1.Elem(), f2.Elem()
113 }
114 if !equalAny(f1, f2, sprop.Prop[i]) {
115 return false
116 }
117 }
118
119 if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
120 em2 := v2.FieldByName("XXX_InternalExtensions")
121 if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
122 return false
123 }
124 }
125
126 if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
127 em2 := v2.FieldByName("XXX_extensions")
128 if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
129 return false
130 }
131 }
132
133 uf := v1.FieldByName("XXX_unrecognized")
134 if !uf.IsValid() {
135 return true
136 }
137
138 u1 := uf.Bytes()
139 u2 := v2.FieldByName("XXX_unrecognized").Bytes()
140 return bytes.Equal(u1, u2)
141}
142
143// v1 and v2 are known to have the same type.
144// prop may be nil.
145func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
146 if v1.Type() == protoMessageType {
147 m1, _ := v1.Interface().(Message)
148 m2, _ := v2.Interface().(Message)
149 return Equal(m1, m2)
150 }
151 switch v1.Kind() {
152 case reflect.Bool:
153 return v1.Bool() == v2.Bool()
154 case reflect.Float32, reflect.Float64:
155 return v1.Float() == v2.Float()
156 case reflect.Int32, reflect.Int64:
157 return v1.Int() == v2.Int()
158 case reflect.Interface:
159 // Probably a oneof field; compare the inner values.
160 n1, n2 := v1.IsNil(), v2.IsNil()
161 if n1 || n2 {
162 return n1 == n2
163 }
164 e1, e2 := v1.Elem(), v2.Elem()
165 if e1.Type() != e2.Type() {
166 return false
167 }
168 return equalAny(e1, e2, nil)
169 case reflect.Map:
170 if v1.Len() != v2.Len() {
171 return false
172 }
173 for _, key := range v1.MapKeys() {
174 val2 := v2.MapIndex(key)
175 if !val2.IsValid() {
176 // This key was not found in the second map.
177 return false
178 }
179 if !equalAny(v1.MapIndex(key), val2, nil) {
180 return false
181 }
182 }
183 return true
184 case reflect.Ptr:
185 // Maps may have nil values in them, so check for nil.
186 if v1.IsNil() && v2.IsNil() {
187 return true
188 }
189 if v1.IsNil() != v2.IsNil() {
190 return false
191 }
192 return equalAny(v1.Elem(), v2.Elem(), prop)
193 case reflect.Slice:
194 if v1.Type().Elem().Kind() == reflect.Uint8 {
195 // short circuit: []byte
196
197 // Edge case: if this is in a proto3 message, a zero length
198 // bytes field is considered the zero value.
199 if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
200 return true
201 }
202 if v1.IsNil() != v2.IsNil() {
203 return false
204 }
205 return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
206 }
207
208 if v1.Len() != v2.Len() {
209 return false
210 }
211 for i := 0; i < v1.Len(); i++ {
212 if !equalAny(v1.Index(i), v2.Index(i), prop) {
213 return false
214 }
215 }
216 return true
217 case reflect.String:
218 return v1.Interface().(string) == v2.Interface().(string)
219 case reflect.Struct:
220 return equalStruct(v1, v2)
221 case reflect.Uint32, reflect.Uint64:
222 return v1.Uint() == v2.Uint()
223 }
224
225 // unknown type, so not a protocol buffer
226 log.Printf("proto: don't know how to compare %v", v1)
227 return false
228}
229
230// base is the struct type that the extensions are based on.
231// x1 and x2 are InternalExtensions.
232func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
233 em1, _ := x1.extensionsRead()
234 em2, _ := x2.extensionsRead()
235 return equalExtMap(base, em1, em2)
236}
237
238func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
239 if len(em1) != len(em2) {
240 return false
241 }
242
243 for extNum, e1 := range em1 {
244 e2, ok := em2[extNum]
245 if !ok {
246 return false
247 }
248
249 m1, m2 := e1.value, e2.value
250
251 if m1 == nil && m2 == nil {
252 // Both have only encoded form.
253 if bytes.Equal(e1.enc, e2.enc) {
254 continue
255 }
256 // The bytes are different, but the extensions might still be
257 // equal. We need to decode them to compare.
258 }
259
260 if m1 != nil && m2 != nil {
261 // Both are unencoded.
262 if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
263 return false
264 }
265 continue
266 }
267
268 // At least one is encoded. To do a semantically correct comparison
269 // we need to unmarshal them first.
270 var desc *ExtensionDesc
271 if m := extensionMaps[base]; m != nil {
272 desc = m[extNum]
273 }
274 if desc == nil {
275 // If both have only encoded form and the bytes are the same,
276 // it is handled above. We get here when the bytes are different.
277 // We don't know how to decode it, so just compare them as byte
278 // slices.
279 log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
280 return false
281 }
282 var err error
283 if m1 == nil {
284 m1, err = decodeExtension(e1.enc, desc)
285 }
286 if m2 == nil && err == nil {
287 m2, err = decodeExtension(e2.enc, desc)
288 }
289 if err != nil {
290 // The encoded form is invalid.
291 log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
292 return false
293 }
294 if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
295 return false
296 }
297 }
298
299 return true
300}
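A minimal sketch of how the Equal entry point above is typically called; pb.Test and its Label field are the hypothetical generated types used in the proto package documentation further below, not part of this file.

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/proto"
		pb "./example.pb" // hypothetical generated package
	)

	func main() {
		a := &pb.Test{Label: proto.String("hello")}
		b := &pb.Test{Label: proto.String("hello")}
		// Distinct pointers to field-wise equal data compare equal;
		// extensions and XXX_unrecognized bytes are compared as well.
		fmt.Println(proto.Equal(a, b)) // true
		b.Label = proto.String("goodbye")
		fmt.Println(proto.Equal(a, b)) // false
	}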
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..816a3b9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,543 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34/*
35 * Types and routines for supporting protocol buffer extensions.
36 */
37
38import (
39 "errors"
40 "fmt"
41 "io"
42 "reflect"
43 "strconv"
44 "sync"
45)
46
47// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
48var ErrMissingExtension = errors.New("proto: missing extension")
49
50// ExtensionRange represents a range of message extensions for a protocol buffer.
51// Used in code generated by the protocol compiler.
52type ExtensionRange struct {
53 Start, End int32 // both inclusive
54}
55
56// extendableProto is an interface implemented by any protocol buffer generated by the current
57// proto compiler that may be extended.
58type extendableProto interface {
59 Message
60 ExtensionRangeArray() []ExtensionRange
61 extensionsWrite() map[int32]Extension
62 extensionsRead() (map[int32]Extension, sync.Locker)
63}
64
65// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
66// version of the proto compiler that may be extended.
67type extendableProtoV1 interface {
68 Message
69 ExtensionRangeArray() []ExtensionRange
70 ExtensionMap() map[int32]Extension
71}
72
73// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
74type extensionAdapter struct {
75 extendableProtoV1
76}
77
78func (e extensionAdapter) extensionsWrite() map[int32]Extension {
79 return e.ExtensionMap()
80}
81
82func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
83 return e.ExtensionMap(), notLocker{}
84}
85
86// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
87type notLocker struct{}
88
89func (n notLocker) Lock() {}
90func (n notLocker) Unlock() {}
91
92// extendable returns the extendableProto interface for the given generated proto message.
93// If the proto message has the old extension format, it returns a wrapper that implements
94// the extendableProto interface.
95func extendable(p interface{}) (extendableProto, error) {
96 switch p := p.(type) {
97 case extendableProto:
98 if isNilPtr(p) {
99 return nil, fmt.Errorf("proto: nil %T is not extendable", p)
100 }
101 return p, nil
102 case extendableProtoV1:
103 if isNilPtr(p) {
104 return nil, fmt.Errorf("proto: nil %T is not extendable", p)
105 }
106 return extensionAdapter{p}, nil
107 }
108 // Don't allocate a specific error containing %T:
109 // this is the hot path for Clone and MarshalText.
110 return nil, errNotExtendable
111}
112
113var errNotExtendable = errors.New("proto: not an extendable proto.Message")
114
115func isNilPtr(x interface{}) bool {
116 v := reflect.ValueOf(x)
117 return v.Kind() == reflect.Ptr && v.IsNil()
118}
119
120// XXX_InternalExtensions is an internal representation of proto extensions.
121//
122// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
123// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
124//
125// The methods of XXX_InternalExtensions are not concurrency safe in general,
126// but calls to logically read-only methods such as has and get may be executed concurrently.
127type XXX_InternalExtensions struct {
128 // The struct must be indirect so that if a user inadvertently copies a
129 // generated message and its embedded XXX_InternalExtensions, they
130 // avoid the mayhem of a copied mutex.
131 //
132 // The mutex serializes all logically read-only operations to p.extensionMap.
133 // It is up to the client to ensure that write operations to p.extensionMap are
134 // mutually exclusive with other accesses.
135 p *struct {
136 mu sync.Mutex
137 extensionMap map[int32]Extension
138 }
139}
140
141// extensionsWrite returns the extension map, creating it on first use.
142func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
143 if e.p == nil {
144 e.p = new(struct {
145 mu sync.Mutex
146 extensionMap map[int32]Extension
147 })
148 e.p.extensionMap = make(map[int32]Extension)
149 }
150 return e.p.extensionMap
151}
152
153// extensionsRead returns the extensions map for read-only use. It may be nil.
154// The caller must hold the returned mutex's lock when accessing elements within the map.
155func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
156 if e.p == nil {
157 return nil, nil
158 }
159 return e.p.extensionMap, &e.p.mu
160}
161
162// ExtensionDesc represents an extension specification.
163// Used in generated code from the protocol compiler.
164type ExtensionDesc struct {
165 ExtendedType Message // nil pointer to the type that is being extended
166 ExtensionType interface{} // nil pointer to the extension type
167 Field int32 // field number
168 Name string // fully-qualified name of extension, for text formatting
169 Tag string // protobuf tag style
170 Filename string // name of the file in which the extension is defined
171}
172
173func (ed *ExtensionDesc) repeated() bool {
174 t := reflect.TypeOf(ed.ExtensionType)
175 return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
176}
177
178// Extension represents an extension in a message.
179type Extension struct {
180 // When an extension is stored in a message using SetExtension
181 // only desc and value are set. When the message is marshaled
182 // enc will be set to the encoded form of the message.
183 //
184 // When a message is unmarshaled and contains extensions, each
185 // extension will have only enc set. When such an extension is
186 // accessed using GetExtension (or GetExtensions) desc and value
187 // will be set.
188 desc *ExtensionDesc
189 value interface{}
190 enc []byte
191}
192
193// SetRawExtension is for testing only.
194func SetRawExtension(base Message, id int32, b []byte) {
195 epb, err := extendable(base)
196 if err != nil {
197 return
198 }
199 extmap := epb.extensionsWrite()
200 extmap[id] = Extension{enc: b}
201}
202
203// isExtensionField returns true iff the given field number is in an extension range.
204func isExtensionField(pb extendableProto, field int32) bool {
205 for _, er := range pb.ExtensionRangeArray() {
206 if er.Start <= field && field <= er.End {
207 return true
208 }
209 }
210 return false
211}
212
213// checkExtensionTypes checks that the given extension is valid for pb.
214func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
215 var pbi interface{} = pb
216 // Check the extended type.
217 if ea, ok := pbi.(extensionAdapter); ok {
218 pbi = ea.extendableProtoV1
219 }
220 if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
221 return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
222 }
223 // Check the range.
224 if !isExtensionField(pb, extension.Field) {
225 return errors.New("proto: bad extension number; not in declared ranges")
226 }
227 return nil
228}
229
230// extPropKey is sufficient to uniquely identify an extension.
231type extPropKey struct {
232 base reflect.Type
233 field int32
234}
235
236var extProp = struct {
237 sync.RWMutex
238 m map[extPropKey]*Properties
239}{
240 m: make(map[extPropKey]*Properties),
241}
242
243func extensionProperties(ed *ExtensionDesc) *Properties {
244 key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
245
246 extProp.RLock()
247 if prop, ok := extProp.m[key]; ok {
248 extProp.RUnlock()
249 return prop
250 }
251 extProp.RUnlock()
252
253 extProp.Lock()
254 defer extProp.Unlock()
255 // Check again.
256 if prop, ok := extProp.m[key]; ok {
257 return prop
258 }
259
260 prop := new(Properties)
261 prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
262 extProp.m[key] = prop
263 return prop
264}
265
266// HasExtension returns whether the given extension is present in pb.
267func HasExtension(pb Message, extension *ExtensionDesc) bool {
268 // TODO: Check types, field numbers, etc.?
269 epb, err := extendable(pb)
270 if err != nil {
271 return false
272 }
273 extmap, mu := epb.extensionsRead()
274 if extmap == nil {
275 return false
276 }
277 mu.Lock()
278 _, ok := extmap[extension.Field]
279 mu.Unlock()
280 return ok
281}
282
283// ClearExtension removes the given extension from pb.
284func ClearExtension(pb Message, extension *ExtensionDesc) {
285 epb, err := extendable(pb)
286 if err != nil {
287 return
288 }
289 // TODO: Check types, field numbers, etc.?
290 extmap := epb.extensionsWrite()
291 delete(extmap, extension.Field)
292}
293
294// GetExtension retrieves a proto2 extended field from pb.
295//
296// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
297// then GetExtension parses the encoded field and returns a Go value of the specified type.
298// If the field is not present, then the default value is returned (if one is specified),
299// otherwise ErrMissingExtension is reported.
300//
301// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
302// then GetExtension returns the raw encoded bytes of the field extension.
303func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
304 epb, err := extendable(pb)
305 if err != nil {
306 return nil, err
307 }
308
309 if extension.ExtendedType != nil {
310 // can only check type if this is a complete descriptor
311 if err := checkExtensionTypes(epb, extension); err != nil {
312 return nil, err
313 }
314 }
315
316 emap, mu := epb.extensionsRead()
317 if emap == nil {
318 return defaultExtensionValue(extension)
319 }
320 mu.Lock()
321 defer mu.Unlock()
322 e, ok := emap[extension.Field]
323 if !ok {
324 // defaultExtensionValue returns the default value or
325 // ErrMissingExtension if there is no default.
326 return defaultExtensionValue(extension)
327 }
328
329 if e.value != nil {
330 // Already decoded. Check the descriptor, though.
331 if e.desc != extension {
332 // This shouldn't happen. If it does, it means that
333 // GetExtension was called twice with two different
334 // descriptors with the same field number.
335 return nil, errors.New("proto: descriptor conflict")
336 }
337 return e.value, nil
338 }
339
340 if extension.ExtensionType == nil {
341 // incomplete descriptor
342 return e.enc, nil
343 }
344
345 v, err := decodeExtension(e.enc, extension)
346 if err != nil {
347 return nil, err
348 }
349
350 // Remember the decoded version and drop the encoded version.
351 // That way it is safe to mutate what we return.
352 e.value = v
353 e.desc = extension
354 e.enc = nil
355 emap[extension.Field] = e
356 return e.value, nil
357}
358
359// defaultExtensionValue returns the default value for extension.
360// If no default for an extension is defined ErrMissingExtension is returned.
361func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
362 if extension.ExtensionType == nil {
363 // incomplete descriptor, so no default
364 return nil, ErrMissingExtension
365 }
366
367 t := reflect.TypeOf(extension.ExtensionType)
368 props := extensionProperties(extension)
369
370 sf, _, err := fieldDefault(t, props)
371 if err != nil {
372 return nil, err
373 }
374
375 if sf == nil || sf.value == nil {
376 // There is no default value.
377 return nil, ErrMissingExtension
378 }
379
380 if t.Kind() != reflect.Ptr {
381 // We do not need to return a Ptr, we can directly return sf.value.
382 return sf.value, nil
383 }
384
385 // We need to return an interface{} that is a pointer to sf.value.
386 value := reflect.New(t).Elem()
387 value.Set(reflect.New(value.Type().Elem()))
388 if sf.kind == reflect.Int32 {
389 // We may have an int32 or an enum, but the underlying data is int32.
390 // Since we can't set an int32 into a non-int32 reflect.Value directly,
391 // set it as an int32.
392 value.Elem().SetInt(int64(sf.value.(int32)))
393 } else {
394 value.Elem().Set(reflect.ValueOf(sf.value))
395 }
396 return value.Interface(), nil
397}
398
399// decodeExtension decodes an extension encoded in b.
400func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
401 t := reflect.TypeOf(extension.ExtensionType)
402 unmarshal := typeUnmarshaler(t, extension.Tag)
403
404 // t is a pointer to a struct, a pointer to a basic type, or a slice.
405 // Allocate space to store the pointer/slice.
406 value := reflect.New(t).Elem()
407
408 var err error
409 for {
410 x, n := decodeVarint(b)
411 if n == 0 {
412 return nil, io.ErrUnexpectedEOF
413 }
414 b = b[n:]
415 wire := int(x) & 7
416
417 b, err = unmarshal(b, valToPointer(value.Addr()), wire)
418 if err != nil {
419 return nil, err
420 }
421
422 if len(b) == 0 {
423 break
424 }
425 }
426 return value.Interface(), nil
427}
428
429// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
430// The returned slice has the same length as es; missing extensions will appear as nil elements.
431func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
432 epb, err := extendable(pb)
433 if err != nil {
434 return nil, err
435 }
436 extensions = make([]interface{}, len(es))
437 for i, e := range es {
438 extensions[i], err = GetExtension(epb, e)
439 if err == ErrMissingExtension {
440 err = nil
441 }
442 if err != nil {
443 return
444 }
445 }
446 return
447}
448
449// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
450// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
451// just the Field field, which defines the extension's field number.
452func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
453 epb, err := extendable(pb)
454 if err != nil {
455 return nil, err
456 }
457 registeredExtensions := RegisteredExtensions(pb)
458
459 emap, mu := epb.extensionsRead()
460 if emap == nil {
461 return nil, nil
462 }
463 mu.Lock()
464 defer mu.Unlock()
465 extensions := make([]*ExtensionDesc, 0, len(emap))
466 for extid, e := range emap {
467 desc := e.desc
468 if desc == nil {
469 desc = registeredExtensions[extid]
470 if desc == nil {
471 desc = &ExtensionDesc{Field: extid}
472 }
473 }
474
475 extensions = append(extensions, desc)
476 }
477 return extensions, nil
478}
479
480// SetExtension sets the specified extension of pb to the specified value.
481func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
482 epb, err := extendable(pb)
483 if err != nil {
484 return err
485 }
486 if err := checkExtensionTypes(epb, extension); err != nil {
487 return err
488 }
489 typ := reflect.TypeOf(extension.ExtensionType)
490 if typ != reflect.TypeOf(value) {
491 return errors.New("proto: bad extension value type")
492 }
493 // nil extension values need to be caught early, because the
494 // encoder can't distinguish an ErrNil due to a nil extension
495 // from an ErrNil due to a missing field. Extensions are
496 // always optional, so the encoder would just swallow the error
497 // and drop all the extensions from the encoded message.
498 if reflect.ValueOf(value).IsNil() {
499 return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
500 }
501
502 extmap := epb.extensionsWrite()
503 extmap[extension.Field] = Extension{desc: extension, value: value}
504 return nil
505}
506
507// ClearAllExtensions clears all extensions from pb.
508func ClearAllExtensions(pb Message) {
509 epb, err := extendable(pb)
510 if err != nil {
511 return
512 }
513 m := epb.extensionsWrite()
514 for k := range m {
515 delete(m, k)
516 }
517}
518
519// A global registry of extensions.
520// The generated code will register the generated descriptors by calling RegisterExtension.
521
522var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
523
524// RegisterExtension is called from the generated code.
525func RegisterExtension(desc *ExtensionDesc) {
526 st := reflect.TypeOf(desc.ExtendedType).Elem()
527 m := extensionMaps[st]
528 if m == nil {
529 m = make(map[int32]*ExtensionDesc)
530 extensionMaps[st] = m
531 }
532 if _, ok := m[desc.Field]; ok {
533 panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
534 }
535 m[desc.Field] = desc
536}
537
538// RegisteredExtensions returns a map of the registered extensions of a
539// protocol buffer struct, indexed by the extension number.
540// The argument pb should be a nil pointer to the struct type.
541func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
542 return extensionMaps[reflect.TypeOf(pb).Elem()]
543}
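A rough sketch of the extension API defined in this file, assuming a hypothetical extendable proto2 message pb.Base with a registered extension descriptor pb.E_Nickname of type *string; both names are illustrative only.

	package main

	import (
		"fmt"
		"log"

		"github.com/golang/protobuf/proto"
		pb "./example.pb" // hypothetical generated package
	)

	func main() {
		msg := &pb.Base{}
		// SetExtension checks the descriptor against msg's extension ranges
		// and the declared extension type before storing the value.
		if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("gopher")); err != nil {
			log.Fatal(err)
		}
		if proto.HasExtension(msg, pb.E_Nickname) {
			// With a type-complete descriptor, GetExtension returns the decoded value.
			v, err := proto.GetExtension(msg, pb.E_Nickname)
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(*v.(*string)) // "gopher"
		}
		proto.ClearExtension(msg, pb.E_Nickname)
	}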
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..75565cc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,979 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32/*
33Package proto converts data structures to and from the wire format of
34protocol buffers. It works in concert with the Go source code generated
35for .proto files by the protocol compiler.
36
37A summary of the properties of the protocol buffer interface
38for a protocol buffer variable v:
39
40 - Names are turned from camel_case to CamelCase for export.
41 - There are no methods on v to set fields; just treat
42 them as structure fields.
43 - There are getters that return a field's value if set,
44 and return the field's default value if unset.
45 The getters work even if the receiver is a nil message.
46 - The zero value for a struct is its correct initialization state.
47 All desired fields must be set before marshaling.
48 - A Reset() method will restore a protobuf struct to its zero state.
49 - Non-repeated fields are pointers to the values; nil means unset.
50 That is, optional or required field int32 f becomes F *int32.
51 - Repeated fields are slices.
52 - Helper functions are available to aid the setting of fields.
53 msg.Foo = proto.String("hello") // set field
54 - Constants are defined to hold the default values of all fields that
55 have them. They have the form Default_StructName_FieldName.
56 Because the getter methods handle defaulted values,
57 direct use of these constants should be rare.
58 - Enums are given type names and maps from names to values.
59 Enum values are prefixed by the enclosing message's name, or by the
60 enum's type name if it is a top-level enum. Enum types have a String
61 method, and an Enum method to assist in message construction.
62 - Nested messages, groups and enums have type names prefixed with the name of
63 the surrounding message type.
64 - Extensions are given descriptor names that start with E_,
65 followed by an underscore-delimited list of the nested messages
66 that contain it (if any) followed by the CamelCased name of the
67 extension field itself. HasExtension, ClearExtension, GetExtension
68 and SetExtension are functions for manipulating extensions.
69 - Oneof field sets are given a single field in their message,
70 with distinguished wrapper types for each possible field value.
71 - Marshal and Unmarshal are functions to encode and decode the wire format.
72
73When the .proto file specifies `syntax="proto3"`, there are some differences:
74
75 - Non-repeated fields of non-message type are values instead of pointers.
76 - Enum types do not get an Enum method.
77
78The simplest way to describe this is to see an example.
79Given file test.proto, containing
80
81 package example;
82
83 enum FOO { X = 17; }
84
85 message Test {
86 required string label = 1;
87 optional int32 type = 2 [default=77];
88 repeated int64 reps = 3;
89 optional group OptionalGroup = 4 {
90 required string RequiredField = 5;
91 }
92 oneof union {
93 int32 number = 6;
94 string name = 7;
95 }
96 }
97
98The resulting file, test.pb.go, is:
99
100 package example
101
102 import proto "github.com/golang/protobuf/proto"
103 import math "math"
104
105 type FOO int32
106 const (
107 FOO_X FOO = 17
108 )
109 var FOO_name = map[int32]string{
110 17: "X",
111 }
112 var FOO_value = map[string]int32{
113 "X": 17,
114 }
115
116 func (x FOO) Enum() *FOO {
117 p := new(FOO)
118 *p = x
119 return p
120 }
121 func (x FOO) String() string {
122 return proto.EnumName(FOO_name, int32(x))
123 }
124 func (x *FOO) UnmarshalJSON(data []byte) error {
125 value, err := proto.UnmarshalJSONEnum(FOO_value, data)
126 if err != nil {
127 return err
128 }
129 *x = FOO(value)
130 return nil
131 }
132
133 type Test struct {
134 Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
135 Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
136 Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
137 Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
138 // Types that are valid to be assigned to Union:
139 // *Test_Number
140 // *Test_Name
141 Union isTest_Union `protobuf_oneof:"union"`
142 XXX_unrecognized []byte `json:"-"`
143 }
144 func (m *Test) Reset() { *m = Test{} }
145 func (m *Test) String() string { return proto.CompactTextString(m) }
146 func (*Test) ProtoMessage() {}
147
148 type isTest_Union interface {
149 isTest_Union()
150 }
151
152 type Test_Number struct {
153 Number int32 `protobuf:"varint,6,opt,name=number"`
154 }
155 type Test_Name struct {
156 Name string `protobuf:"bytes,7,opt,name=name"`
157 }
158
159 func (*Test_Number) isTest_Union() {}
160 func (*Test_Name) isTest_Union() {}
161
162 func (m *Test) GetUnion() isTest_Union {
163 if m != nil {
164 return m.Union
165 }
166 return nil
167 }
168 const Default_Test_Type int32 = 77
169
170 func (m *Test) GetLabel() string {
171 if m != nil && m.Label != nil {
172 return *m.Label
173 }
174 return ""
175 }
176
177 func (m *Test) GetType() int32 {
178 if m != nil && m.Type != nil {
179 return *m.Type
180 }
181 return Default_Test_Type
182 }
183
184 func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
185 if m != nil {
186 return m.Optionalgroup
187 }
188 return nil
189 }
190
191 type Test_OptionalGroup struct {
192 RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
193 }
194 func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
195 func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
196
197 func (m *Test_OptionalGroup) GetRequiredField() string {
198 if m != nil && m.RequiredField != nil {
199 return *m.RequiredField
200 }
201 return ""
202 }
203
204 func (m *Test) GetNumber() int32 {
205 if x, ok := m.GetUnion().(*Test_Number); ok {
206 return x.Number
207 }
208 return 0
209 }
210
211 func (m *Test) GetName() string {
212 if x, ok := m.GetUnion().(*Test_Name); ok {
213 return x.Name
214 }
215 return ""
216 }
217
218 func init() {
219 proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
220 }
221
222To create and play with a Test object:
223
224 package main
225
226 import (
227 "log"
228
229 "github.com/golang/protobuf/proto"
230 pb "./example.pb"
231 )
232
233 func main() {
234 test := &pb.Test{
235 Label: proto.String("hello"),
236 Type: proto.Int32(17),
237 Reps: []int64{1, 2, 3},
238 Optionalgroup: &pb.Test_OptionalGroup{
239 RequiredField: proto.String("good bye"),
240 },
241 Union: &pb.Test_Name{"fred"},
242 }
243 data, err := proto.Marshal(test)
244 if err != nil {
245 log.Fatal("marshaling error: ", err)
246 }
247 newTest := &pb.Test{}
248 err = proto.Unmarshal(data, newTest)
249 if err != nil {
250 log.Fatal("unmarshaling error: ", err)
251 }
252 // Now test and newTest contain the same data.
253 if test.GetLabel() != newTest.GetLabel() {
254 log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
255 }
256 // Use a type switch to determine which oneof was set.
257 switch u := test.Union.(type) {
258 case *pb.Test_Number: // u.Number contains the number.
259 case *pb.Test_Name: // u.Name contains the string.
260 }
261 // etc.
262 }
263*/
264package proto
265
266import (
267 "encoding/json"
268 "fmt"
269 "log"
270 "reflect"
271 "sort"
272 "strconv"
273 "sync"
274)
275
276// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
277// Marshal reports this when a required field is not initialized.
278// Unmarshal reports this when a required field is missing from the wire data.
279type RequiredNotSetError struct{ field string }
280
281func (e *RequiredNotSetError) Error() string {
282 if e.field == "" {
283 return fmt.Sprintf("proto: required field not set")
284 }
285 return fmt.Sprintf("proto: required field %q not set", e.field)
286}
287func (e *RequiredNotSetError) RequiredNotSet() bool {
288 return true
289}
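As a sketch of typical error handling (assuming the proto and log packages are imported and msg is any generated Message), a caller can treat this error as non-fatal with a type assertion:

	data, err := proto.Marshal(msg)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		// Non-fatal: a required field was left unset; the fields that
		// were set may still have been encoded into data.
		log.Printf("required field missing: %v", err)
	} else if err != nil {
		log.Fatal(err) // fatal marshaling error
	}
	_ = data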
290
291type invalidUTF8Error struct{ field string }
292
293func (e *invalidUTF8Error) Error() string {
294 if e.field == "" {
295 return "proto: invalid UTF-8 detected"
296 }
297 return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
298}
299func (e *invalidUTF8Error) InvalidUTF8() bool {
300 return true
301}
302
303// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
304// This error should not be exposed to the external API as such errors should
305// be recreated with the field information.
306var errInvalidUTF8 = &invalidUTF8Error{}
307
308// isNonFatal reports whether the error is either a RequiredNotSet error
309// or an InvalidUTF8 error.
310func isNonFatal(err error) bool {
311 if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
312 return true
313 }
314 if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
315 return true
316 }
317 return false
318}
319
320type nonFatal struct{ E error }
321
322// Merge merges err into nf and reports whether it was successfully merged.
323// It returns false for any fatal, non-nil error.
324func (nf *nonFatal) Merge(err error) (ok bool) {
325 if err == nil {
326 return true // not an error
327 }
328 if !isNonFatal(err) {
329 return false // fatal error
330 }
331 if nf.E == nil {
332 nf.E = err // store first instance of non-fatal error
333 }
334 return true
335}
336
337// Message is implemented by generated protocol buffer messages.
338type Message interface {
339 Reset()
340 String() string
341 ProtoMessage()
342}
343
344// Stats records allocation details about the protocol buffer encoders
345// and decoders. Useful for tuning the library itself.
346type Stats struct {
347 Emalloc uint64 // mallocs in encode
348 Dmalloc uint64 // mallocs in decode
349 Encode uint64 // number of encodes
350 Decode uint64 // number of decodes
351 Chit uint64 // number of cache hits
352 Cmiss uint64 // number of cache misses
353 Size uint64 // number of sizes
354}
355
356// Set to true to enable stats collection.
357const collectStats = false
358
359var stats Stats
360
361// GetStats returns a copy of the global Stats structure.
362func GetStats() Stats { return stats }
363
364// A Buffer is a buffer manager for marshaling and unmarshaling
365// protocol buffers. It may be reused between invocations to
366// reduce memory usage. It is not necessary to use a Buffer;
367// the global functions Marshal and Unmarshal create a
368// temporary Buffer and are fine for most applications.
369type Buffer struct {
370 buf []byte // encode/decode byte stream
371 index int // read point
372
373 deterministic bool
374}
375
376// NewBuffer allocates a new Buffer and initializes its internal data to
377// the contents of the argument slice.
378func NewBuffer(e []byte) *Buffer {
379 return &Buffer{buf: e}
380}
381
382// Reset resets the Buffer, ready for marshaling a new protocol buffer.
383func (p *Buffer) Reset() {
384 p.buf = p.buf[0:0] // for reading/writing
385 p.index = 0 // for reading
386}
387
388// SetBuf replaces the internal buffer with the slice,
389// ready for unmarshaling the contents of the slice.
390func (p *Buffer) SetBuf(s []byte) {
391 p.buf = s
392 p.index = 0
393}
394
395// Bytes returns the contents of the Buffer.
396func (p *Buffer) Bytes() []byte { return p.buf }
397
398// SetDeterministic sets whether to use deterministic serialization.
399//
400// Deterministic serialization guarantees that for a given binary, equal
401// messages will always be serialized to the same bytes. This implies:
402//
403// - Repeated serialization of a message will return the same bytes.
404// - Different processes of the same binary (which may be executing on
405// different machines) will serialize equal messages to the same bytes.
406//
407// Note that the deterministic serialization is NOT canonical across
408// languages. It is not guaranteed to remain stable over time. It is unstable
409// across different builds with schema changes due to unknown fields.
410// Users who need canonical serialization (e.g., persistent storage in a
411// canonical form, fingerprinting, etc.) should define their own
412// canonicalization specification and implement their own serializer rather
413// than relying on this API.
414//
415// If deterministic serialization is requested, map entries will be sorted
416// by keys in lexicographical order. This is an implementation detail and
417// subject to change.
418func (p *Buffer) SetDeterministic(deterministic bool) {
419 p.deterministic = deterministic
420}
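A small sketch of deterministic marshaling through a Buffer, assuming msg is any generated Message (Buffer.Marshal is defined elsewhere in this package):

	var buf proto.Buffer
	buf.SetDeterministic(true)
	if err := buf.Marshal(msg); err != nil {
		log.Fatal(err)
	}
	// Within a single binary, equal messages now serialize to identical
	// bytes; for example, map entries are emitted in sorted key order.
	data := buf.Bytes()
	_ = data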
421
422/*
423 * Helper routines for simplifying the creation of optional fields of basic type.
424 */
425
426// Bool is a helper routine that allocates a new bool value
427// to store v and returns a pointer to it.
428func Bool(v bool) *bool {
429 return &v
430}
431
432// Int32 is a helper routine that allocates a new int32 value
433// to store v and returns a pointer to it.
434func Int32(v int32) *int32 {
435 return &v
436}
437
438// Int is a helper routine that allocates a new int32 value
439// to store v and returns a pointer to it, but unlike Int32
440// its argument value is an int.
441func Int(v int) *int32 {
442 p := new(int32)
443 *p = int32(v)
444 return p
445}
446
447// Int64 is a helper routine that allocates a new int64 value
448// to store v and returns a pointer to it.
449func Int64(v int64) *int64 {
450 return &v
451}
452
453// Float32 is a helper routine that allocates a new float32 value
454// to store v and returns a pointer to it.
455func Float32(v float32) *float32 {
456 return &v
457}
458
459// Float64 is a helper routine that allocates a new float64 value
460// to store v and returns a pointer to it.
461func Float64(v float64) *float64 {
462 return &v
463}
464
465// Uint32 is a helper routine that allocates a new uint32 value
466// to store v and returns a pointer to it.
467func Uint32(v uint32) *uint32 {
468 return &v
469}
470
471// Uint64 is a helper routine that allocates a new uint64 value
472// to store v and returns a pointer to it.
473func Uint64(v uint64) *uint64 {
474 return &v
475}
476
477// String is a helper routine that allocates a new string value
478// to store v and returns a pointer to it.
479func String(v string) *string {
480 return &v
481}
482
483// EnumName is a helper function to simplify printing protocol buffer enums
484// by name. Given an enum map and a value, it returns a useful string.
485func EnumName(m map[int32]string, v int32) string {
486 s, ok := m[v]
487 if ok {
488 return s
489 }
490 return strconv.Itoa(int(v))
491}
492
493// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
494// from their JSON-encoded representation. Given a map from the enum's symbolic
495// names to its int values, and a byte buffer containing the JSON-encoded
496// value, it returns an int32 that can be cast to the enum type by the caller.
497//
498// The function can deal with both JSON representations, numeric and symbolic.
499func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
500 if data[0] == '"' {
501 // New style: enums are strings.
502 var repr string
503 if err := json.Unmarshal(data, &repr); err != nil {
504 return -1, err
505 }
506 val, ok := m[repr]
507 if !ok {
508 return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
509 }
510 return val, nil
511 }
512 // Old style: enums are ints.
513 var val int32
514 if err := json.Unmarshal(data, &val); err != nil {
515 return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
516 }
517 return val, nil
518}
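A brief sketch using the FOO enum from the package documentation above, assuming that generated code is imported as pb; the symbolic and numeric JSON forms decode to the same value:

	fmt.Println(proto.EnumName(pb.FOO_name, int32(pb.FOO_X))) // "X"

	v, err := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "example.FOO")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // 17; the numeric form []byte("17") yields the same value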
519
520// DebugPrint dumps the encoded data in b in a debugging format with a header
521// including the string s. Used in testing but made available for general debugging.
522func (p *Buffer) DebugPrint(s string, b []byte) {
523 var u uint64
524
525 obuf := p.buf
526 index := p.index
527 p.buf = b
528 p.index = 0
529 depth := 0
530
531 fmt.Printf("\n--- %s ---\n", s)
532
533out:
534 for {
535 for i := 0; i < depth; i++ {
536 fmt.Print(" ")
537 }
538
539 index := p.index
540 if index == len(p.buf) {
541 break
542 }
543
544 op, err := p.DecodeVarint()
545 if err != nil {
546 fmt.Printf("%3d: fetching op err %v\n", index, err)
547 break out
548 }
549 tag := op >> 3
550 wire := op & 7
551
552 switch wire {
553 default:
554 fmt.Printf("%3d: t=%3d unknown wire=%d\n",
555 index, tag, wire)
556 break out
557
558 case WireBytes:
559 var r []byte
560
561 r, err = p.DecodeRawBytes(false)
562 if err != nil {
563 break out
564 }
565 fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
566 if len(r) <= 6 {
567 for i := 0; i < len(r); i++ {
568 fmt.Printf(" %.2x", r[i])
569 }
570 } else {
571 for i := 0; i < 3; i++ {
572 fmt.Printf(" %.2x", r[i])
573 }
574 fmt.Printf(" ..")
575 for i := len(r) - 3; i < len(r); i++ {
576 fmt.Printf(" %.2x", r[i])
577 }
578 }
579 fmt.Printf("\n")
580
581 case WireFixed32:
582 u, err = p.DecodeFixed32()
583 if err != nil {
584 fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
585 break out
586 }
587 fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
588
589 case WireFixed64:
590 u, err = p.DecodeFixed64()
591 if err != nil {
592 fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
593 break out
594 }
595 fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
596
597 case WireVarint:
598 u, err = p.DecodeVarint()
599 if err != nil {
600 fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
601 break out
602 }
603 fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
604
605 case WireStartGroup:
606 fmt.Printf("%3d: t=%3d start\n", index, tag)
607 depth++
608
609 case WireEndGroup:
610 depth--
611 fmt.Printf("%3d: t=%3d end\n", index, tag)
612 }
613 }
614
615 if depth != 0 {
616 fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
617 }
618 fmt.Printf("\n")
619
620 p.buf = obuf
621 p.index = index
622}
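For illustration, a one-off dump of marshaled bytes held in data might look like this, using a throwaway Buffer:

	new(proto.Buffer).DebugPrint("round-trip check", data)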
623
624// SetDefaults sets unset protocol buffer fields to their default values.
625// It only modifies fields that are both unset and have defined defaults.
626// It recursively sets default values in any non-nil sub-messages.
627func SetDefaults(pb Message) {
628 setDefaults(reflect.ValueOf(pb), true, false)
629}
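A minimal sketch with the pb.Test message from the package documentation, whose type field declares default=77; the field names are carried over from that hypothetical example:

	msg := &pb.Test{}
	proto.SetDefaults(msg)
	// Type now points at the declared default, while fields without
	// declared defaults (such as Label) are left unset.
	fmt.Println(msg.Type != nil, *msg.Type) // true 77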
630
631// v is a pointer to a struct.
632func setDefaults(v reflect.Value, recur, zeros bool) {
633 v = v.Elem()
634
635 defaultMu.RLock()
636 dm, ok := defaults[v.Type()]
637 defaultMu.RUnlock()
638 if !ok {
639 dm = buildDefaultMessage(v.Type())
640 defaultMu.Lock()
641 defaults[v.Type()] = dm
642 defaultMu.Unlock()
643 }
644
645 for _, sf := range dm.scalars {
646 f := v.Field(sf.index)
647 if !f.IsNil() {
648 // field already set
649 continue
650 }
651 dv := sf.value
652 if dv == nil && !zeros {
653 // no explicit default, and don't want to set zeros
654 continue
655 }
656 fptr := f.Addr().Interface() // **T
657 // TODO: Consider batching the allocations we do here.
658 switch sf.kind {
659 case reflect.Bool:
660 b := new(bool)
661 if dv != nil {
662 *b = dv.(bool)
663 }
664 *(fptr.(**bool)) = b
665 case reflect.Float32:
666 f := new(float32)
667 if dv != nil {
668 *f = dv.(float32)
669 }
670 *(fptr.(**float32)) = f
671 case reflect.Float64:
672 f := new(float64)
673 if dv != nil {
674 *f = dv.(float64)
675 }
676 *(fptr.(**float64)) = f
677 case reflect.Int32:
678 // might be an enum
679 if ft := f.Type(); ft != int32PtrType {
680 // enum
681 f.Set(reflect.New(ft.Elem()))
682 if dv != nil {
683 f.Elem().SetInt(int64(dv.(int32)))
684 }
685 } else {
686 // int32 field
687 i := new(int32)
688 if dv != nil {
689 *i = dv.(int32)
690 }
691 *(fptr.(**int32)) = i
692 }
693 case reflect.Int64:
694 i := new(int64)
695 if dv != nil {
696 *i = dv.(int64)
697 }
698 *(fptr.(**int64)) = i
699 case reflect.String:
700 s := new(string)
701 if dv != nil {
702 *s = dv.(string)
703 }
704 *(fptr.(**string)) = s
705 case reflect.Uint8:
706 // exceptional case: []byte
707 var b []byte
708 if dv != nil {
709 db := dv.([]byte)
710 b = make([]byte, len(db))
711 copy(b, db)
712 } else {
713 b = []byte{}
714 }
715 *(fptr.(*[]byte)) = b
716 case reflect.Uint32:
717 u := new(uint32)
718 if dv != nil {
719 *u = dv.(uint32)
720 }
721 *(fptr.(**uint32)) = u
722 case reflect.Uint64:
723 u := new(uint64)
724 if dv != nil {
725 *u = dv.(uint64)
726 }
727 *(fptr.(**uint64)) = u
728 default:
729 log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
730 }
731 }
732
733 for _, ni := range dm.nested {
734 f := v.Field(ni)
735 // f is *T or []*T or map[T]*T
736 switch f.Kind() {
737 case reflect.Ptr:
738 if f.IsNil() {
739 continue
740 }
741 setDefaults(f, recur, zeros)
742
743 case reflect.Slice:
744 for i := 0; i < f.Len(); i++ {
745 e := f.Index(i)
746 if e.IsNil() {
747 continue
748 }
749 setDefaults(e, recur, zeros)
750 }
751
752 case reflect.Map:
753 for _, k := range f.MapKeys() {
754 e := f.MapIndex(k)
755 if e.IsNil() {
756 continue
757 }
758 setDefaults(e, recur, zeros)
759 }
760 }
761 }
762}
763
764var (
765 // defaults maps a protocol buffer struct type to a slice of the fields,
766 // with its scalar fields set to their proto-declared non-zero default values.
767 defaultMu sync.RWMutex
768 defaults = make(map[reflect.Type]defaultMessage)
769
770 int32PtrType = reflect.TypeOf((*int32)(nil))
771)
772
773// defaultMessage represents information about the default values of a message.
774type defaultMessage struct {
775 scalars []scalarField
776 nested []int // struct field index of nested messages
777}
778
779type scalarField struct {
780 index int // struct field index
781 kind reflect.Kind // element type (the T in *T or []T)
782 value interface{} // the proto-declared default value, or nil
783}
784
785// t is a struct type.
786func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
787 sprop := GetProperties(t)
788 for _, prop := range sprop.Prop {
789 fi, ok := sprop.decoderTags.get(prop.Tag)
790 if !ok {
791 // XXX_unrecognized
792 continue
793 }
794 ft := t.Field(fi).Type
795
796 sf, nested, err := fieldDefault(ft, prop)
797 switch {
798 case err != nil:
799 log.Print(err)
800 case nested:
801 dm.nested = append(dm.nested, fi)
802 case sf != nil:
803 sf.index = fi
804 dm.scalars = append(dm.scalars, *sf)
805 }
806 }
807
808 return dm
809}
810
811// fieldDefault returns the scalarField for field type ft.
812// sf will be nil if the field cannot have a default.
813// nestedMessage will be true if this is a nested message.
814// Note that sf.index is not set on return.
815func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
816 var canHaveDefault bool
817 switch ft.Kind() {
818 case reflect.Ptr:
819 if ft.Elem().Kind() == reflect.Struct {
820 nestedMessage = true
821 } else {
822 canHaveDefault = true // proto2 scalar field
823 }
824
825 case reflect.Slice:
826 switch ft.Elem().Kind() {
827 case reflect.Ptr:
828 nestedMessage = true // repeated message
829 case reflect.Uint8:
830 canHaveDefault = true // bytes field
831 }
832
833 case reflect.Map:
834 if ft.Elem().Kind() == reflect.Ptr {
835 nestedMessage = true // map with message values
836 }
837 }
838
839 if !canHaveDefault {
840 if nestedMessage {
841 return nil, true, nil
842 }
843 return nil, false, nil
844 }
845
846 // We now know that ft is a pointer or slice.
847 sf = &scalarField{kind: ft.Elem().Kind()}
848
849 // scalar fields without defaults
850 if !prop.HasDefault {
851 return sf, false, nil
852 }
853
854 // a scalar field: either *T or []byte
855 switch ft.Elem().Kind() {
856 case reflect.Bool:
857 x, err := strconv.ParseBool(prop.Default)
858 if err != nil {
859 return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
860 }
861 sf.value = x
862 case reflect.Float32:
863 x, err := strconv.ParseFloat(prop.Default, 32)
864 if err != nil {
865 return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
866 }
867 sf.value = float32(x)
868 case reflect.Float64:
869 x, err := strconv.ParseFloat(prop.Default, 64)
870 if err != nil {
871 return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
872 }
873 sf.value = x
874 case reflect.Int32:
875 x, err := strconv.ParseInt(prop.Default, 10, 32)
876 if err != nil {
877 return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
878 }
879 sf.value = int32(x)
880 case reflect.Int64:
881 x, err := strconv.ParseInt(prop.Default, 10, 64)
882 if err != nil {
883 return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
884 }
885 sf.value = x
886 case reflect.String:
887 sf.value = prop.Default
888 case reflect.Uint8:
889 // []byte (not *uint8)
890 sf.value = []byte(prop.Default)
891 case reflect.Uint32:
892 x, err := strconv.ParseUint(prop.Default, 10, 32)
893 if err != nil {
894 return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
895 }
896 sf.value = uint32(x)
897 case reflect.Uint64:
898 x, err := strconv.ParseUint(prop.Default, 10, 64)
899 if err != nil {
900 return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
901 }
902 sf.value = x
903 default:
904 return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
905 }
906
907 return sf, false, nil
908}
909
910// mapKeys returns a sort.Interface to be used for sorting the map keys.
911// Map fields may have key types of non-float scalars, strings and enums.
912func mapKeys(vs []reflect.Value) sort.Interface {
913 s := mapKeySorter{vs: vs}
914
915 // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
916 if len(vs) == 0 {
917 return s
918 }
919 switch vs[0].Kind() {
920 case reflect.Int32, reflect.Int64:
921 s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
922 case reflect.Uint32, reflect.Uint64:
923 s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
924 case reflect.Bool:
925 s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
926 case reflect.String:
927 s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
928 default:
929 panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
930 }
931
932 return s
933}
934
935type mapKeySorter struct {
936 vs []reflect.Value
937 less func(a, b reflect.Value) bool
938}
939
940func (s mapKeySorter) Len() int { return len(s.vs) }
941func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
942func (s mapKeySorter) Less(i, j int) bool {
943 return s.less(s.vs[i], s.vs[j])
944}
945
946// isProto3Zero reports whether v is a zero proto3 value.
947func isProto3Zero(v reflect.Value) bool {
948 switch v.Kind() {
949 case reflect.Bool:
950 return !v.Bool()
951 case reflect.Int32, reflect.Int64:
952 return v.Int() == 0
953 case reflect.Uint32, reflect.Uint64:
954 return v.Uint() == 0
955 case reflect.Float32, reflect.Float64:
956 return v.Float() == 0
957 case reflect.String:
958 return v.String() == ""
959 }
960 return false
961}
962
963// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
964// to assert that that code is compatible with this version of the proto package.
965const ProtoPackageIsVersion2 = true
966
967// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
968// to assert that that code is compatible with this version of the proto package.
969const ProtoPackageIsVersion1 = true
970
971// InternalMessageInfo is a type used internally by generated .pb.go files.
972// This type is not intended to be used by non-generated code.
973// This type is not subject to any compatibility guarantee.
974type InternalMessageInfo struct {
975 marshal *marshalInfo
976 unmarshal *unmarshalInfo
977 merge *mergeInfo
978 discard *discardInfo
979}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..3b6ca41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34/*
35 * Support for message sets.
36 */
37
38import (
39 "bytes"
40 "encoding/json"
41 "errors"
42 "fmt"
43 "reflect"
44 "sort"
45 "sync"
46)
47
48// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
49// A message type ID is required for storing a protocol buffer in a message set.
50var errNoMessageTypeID = errors.New("proto does not have a message type ID")
51
52// The first two types (_MessageSet_Item and messageSet)
53// model what the protocol compiler produces for the following protocol message:
54// message MessageSet {
55// repeated group Item = 1 {
56// required int32 type_id = 2;
57// required string message = 3;
58// };
59// }
60// That is the MessageSet wire format. We can't use a proto to generate these
61// because that would introduce a circular dependency between it and this package.
62
63type _MessageSet_Item struct {
64 TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
65 Message []byte `protobuf:"bytes,3,req,name=message"`
66}
67
68type messageSet struct {
69 Item []*_MessageSet_Item `protobuf:"group,1,rep"`
70 XXX_unrecognized []byte
71 // TODO: caching?
72}
73
74// Make sure messageSet is a Message.
75var _ Message = (*messageSet)(nil)
76
77// messageTypeIder is an interface satisfied by a protocol buffer type
78// that may be stored in a MessageSet.
79type messageTypeIder interface {
80 MessageTypeId() int32
81}
82
83func (ms *messageSet) find(pb Message) *_MessageSet_Item {
84 mti, ok := pb.(messageTypeIder)
85 if !ok {
86 return nil
87 }
88 id := mti.MessageTypeId()
89 for _, item := range ms.Item {
90 if *item.TypeId == id {
91 return item
92 }
93 }
94 return nil
95}
96
97func (ms *messageSet) Has(pb Message) bool {
98 return ms.find(pb) != nil
99}
100
101func (ms *messageSet) Unmarshal(pb Message) error {
102 if item := ms.find(pb); item != nil {
103 return Unmarshal(item.Message, pb)
104 }
105 if _, ok := pb.(messageTypeIder); !ok {
106 return errNoMessageTypeID
107 }
108 return nil // TODO: return error instead?
109}
110
111func (ms *messageSet) Marshal(pb Message) error {
112 msg, err := Marshal(pb)
113 if err != nil {
114 return err
115 }
116 if item := ms.find(pb); item != nil {
117 // reuse existing item
118 item.Message = msg
119 return nil
120 }
121
122 mti, ok := pb.(messageTypeIder)
123 if !ok {
124 return errNoMessageTypeID
125 }
126
127 mtid := mti.MessageTypeId()
128 ms.Item = append(ms.Item, &_MessageSet_Item{
129 TypeId: &mtid,
130 Message: msg,
131 })
132 return nil
133}
134
135func (ms *messageSet) Reset() { *ms = messageSet{} }
136func (ms *messageSet) String() string { return CompactTextString(ms) }
137func (*messageSet) ProtoMessage() {}
138
139// Support for the message_set_wire_format message option.
140
141func skipVarint(buf []byte) []byte {
142 i := 0
143 for ; buf[i]&0x80 != 0; i++ {
144 }
145 return buf[i+1:]
146}
147
148// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
149// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
150func MarshalMessageSet(exts interface{}) ([]byte, error) {
151 return marshalMessageSet(exts, false)
152}
153
154// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
155func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
156 switch exts := exts.(type) {
157 case *XXX_InternalExtensions:
158 var u marshalInfo
159 siz := u.sizeMessageSet(exts)
160 b := make([]byte, 0, siz)
161 return u.appendMessageSet(b, exts, deterministic)
162
163 case map[int32]Extension:
164 // This is an old-style extension map.
165 // Wrap it in a new-style XXX_InternalExtensions.
166 ie := XXX_InternalExtensions{
167 p: &struct {
168 mu sync.Mutex
169 extensionMap map[int32]Extension
170 }{
171 extensionMap: exts,
172 },
173 }
174
175 var u marshalInfo
176 siz := u.sizeMessageSet(&ie)
177 b := make([]byte, 0, siz)
178 return u.appendMessageSet(b, &ie, deterministic)
179
180 default:
181 return nil, errors.New("proto: not an extension map")
182 }
183}
184
185// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
186// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
187func UnmarshalMessageSet(buf []byte, exts interface{}) error {
188 var m map[int32]Extension
189 switch exts := exts.(type) {
190 case *XXX_InternalExtensions:
191 m = exts.extensionsWrite()
192 case map[int32]Extension:
193 m = exts
194 default:
195 return errors.New("proto: not an extension map")
196 }
197
198 ms := new(messageSet)
199 if err := Unmarshal(buf, ms); err != nil {
200 return err
201 }
202 for _, item := range ms.Item {
203 id := *item.TypeId
204 msg := item.Message
205
206 // Restore wire type and field number varint, plus length varint.
207 // Be careful to preserve duplicate items.
208 b := EncodeVarint(uint64(id)<<3 | WireBytes)
209 if ext, ok := m[id]; ok {
210 // Existing data; rip off the tag and length varint
211 // so we join the new data correctly.
212 // We can assume that ext.enc is set because we are unmarshaling.
213 o := ext.enc[len(b):] // skip wire type and field number
214 _, n := DecodeVarint(o) // calculate length of length varint
215 o = o[n:] // skip length varint
216 msg = append(o, msg...) // join old data and new data
217 }
218 b = append(b, EncodeVarint(uint64(len(msg)))...)
219 b = append(b, msg...)
220
221 m[id] = Extension{enc: b}
222 }
223 return nil
224}
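As a side note, the tag/length reconstruction performed above can be reproduced outside this package with the exported helpers. This is only a minimal standalone sketch; the field number and payload bytes are made up for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	id := int32(12345)                       // hypothetical extension field number
	msg := []byte{0x0a, 0x03, 'f', 'o', 'o'} // hypothetical encoded sub-message

	// Tag varint: field number shifted left by 3, OR'd with wire type 2 (bytes),
	// followed by a length varint and the payload, as in UnmarshalMessageSet above.
	b := proto.EncodeVarint(uint64(id)<<3 | proto.WireBytes)
	b = append(b, proto.EncodeVarint(uint64(len(msg)))...)
	b = append(b, msg...)

	fmt.Printf("% x\n", b)
}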
225
226// MarshalMessageSetJSON encodes the extension map exts in JSON format.
227// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
228func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
229 var m map[int32]Extension
230 switch exts := exts.(type) {
231 case *XXX_InternalExtensions:
232 var mu sync.Locker
233 m, mu = exts.extensionsRead()
234 if m != nil {
235 // Keep the extensions map locked until we're done marshaling to prevent
236 // races between marshaling and unmarshaling the lazily-{en,de}coded
237 // values.
238 mu.Lock()
239 defer mu.Unlock()
240 }
241 case map[int32]Extension:
242 m = exts
243 default:
244 return nil, errors.New("proto: not an extension map")
245 }
246 var b bytes.Buffer
247 b.WriteByte('{')
248
249 // Process the map in key order for deterministic output.
250 ids := make([]int32, 0, len(m))
251 for id := range m {
252 ids = append(ids, id)
253 }
254 sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
255
256 for i, id := range ids {
257 ext := m[id]
258 msd, ok := messageSetMap[id]
259 if !ok {
260 // Unknown type; we can't render it, so skip it.
261 continue
262 }
263
264 if i > 0 && b.Len() > 1 {
265 b.WriteByte(',')
266 }
267
268 fmt.Fprintf(&b, `"[%s]":`, msd.name)
269
270 x := ext.value
271 if x == nil {
272 x = reflect.New(msd.t.Elem()).Interface()
273 if err := Unmarshal(ext.enc, x.(Message)); err != nil {
274 return nil, err
275 }
276 }
277 d, err := json.Marshal(x)
278 if err != nil {
279 return nil, err
280 }
281 b.Write(d)
282 }
283 b.WriteByte('}')
284 return b.Bytes(), nil
285}
286
287// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
288// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
289func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
290 // Common-case fast path.
291 if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
292 return nil
293 }
294
295 // This is fairly tricky, and it's not clear that it is needed.
296 return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
297}
298
299// A global registry of types that can be used in a MessageSet.
300
301var messageSetMap = make(map[int32]messageSetDesc)
302
303type messageSetDesc struct {
304 t reflect.Type // pointer to struct
305 name string
306}
307
308// RegisterMessageSetType is called from the generated code.
309func RegisterMessageSetType(m Message, fieldNum int32, name string) {
310 messageSetMap[fieldNum] = messageSetDesc{
311 t: reflect.TypeOf(m),
312 name: name,
313 }
314}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2012 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32// +build purego appengine js
33
34// This file contains an implementation of proto field accesses using package reflect.
35// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
36// be used on App Engine.
37
38package proto
39
40import (
41 "reflect"
42 "sync"
43)
44
45const unsafeAllowed = false
46
47// A field identifies a field in a struct, accessible from a pointer.
48// In this implementation, a field is identified by the sequence of field indices
49// passed to reflect's FieldByIndex.
50type field []int
51
52// toField returns a field equivalent to the given reflect field.
53func toField(f *reflect.StructField) field {
54 return f.Index
55}
56
57// invalidField is an invalid field identifier.
58var invalidField = field(nil)
59
60// zeroField is a noop when calling pointer.offset.
61var zeroField = field([]int{})
62
63// IsValid reports whether the field identifier is valid.
64func (f field) IsValid() bool { return f != nil }
65
66// The pointer type is for the table-driven decoder.
67// The implementation here uses a reflect.Value of pointer type to
68// create a generic pointer. In pointer_unsafe.go we use unsafe
69// instead of reflect to implement the same (but faster) interface.
70type pointer struct {
71 v reflect.Value
72}
73
74// toPointer converts an interface of pointer type to a pointer
75// that points to the same target.
76func toPointer(i *Message) pointer {
77 return pointer{v: reflect.ValueOf(*i)}
78}
79
80// toAddrPointer converts an interface to a pointer that points to
81// the interface data.
82func toAddrPointer(i *interface{}, isptr bool) pointer {
83 v := reflect.ValueOf(*i)
84 u := reflect.New(v.Type())
85 u.Elem().Set(v)
86 return pointer{v: u}
87}
88
89// valToPointer converts v to a pointer. v must be of pointer type.
90func valToPointer(v reflect.Value) pointer {
91 return pointer{v: v}
92}
93
94// offset converts from a pointer to a structure to a pointer to
95// one of its fields.
96func (p pointer) offset(f field) pointer {
97 return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
98}
99
100func (p pointer) isNil() bool {
101 return p.v.IsNil()
102}
103
104// grow updates the slice s in place to make it one element longer.
105// s must be addressable.
106// Returns the (addressable) new element.
107func grow(s reflect.Value) reflect.Value {
108 n, m := s.Len(), s.Cap()
109 if n < m {
110 s.SetLen(n + 1)
111 } else {
112 s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
113 }
114 return s.Index(n)
115}
116
117func (p pointer) toInt64() *int64 {
118 return p.v.Interface().(*int64)
119}
120func (p pointer) toInt64Ptr() **int64 {
121 return p.v.Interface().(**int64)
122}
123func (p pointer) toInt64Slice() *[]int64 {
124 return p.v.Interface().(*[]int64)
125}
126
127var int32ptr = reflect.TypeOf((*int32)(nil))
128
129func (p pointer) toInt32() *int32 {
130 return p.v.Convert(int32ptr).Interface().(*int32)
131}
132
133// The toInt32Ptr/Slice methods don't work because of enums.
134// Instead, we must use set/get methods for the int32ptr/slice case.
135/*
136 func (p pointer) toInt32Ptr() **int32 {
137 return p.v.Interface().(**int32)
138}
139 func (p pointer) toInt32Slice() *[]int32 {
140 return p.v.Interface().(*[]int32)
141}
142*/
143func (p pointer) getInt32Ptr() *int32 {
144 if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
145 // raw int32 type
146 return p.v.Elem().Interface().(*int32)
147 }
148 // an enum
149 return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
150}
151func (p pointer) setInt32Ptr(v int32) {
152 // Allocate value in a *int32. Possibly convert that to a *enum.
153 // Then assign it to a **int32 or **enum.
154 // Note: we can convert *int32 to *enum, but we can't convert
155 // **int32 to **enum!
156 p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
157}
158
159// getInt32Slice copies []int32 from p as a new slice.
160// This behavior differs from the implementation in pointer_unsafe.go.
161func (p pointer) getInt32Slice() []int32 {
162 if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
163 // raw int32 type
164 return p.v.Elem().Interface().([]int32)
165 }
166 // an enum
167 // Allocate a []int32, then assign []enum's values into it.
168 // Note: we can't convert []enum to []int32.
169 slice := p.v.Elem()
170 s := make([]int32, slice.Len())
171 for i := 0; i < slice.Len(); i++ {
172 s[i] = int32(slice.Index(i).Int())
173 }
174 return s
175}
176
177// setInt32Slice copies []int32 into p as a new slice.
178// This behavior differs from the implementation in pointer_unsafe.go.
179func (p pointer) setInt32Slice(v []int32) {
180 if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
181 // raw int32 type
182 p.v.Elem().Set(reflect.ValueOf(v))
183 return
184 }
185 // an enum
186 // Allocate a []enum, then assign []int32's values into it.
187 // Note: we can't convert []enum to []int32.
188 slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
189 for i, x := range v {
190 slice.Index(i).SetInt(int64(x))
191 }
192 p.v.Elem().Set(slice)
193}
194func (p pointer) appendInt32Slice(v int32) {
195 grow(p.v.Elem()).SetInt(int64(v))
196}
197
198func (p pointer) toUint64() *uint64 {
199 return p.v.Interface().(*uint64)
200}
201func (p pointer) toUint64Ptr() **uint64 {
202 return p.v.Interface().(**uint64)
203}
204func (p pointer) toUint64Slice() *[]uint64 {
205 return p.v.Interface().(*[]uint64)
206}
207func (p pointer) toUint32() *uint32 {
208 return p.v.Interface().(*uint32)
209}
210func (p pointer) toUint32Ptr() **uint32 {
211 return p.v.Interface().(**uint32)
212}
213func (p pointer) toUint32Slice() *[]uint32 {
214 return p.v.Interface().(*[]uint32)
215}
216func (p pointer) toBool() *bool {
217 return p.v.Interface().(*bool)
218}
219func (p pointer) toBoolPtr() **bool {
220 return p.v.Interface().(**bool)
221}
222func (p pointer) toBoolSlice() *[]bool {
223 return p.v.Interface().(*[]bool)
224}
225func (p pointer) toFloat64() *float64 {
226 return p.v.Interface().(*float64)
227}
228func (p pointer) toFloat64Ptr() **float64 {
229 return p.v.Interface().(**float64)
230}
231func (p pointer) toFloat64Slice() *[]float64 {
232 return p.v.Interface().(*[]float64)
233}
234func (p pointer) toFloat32() *float32 {
235 return p.v.Interface().(*float32)
236}
237func (p pointer) toFloat32Ptr() **float32 {
238 return p.v.Interface().(**float32)
239}
240func (p pointer) toFloat32Slice() *[]float32 {
241 return p.v.Interface().(*[]float32)
242}
243func (p pointer) toString() *string {
244 return p.v.Interface().(*string)
245}
246func (p pointer) toStringPtr() **string {
247 return p.v.Interface().(**string)
248}
249func (p pointer) toStringSlice() *[]string {
250 return p.v.Interface().(*[]string)
251}
252func (p pointer) toBytes() *[]byte {
253 return p.v.Interface().(*[]byte)
254}
255func (p pointer) toBytesSlice() *[][]byte {
256 return p.v.Interface().(*[][]byte)
257}
258func (p pointer) toExtensions() *XXX_InternalExtensions {
259 return p.v.Interface().(*XXX_InternalExtensions)
260}
261func (p pointer) toOldExtensions() *map[int32]Extension {
262 return p.v.Interface().(*map[int32]Extension)
263}
264func (p pointer) getPointer() pointer {
265 return pointer{v: p.v.Elem()}
266}
267func (p pointer) setPointer(q pointer) {
268 p.v.Elem().Set(q.v)
269}
270func (p pointer) appendPointer(q pointer) {
271 grow(p.v.Elem()).Set(q.v)
272}
273
274// getPointerSlice copies []*T from p as a new []pointer.
275// This behavior differs from the implementation in pointer_unsafe.go.
276func (p pointer) getPointerSlice() []pointer {
277 if p.v.IsNil() {
278 return nil
279 }
280 n := p.v.Elem().Len()
281 s := make([]pointer, n)
282 for i := 0; i < n; i++ {
283 s[i] = pointer{v: p.v.Elem().Index(i)}
284 }
285 return s
286}
287
288// setPointerSlice copies []pointer into p as a new []*T.
289// This behavior differs from the implementation in pointer_unsafe.go.
290func (p pointer) setPointerSlice(v []pointer) {
291 if v == nil {
292 p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
293 return
294 }
295 s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
296 for _, p := range v {
297 s = reflect.Append(s, p.v)
298 }
299 p.v.Elem().Set(s)
300}
301
302// getInterfacePointer returns a pointer that points to the
303// interface data of the interface pointed by p.
304func (p pointer) getInterfacePointer() pointer {
305 if p.v.Elem().IsNil() {
306 return pointer{v: p.v.Elem()}
307 }
308 return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
309}
310
311func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
312 // TODO: check that p.v.Type().Elem() == t?
313 return p.v
314}
315
316func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
317 atomicLock.Lock()
318 defer atomicLock.Unlock()
319 return *p
320}
321func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
322 atomicLock.Lock()
323 defer atomicLock.Unlock()
324 *p = v
325}
326func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
327 atomicLock.Lock()
328 defer atomicLock.Unlock()
329 return *p
330}
331func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
332 atomicLock.Lock()
333 defer atomicLock.Unlock()
334 *p = v
335}
336func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
337 atomicLock.Lock()
338 defer atomicLock.Unlock()
339 return *p
340}
341func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
342 atomicLock.Lock()
343 defer atomicLock.Unlock()
344 *p = v
345}
346func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
347 atomicLock.Lock()
348 defer atomicLock.Unlock()
349 return *p
350}
351func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
352 atomicLock.Lock()
353 defer atomicLock.Unlock()
354 *p = v
355}
356
357var atomicLock sync.Mutex
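For orientation, the reflect-based access pattern used in this file can be sketched standalone: a field identifier is an index path, and offset() becomes FieldByIndex(...).Addr() on the pointed-to struct. The struct type below is invented for illustration and is not part of this package.

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Count *int64
}

func main() {
	m := &msg{}
	f := reflect.TypeOf(*m).Field(0).Index // the "field" identifier: an index path

	// Roughly the equivalent of pointer{v: reflect.ValueOf(m)}.offset(f).toInt64Ptr().
	pv := reflect.ValueOf(m)
	fieldAddr := pv.Elem().FieldByIndex(f).Addr()
	pp := fieldAddr.Interface().(**int64)

	v := int64(42)
	*pp = &v
	fmt.Println(*m.Count) // 42
}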
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2012 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32// +build !purego,!appengine,!js
33
34// This file contains the implementation of the proto field accesses using package unsafe.
35
36package proto
37
38import (
39 "reflect"
40 "sync/atomic"
41 "unsafe"
42)
43
44const unsafeAllowed = true
45
46// A field identifies a field in a struct, accessible from a pointer.
47// In this implementation, a field is identified by its byte offset from the start of the struct.
48type field uintptr
49
50// toField returns a field equivalent to the given reflect field.
51func toField(f *reflect.StructField) field {
52 return field(f.Offset)
53}
54
55// invalidField is an invalid field identifier.
56const invalidField = ^field(0)
57
58// zeroField is a noop when calling pointer.offset.
59const zeroField = field(0)
60
61// IsValid reports whether the field identifier is valid.
62func (f field) IsValid() bool {
63 return f != invalidField
64}
65
66// The pointer type below is for the new table-driven encoder/decoder.
67// The implementation here uses unsafe.Pointer to create a generic pointer.
68// In pointer_reflect.go we use reflect instead of unsafe to implement
69// the same (but slower) interface.
70type pointer struct {
71 p unsafe.Pointer
72}
73
74// size of pointer
75var ptrSize = unsafe.Sizeof(uintptr(0))
76
77// toPointer converts an interface of pointer type to a pointer
78// that points to the same target.
79func toPointer(i *Message) pointer {
80 // Super-tricky - read pointer out of data word of interface value.
81 // Saves ~25ns over the equivalent:
82 // return valToPointer(reflect.ValueOf(*i))
83 return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
84}
85
86// toAddrPointer converts an interface to a pointer that points to
87// the interface data.
88func toAddrPointer(i *interface{}, isptr bool) pointer {
89 // Super-tricky - read or get the address of data word of interface value.
90 if isptr {
91 // The interface is of pointer type, thus it is a direct interface.
92 // The data word is the pointer data itself. We take its address.
93 return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
94 }
95 // The interface is not of pointer type. The data word is the pointer
96 // to the data.
97 return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
98}
99
100// valToPointer converts v to a pointer. v must be of pointer type.
101func valToPointer(v reflect.Value) pointer {
102 return pointer{p: unsafe.Pointer(v.Pointer())}
103}
104
105// offset converts from a pointer to a structure to a pointer to
106// one of its fields.
107func (p pointer) offset(f field) pointer {
108 // For safety, we should panic if !f.IsValid, however calling panic causes
109 // this to no longer be inlineable, which is a serious performance cost.
110 /*
111 if !f.IsValid() {
112 panic("invalid field")
113 }
114 */
115 return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
116}
117
118func (p pointer) isNil() bool {
119 return p.p == nil
120}
121
122func (p pointer) toInt64() *int64 {
123 return (*int64)(p.p)
124}
125func (p pointer) toInt64Ptr() **int64 {
126 return (**int64)(p.p)
127}
128func (p pointer) toInt64Slice() *[]int64 {
129 return (*[]int64)(p.p)
130}
131func (p pointer) toInt32() *int32 {
132 return (*int32)(p.p)
133}
134
135// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
136/*
137 func (p pointer) toInt32Ptr() **int32 {
138 return (**int32)(p.p)
139 }
140 func (p pointer) toInt32Slice() *[]int32 {
141 return (*[]int32)(p.p)
142 }
143*/
144func (p pointer) getInt32Ptr() *int32 {
145 return *(**int32)(p.p)
146}
147func (p pointer) setInt32Ptr(v int32) {
148 *(**int32)(p.p) = &v
149}
150
151// getInt32Slice loads a []int32 from p.
152// The value returned is aliased with the original slice.
153// This behavior differs from the implementation in pointer_reflect.go.
154func (p pointer) getInt32Slice() []int32 {
155 return *(*[]int32)(p.p)
156}
157
158// setInt32Slice stores a []int32 to p.
159// The value set is aliased with the input slice.
160// This behavior differs from the implementation in pointer_reflect.go.
161func (p pointer) setInt32Slice(v []int32) {
162 *(*[]int32)(p.p) = v
163}
164
165// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
166func (p pointer) appendInt32Slice(v int32) {
167 s := (*[]int32)(p.p)
168 *s = append(*s, v)
169}
170
171func (p pointer) toUint64() *uint64 {
172 return (*uint64)(p.p)
173}
174func (p pointer) toUint64Ptr() **uint64 {
175 return (**uint64)(p.p)
176}
177func (p pointer) toUint64Slice() *[]uint64 {
178 return (*[]uint64)(p.p)
179}
180func (p pointer) toUint32() *uint32 {
181 return (*uint32)(p.p)
182}
183func (p pointer) toUint32Ptr() **uint32 {
184 return (**uint32)(p.p)
185}
186func (p pointer) toUint32Slice() *[]uint32 {
187 return (*[]uint32)(p.p)
188}
189func (p pointer) toBool() *bool {
190 return (*bool)(p.p)
191}
192func (p pointer) toBoolPtr() **bool {
193 return (**bool)(p.p)
194}
195func (p pointer) toBoolSlice() *[]bool {
196 return (*[]bool)(p.p)
197}
198func (p pointer) toFloat64() *float64 {
199 return (*float64)(p.p)
200}
201func (p pointer) toFloat64Ptr() **float64 {
202 return (**float64)(p.p)
203}
204func (p pointer) toFloat64Slice() *[]float64 {
205 return (*[]float64)(p.p)
206}
207func (p pointer) toFloat32() *float32 {
208 return (*float32)(p.p)
209}
210func (p pointer) toFloat32Ptr() **float32 {
211 return (**float32)(p.p)
212}
213func (p pointer) toFloat32Slice() *[]float32 {
214 return (*[]float32)(p.p)
215}
216func (p pointer) toString() *string {
217 return (*string)(p.p)
218}
219func (p pointer) toStringPtr() **string {
220 return (**string)(p.p)
221}
222func (p pointer) toStringSlice() *[]string {
223 return (*[]string)(p.p)
224}
225func (p pointer) toBytes() *[]byte {
226 return (*[]byte)(p.p)
227}
228func (p pointer) toBytesSlice() *[][]byte {
229 return (*[][]byte)(p.p)
230}
231func (p pointer) toExtensions() *XXX_InternalExtensions {
232 return (*XXX_InternalExtensions)(p.p)
233}
234func (p pointer) toOldExtensions() *map[int32]Extension {
235 return (*map[int32]Extension)(p.p)
236}
237
238// getPointerSlice loads []*T from p as a []pointer.
239// The value returned is aliased with the original slice.
240// This behavior differs from the implementation in pointer_reflect.go.
241func (p pointer) getPointerSlice() []pointer {
242 // Super-tricky - p should point to a []*T where T is a
243 // message type. We load it as []pointer.
244 return *(*[]pointer)(p.p)
245}
246
247// setPointerSlice stores []pointer into p as a []*T.
248// The value set is aliased with the input slice.
249// This behavior differs from the implementation in pointer_reflect.go.
250func (p pointer) setPointerSlice(v []pointer) {
251 // Super-tricky - p should point to a []*T where T is a
252 // message type. We store it as []pointer.
253 *(*[]pointer)(p.p) = v
254}
255
256// getPointer loads the pointer at p and returns it.
257func (p pointer) getPointer() pointer {
258 return pointer{p: *(*unsafe.Pointer)(p.p)}
259}
260
261// setPointer stores the pointer q at p.
262func (p pointer) setPointer(q pointer) {
263 *(*unsafe.Pointer)(p.p) = q.p
264}
265
266// append q to the slice pointed to by p.
267func (p pointer) appendPointer(q pointer) {
268 s := (*[]unsafe.Pointer)(p.p)
269 *s = append(*s, q.p)
270}
271
272// getInterfacePointer returns a pointer that points to the
273// interface data of the interface pointed by p.
274func (p pointer) getInterfacePointer() pointer {
275 // Super-tricky - read pointer out of data word of interface value.
276 return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
277}
278
279// asPointerTo returns a reflect.Value that is a pointer to an
280// object of type t stored at p.
281func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
282 return reflect.NewAt(t, p.p)
283}
284
285func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
286 return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
287}
288func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
289 atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
290}
291func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
292 return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
293}
294func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
295 atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
296}
297func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
298 return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
299}
300func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
301 atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
302}
303func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
304 return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
305}
306func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
307 atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
308}
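The unsafe counterpart can be sketched the same way: here a field identifier is a byte offset from the start of the struct, and offset() is plain pointer arithmetic. Again, the struct is invented for illustration only.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B int64
}

func main() {
	m := &msg{A: 1, B: 2}

	off := reflect.TypeOf(*m).Field(1).Offset // field identifier: a uintptr offset
	base := unsafe.Pointer(m)

	// Roughly the equivalent of pointer{p: base}.offset(field(off)).toInt64().
	p := (*int64)(unsafe.Pointer(uintptr(base) + off))
	*p = 99
	fmt.Println(m.B) // 99
}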
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..50b99b8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34/*
35 * Routines for encoding data into the wire format for protocol buffers.
36 */
37
38import (
39 "fmt"
40 "log"
41 "os"
42 "reflect"
43 "sort"
44 "strconv"
45 "strings"
46 "sync"
47)
48
49const debug bool = false
50
51// Constants that identify the encoding of a value on the wire.
52const (
53 WireVarint = 0
54 WireFixed64 = 1
55 WireBytes = 2
56 WireStartGroup = 3
57 WireEndGroup = 4
58 WireFixed32 = 5
59)
60
61// tagMap is an optimization over map[int]int for typical protocol buffer
62// use-cases. Encoded protocol buffers are often in tag order with small tag
63// numbers.
64type tagMap struct {
65 fastTags []int
66 slowTags map[int]int
67}
68
69// tagMapFastLimit is the upper bound on the tag number that will be stored in
70// the tagMap slice rather than its map.
71const tagMapFastLimit = 1024
72
73func (p *tagMap) get(t int) (int, bool) {
74 if t > 0 && t < tagMapFastLimit {
75 if t >= len(p.fastTags) {
76 return 0, false
77 }
78 fi := p.fastTags[t]
79 return fi, fi >= 0
80 }
81 fi, ok := p.slowTags[t]
82 return fi, ok
83}
84
85func (p *tagMap) put(t int, fi int) {
86 if t > 0 && t < tagMapFastLimit {
87 for len(p.fastTags) < t+1 {
88 p.fastTags = append(p.fastTags, -1)
89 }
90 p.fastTags[t] = fi
91 return
92 }
93 if p.slowTags == nil {
94 p.slowTags = make(map[int]int)
95 }
96 p.slowTags[t] = fi
97}
98
99// StructProperties represents properties for all the fields of a struct.
100// decoderTags and decoderOrigNames should only be used by the decoder.
101type StructProperties struct {
102 Prop []*Properties // properties for each field
103 reqCount int // required count
104 decoderTags tagMap // map from proto tag to struct field number
105 decoderOrigNames map[string]int // map from original name to struct field number
106 order []int // list of struct field numbers in tag order
107
108 // OneofTypes contains information about the oneof fields in this message.
109 // It is keyed by the original name of a field.
110 OneofTypes map[string]*OneofProperties
111}
112
113// OneofProperties represents information about a specific field in a oneof.
114type OneofProperties struct {
115 Type reflect.Type // pointer to generated struct type for this oneof field
116 Field int // struct field number of the containing oneof in the message
117 Prop *Properties
118}
119
120// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
121// See encode.go, (*Buffer).enc_struct.
122
123func (sp *StructProperties) Len() int { return len(sp.order) }
124func (sp *StructProperties) Less(i, j int) bool {
125 return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
126}
127func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
128
129// Properties represents the protocol-specific behavior of a single struct field.
130type Properties struct {
131 Name string // name of the field, for error messages
132 OrigName string // original name before protocol compiler (always set)
133 JSONName string // name to use for JSON; determined by protoc
134 Wire string
135 WireType int
136 Tag int
137 Required bool
138 Optional bool
139 Repeated bool
140 Packed bool // relevant for repeated primitives only
141 Enum string // set for enum types only
142 proto3 bool // whether this is known to be a proto3 field
143 oneof bool // whether this is a oneof field
144
145 Default string // default value
146 HasDefault bool // whether an explicit default was provided
147
148 stype reflect.Type // set for struct types only
149 sprop *StructProperties // set for struct types only
150
151 mtype reflect.Type // set for map types only
152 MapKeyProp *Properties // set for map types only
153 MapValProp *Properties // set for map types only
154}
155
156// String formats the properties in the protobuf struct field tag style.
157func (p *Properties) String() string {
158 s := p.Wire
159 s += ","
160 s += strconv.Itoa(p.Tag)
161 if p.Required {
162 s += ",req"
163 }
164 if p.Optional {
165 s += ",opt"
166 }
167 if p.Repeated {
168 s += ",rep"
169 }
170 if p.Packed {
171 s += ",packed"
172 }
173 s += ",name=" + p.OrigName
174 if p.JSONName != p.OrigName {
175 s += ",json=" + p.JSONName
176 }
177 if p.proto3 {
178 s += ",proto3"
179 }
180 if p.oneof {
181 s += ",oneof"
182 }
183 if len(p.Enum) > 0 {
184 s += ",enum=" + p.Enum
185 }
186 if p.HasDefault {
187 s += ",def=" + p.Default
188 }
189 return s
190}
191
192// Parse populates p by parsing a string in the protobuf struct field tag style.
193func (p *Properties) Parse(s string) {
194 // "bytes,49,opt,name=foo,def=hello!"
195 fields := strings.Split(s, ",") // breaks def=, but handled below.
196 if len(fields) < 2 {
197 fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
198 return
199 }
200
201 p.Wire = fields[0]
202 switch p.Wire {
203 case "varint":
204 p.WireType = WireVarint
205 case "fixed32":
206 p.WireType = WireFixed32
207 case "fixed64":
208 p.WireType = WireFixed64
209 case "zigzag32":
210 p.WireType = WireVarint
211 case "zigzag64":
212 p.WireType = WireVarint
213 case "bytes", "group":
214 p.WireType = WireBytes
215 // no numeric converter for non-numeric types
216 default:
217 fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
218 return
219 }
220
221 var err error
222 p.Tag, err = strconv.Atoi(fields[1])
223 if err != nil {
224 return
225 }
226
227outer:
228 for i := 2; i < len(fields); i++ {
229 f := fields[i]
230 switch {
231 case f == "req":
232 p.Required = true
233 case f == "opt":
234 p.Optional = true
235 case f == "rep":
236 p.Repeated = true
237 case f == "packed":
238 p.Packed = true
239 case strings.HasPrefix(f, "name="):
240 p.OrigName = f[5:]
241 case strings.HasPrefix(f, "json="):
242 p.JSONName = f[5:]
243 case strings.HasPrefix(f, "enum="):
244 p.Enum = f[5:]
245 case f == "proto3":
246 p.proto3 = true
247 case f == "oneof":
248 p.oneof = true
249 case strings.HasPrefix(f, "def="):
250 p.HasDefault = true
251 p.Default = f[4:] // rest of string
252 if i+1 < len(fields) {
253 // Commas aren't escaped, and def is always last.
254 p.Default += "," + strings.Join(fields[i+1:], ",")
255 break outer
256 }
257 }
258 }
259}
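Since Properties and Parse are exported, their behavior can be demonstrated directly; the tag string below is a made-up example in the format shown above.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var p proto.Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")

	fmt.Println(p.Tag)      // 49
	fmt.Println(p.OrigName) // foo
	fmt.Println(p.Default)  // hello!
	fmt.Println(p.String()) // formats the properties back in tag style
}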
260
261var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
262
263// setFieldProps initializes the field properties for submessages and maps.
264func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
265 switch t1 := typ; t1.Kind() {
266 case reflect.Ptr:
267 if t1.Elem().Kind() == reflect.Struct {
268 p.stype = t1.Elem()
269 }
270
271 case reflect.Slice:
272 if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
273 p.stype = t2.Elem()
274 }
275
276 case reflect.Map:
277 p.mtype = t1
278 p.MapKeyProp = &Properties{}
279 p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
280 p.MapValProp = &Properties{}
281 vtype := p.mtype.Elem()
282 if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
283 // The value type is not a message (*T) or bytes ([]byte),
284 // so we need encoders for the pointer to this type.
285 vtype = reflect.PtrTo(vtype)
286 }
287 p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
288 }
289
290 if p.stype != nil {
291 if lockGetProp {
292 p.sprop = GetProperties(p.stype)
293 } else {
294 p.sprop = getPropertiesLocked(p.stype)
295 }
296 }
297}
298
299var (
300 marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
301)
302
303// Init populates the properties from a protocol buffer struct tag.
304func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
305 p.init(typ, name, tag, f, true)
306}
307
308func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
309 // "bytes,49,opt,def=hello!"
310 p.Name = name
311 p.OrigName = name
312 if tag == "" {
313 return
314 }
315 p.Parse(tag)
316 p.setFieldProps(typ, f, lockGetProp)
317}
318
319var (
320 propertiesMu sync.RWMutex
321 propertiesMap = make(map[reflect.Type]*StructProperties)
322)
323
324// GetProperties returns the list of properties for the type represented by t.
325// t must represent a generated struct type of a protocol message.
326func GetProperties(t reflect.Type) *StructProperties {
327 if t.Kind() != reflect.Struct {
328 panic("proto: type must have kind struct")
329 }
330
331 // Most calls to GetProperties in a long-running program will be
332 // retrieving details for types we have seen before.
333 propertiesMu.RLock()
334 sprop, ok := propertiesMap[t]
335 propertiesMu.RUnlock()
336 if ok {
337 if collectStats {
338 stats.Chit++
339 }
340 return sprop
341 }
342
343 propertiesMu.Lock()
344 sprop = getPropertiesLocked(t)
345 propertiesMu.Unlock()
346 return sprop
347}
348
349// getPropertiesLocked requires that propertiesMu is held.
350func getPropertiesLocked(t reflect.Type) *StructProperties {
351 if prop, ok := propertiesMap[t]; ok {
352 if collectStats {
353 stats.Chit++
354 }
355 return prop
356 }
357 if collectStats {
358 stats.Cmiss++
359 }
360
361 prop := new(StructProperties)
362 // in case of recursive protos, fill this in now.
363 propertiesMap[t] = prop
364
365 // build properties
366 prop.Prop = make([]*Properties, t.NumField())
367 prop.order = make([]int, t.NumField())
368
369 for i := 0; i < t.NumField(); i++ {
370 f := t.Field(i)
371 p := new(Properties)
372 name := f.Name
373 p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
374
375 oneof := f.Tag.Get("protobuf_oneof") // special case
376 if oneof != "" {
377 // Oneof fields don't use the traditional protobuf tag.
378 p.OrigName = oneof
379 }
380 prop.Prop[i] = p
381 prop.order[i] = i
382 if debug {
383 print(i, " ", f.Name, " ", t.String(), " ")
384 if p.Tag > 0 {
385 print(p.String())
386 }
387 print("\n")
388 }
389 }
390
391 // Re-order prop.order.
392 sort.Sort(prop)
393
394 type oneofMessage interface {
395 XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
396 }
397 if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
398 var oots []interface{}
399 _, _, _, oots = om.XXX_OneofFuncs()
400
401 // Interpret oneof metadata.
402 prop.OneofTypes = make(map[string]*OneofProperties)
403 for _, oot := range oots {
404 oop := &OneofProperties{
405 Type: reflect.ValueOf(oot).Type(), // *T
406 Prop: new(Properties),
407 }
408 sft := oop.Type.Elem().Field(0)
409 oop.Prop.Name = sft.Name
410 oop.Prop.Parse(sft.Tag.Get("protobuf"))
411 // There will be exactly one interface field that
412 // this new value is assignable to.
413 for i := 0; i < t.NumField(); i++ {
414 f := t.Field(i)
415 if f.Type.Kind() != reflect.Interface {
416 continue
417 }
418 if !oop.Type.AssignableTo(f.Type) {
419 continue
420 }
421 oop.Field = i
422 break
423 }
424 prop.OneofTypes[oop.Prop.OrigName] = oop
425 }
426 }
427
428 // build required counts
429 // build tags
430 reqCount := 0
431 prop.decoderOrigNames = make(map[string]int)
432 for i, p := range prop.Prop {
433 if strings.HasPrefix(p.Name, "XXX_") {
434 // Internal fields should not appear in tags/origNames maps.
435 // They are handled specially when encoding and decoding.
436 continue
437 }
438 if p.Required {
439 reqCount++
440 }
441 prop.decoderTags.put(p.Tag, i)
442 prop.decoderOrigNames[p.OrigName] = i
443 }
444 prop.reqCount = reqCount
445
446 return prop
447}
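GetProperties only needs a struct type carrying protobuf tags, so a hand-rolled struct (with invented tags) is enough to exercise it without generated code; this is a usage sketch, not a pattern the package prescribes.

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// handRolled mimics the shape of a generated message struct; the tags are made up.
type handRolled struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,opt,name=count"`
}

func main() {
	sp := proto.GetProperties(reflect.TypeOf(handRolled{}))
	for _, p := range sp.Prop {
		fmt.Println(p.Name, p.Tag, p.Wire) // "Name 1 bytes", then "Count 2 varint"
	}
}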
448
449// A global registry of enum types.
450// The generated code will register the generated maps by calling RegisterEnum.
451
452var enumValueMaps = make(map[string]map[string]int32)
453
454// RegisterEnum is called from the generated code to install the enum descriptor
455// maps into the global table to aid parsing text format protocol buffers.
456func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
457 if _, ok := enumValueMaps[typeName]; ok {
458 panic("proto: duplicate enum registered: " + typeName)
459 }
460 enumValueMaps[typeName] = valueMap
461}
462
463// EnumValueMap returns the mapping from names to integers of the
464// enum type enumType, or nil if not found.
465func EnumValueMap(enumType string) map[string]int32 {
466 return enumValueMaps[enumType]
467}
468
469// A registry of all linked message types.
470// The string is a fully-qualified proto name ("pkg.Message").
471var (
472 protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
473 protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
474 revProtoTypes = make(map[reflect.Type]string)
475)
476
477// RegisterType is called from generated code and maps from the fully qualified
478// proto name to the type (pointer to struct) of the protocol buffer.
479func RegisterType(x Message, name string) {
480 if _, ok := protoTypedNils[name]; ok {
481 // TODO: Some day, make this a panic.
482 log.Printf("proto: duplicate proto type registered: %s", name)
483 return
484 }
485 t := reflect.TypeOf(x)
486 if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
487 // Generated code always calls RegisterType with nil x.
488 // This check is just for extra safety.
489 protoTypedNils[name] = x
490 } else {
491 protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
492 }
493 revProtoTypes[t] = name
494}
495
496// RegisterMapType is called from generated code and maps from the fully qualified
497// proto name to the native map type of the proto map definition.
498func RegisterMapType(x interface{}, name string) {
499 if reflect.TypeOf(x).Kind() != reflect.Map {
500 panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
501 }
502 if _, ok := protoMapTypes[name]; ok {
503 log.Printf("proto: duplicate proto type registered: %s", name)
504 return
505 }
506 t := reflect.TypeOf(x)
507 protoMapTypes[name] = t
508 revProtoTypes[t] = name
509}
510
511// MessageName returns the fully-qualified proto name for the given message type.
512func MessageName(x Message) string {
513 type xname interface {
514 XXX_MessageName() string
515 }
516 if m, ok := x.(xname); ok {
517 return m.XXX_MessageName()
518 }
519 return revProtoTypes[reflect.TypeOf(x)]
520}
521
522// MessageType returns the message type (pointer to struct) for a named message.
523// The type is not guaranteed to implement proto.Message if the name refers to a
524// map entry.
525func MessageType(name string) reflect.Type {
526 if t, ok := protoTypedNils[name]; ok {
527 return reflect.TypeOf(t)
528 }
529 return protoMapTypes[name]
530}
531
532// A registry of all linked proto files.
533var (
534 protoFiles = make(map[string][]byte) // file name => fileDescriptor
535)
536
537// RegisterFile is called from generated code and maps from the
538// full file name of a .proto file to its compressed FileDescriptorProto.
539func RegisterFile(filename string, fileDescriptor []byte) {
540 protoFiles[filename] = fileDescriptor
541}
542
543// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
544func FileDescriptor(filename string) []byte { return protoFiles[filename] }
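A usage sketch of this registry, assuming some generated package has registered a message under the hypothetical fully-qualified name "example.MyMessage"; if nothing is registered under that name, MessageType simply returns nil.

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

func main() {
	// In a real program the generated code registers the type via RegisterType.
	if t := proto.MessageType("example.MyMessage"); t != nil {
		m := reflect.New(t.Elem()).Interface().(proto.Message)
		fmt.Println(proto.MessageName(m)) // "example.MyMessage"
	} else {
		fmt.Println("not registered")
	}
}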
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..b167944
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2767 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34import (
35 "errors"
36 "fmt"
37 "math"
38 "reflect"
39 "sort"
40 "strconv"
41 "strings"
42 "sync"
43 "sync/atomic"
44 "unicode/utf8"
45)
46
47// a sizer takes a pointer to a field and the size of its tag, and computes the size of
48// the encoded data.
49type sizer func(pointer, int) int
50
51// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
52// marshals the field to the end of the slice, and returns the slice and an error (if any).
53type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
54
55// marshalInfo is the information used for marshaling a message.
56type marshalInfo struct {
57 typ reflect.Type
58 fields []*marshalFieldInfo
59 unrecognized field // offset of XXX_unrecognized
60 extensions field // offset of XXX_InternalExtensions
61 v1extensions field // offset of XXX_extensions
62 sizecache field // offset of XXX_sizecache
63 initialized int32 // 0 -- only typ is set, 1 -- fully initialized
64 messageset bool // uses message set wire format
65 hasmarshaler bool // has custom marshaler
66 sync.RWMutex // protect extElems map, also for initialization
67 extElems map[int32]*marshalElemInfo // info of extension elements
68}
69
70// marshalFieldInfo is the information used for marshaling a field of a message.
71type marshalFieldInfo struct {
72 field field
73 wiretag uint64 // tag in wire format
74 tagsize int // size of tag in wire format
75 sizer sizer
76 marshaler marshaler
77 isPointer bool
78 required bool // field is required
79 name string // name of the field, for error reporting
80 oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
81}
82
83// marshalElemInfo is the information used for marshaling an extension or oneof element.
84type marshalElemInfo struct {
85 wiretag uint64 // tag in wire format
86 tagsize int // size of tag in wire format
87 sizer sizer
88 marshaler marshaler
89 isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
90}
91
92var (
93 marshalInfoMap = map[reflect.Type]*marshalInfo{}
94 marshalInfoLock sync.Mutex
95)
96
97// getMarshalInfo returns the information to marshal a given type of message.
98// The info it returns may not necessarily be initialized.
99// t is the type of the message (NOT the pointer to it).
100func getMarshalInfo(t reflect.Type) *marshalInfo {
101 marshalInfoLock.Lock()
102 u, ok := marshalInfoMap[t]
103 if !ok {
104 u = &marshalInfo{typ: t}
105 marshalInfoMap[t] = u
106 }
107 marshalInfoLock.Unlock()
108 return u
109}
110
111// Size is the entry point from generated code,
112// and should ONLY be called by generated code.
113// It computes the size of the encoded data of msg.
114// a is a pointer to a place to store cached marshal info.
115func (a *InternalMessageInfo) Size(msg Message) int {
116 u := getMessageMarshalInfo(msg, a)
117 ptr := toPointer(&msg)
118 if ptr.isNil() {
119 // We get here if msg is a typed nil ((*SomeMessage)(nil)),
120 // so it satisfies the interface, and msg == nil wouldn't
121// catch it. We don't want to crash in this case.
122 return 0
123 }
124 return u.size(ptr)
125}
126
127// Marshal is the entry point from generated code,
128// and should ONLY be called by generated code.
129// It marshals msg to the end of b.
130// a is a pointer to a place to store cached marshal info.
131func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
132 u := getMessageMarshalInfo(msg, a)
133 ptr := toPointer(&msg)
134 if ptr.isNil() {
135 // We get here if msg is a typed nil ((*SomeMessage)(nil)),
136 // so it satisfies the interface, and msg == nil wouldn't
137// catch it. We don't want to crash in this case.
138 return b, ErrNil
139 }
140 return u.marshal(b, ptr, deterministic)
141}
142
143func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
144 // u := a.marshal, but atomically.
145 // We use an atomic here to ensure memory consistency.
146 u := atomicLoadMarshalInfo(&a.marshal)
147 if u == nil {
148 // Get marshal information from type of message.
149 t := reflect.ValueOf(msg).Type()
150 if t.Kind() != reflect.Ptr {
151 panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
152 }
153 u = getMarshalInfo(t.Elem())
154 // Store it in the cache for later users.
155 // a.marshal = u, but atomically.
156 atomicStoreMarshalInfo(&a.marshal, u)
157 }
158 return u
159}
160
161// size is the main function to compute the size of the encoded data of a message.
162// ptr is the pointer to the message.
163func (u *marshalInfo) size(ptr pointer) int {
164 if atomic.LoadInt32(&u.initialized) == 0 {
165 u.computeMarshalInfo()
166 }
167
168 // If the message can marshal itself, let it do it, for compatibility.
169 // NOTE: This is not efficient.
170 if u.hasmarshaler {
171 m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
172 b, _ := m.Marshal()
173 return len(b)
174 }
175
176 n := 0
177 for _, f := range u.fields {
178 if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
179 // nil pointer always marshals to nothing
180 continue
181 }
182 n += f.sizer(ptr.offset(f.field), f.tagsize)
183 }
184 if u.extensions.IsValid() {
185 e := ptr.offset(u.extensions).toExtensions()
186 if u.messageset {
187 n += u.sizeMessageSet(e)
188 } else {
189 n += u.sizeExtensions(e)
190 }
191 }
192 if u.v1extensions.IsValid() {
193 m := *ptr.offset(u.v1extensions).toOldExtensions()
194 n += u.sizeV1Extensions(m)
195 }
196 if u.unrecognized.IsValid() {
197 s := *ptr.offset(u.unrecognized).toBytes()
198 n += len(s)
199 }
200 // cache the result for use in marshal
201 if u.sizecache.IsValid() {
202 atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
203 }
204 return n
205}
206
207// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
208// it falls back to computing the size.
209func (u *marshalInfo) cachedsize(ptr pointer) int {
210 if u.sizecache.IsValid() {
211 return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
212 }
213 return u.size(ptr)
214}
215
216// marshal is the main function to marshal a message. It takes a byte slice and appends
217// the encoded data to the end of the slice, and returns the slice and an error (if any).
218// ptr is the pointer to the message.
219// If deterministic is true, map is marshaled in deterministic order.
220func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
221 if atomic.LoadInt32(&u.initialized) == 0 {
222 u.computeMarshalInfo()
223 }
224
225 // If the message can marshal itself, let it do it, for compatibility.
226 // NOTE: This is not efficient.
227 if u.hasmarshaler {
228 m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
229 b1, err := m.Marshal()
230 b = append(b, b1...)
231 return b, err
232 }
233
234 var err, errLater error
235 // The old marshaler encodes extensions at beginning.
236 if u.extensions.IsValid() {
237 e := ptr.offset(u.extensions).toExtensions()
238 if u.messageset {
239 b, err = u.appendMessageSet(b, e, deterministic)
240 } else {
241 b, err = u.appendExtensions(b, e, deterministic)
242 }
243 if err != nil {
244 return b, err
245 }
246 }
247 if u.v1extensions.IsValid() {
248 m := *ptr.offset(u.v1extensions).toOldExtensions()
249 b, err = u.appendV1Extensions(b, m, deterministic)
250 if err != nil {
251 return b, err
252 }
253 }
254 for _, f := range u.fields {
255 if f.required {
256 if ptr.offset(f.field).getPointer().isNil() {
257 // Required field is not set.
258 // We record the error but keep going, to give a complete marshaling.
259 if errLater == nil {
260 errLater = &RequiredNotSetError{f.name}
261 }
262 continue
263 }
264 }
265 if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
266 // nil pointer always marshals to nothing
267 continue
268 }
269 b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
270 if err != nil {
271 if err1, ok := err.(*RequiredNotSetError); ok {
272 // Required field in submessage is not set.
273 // We record the error but keep going, to give a complete marshaling.
274 if errLater == nil {
275 errLater = &RequiredNotSetError{f.name + "." + err1.field}
276 }
277 continue
278 }
279 if err == errRepeatedHasNil {
280 err = errors.New("proto: repeated field " + f.name + " has nil element")
281 }
282 if err == errInvalidUTF8 {
283 if errLater == nil {
284 fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
285 errLater = &invalidUTF8Error{fullName}
286 }
287 continue
288 }
289 return b, err
290 }
291 }
292 if u.unrecognized.IsValid() {
293 s := *ptr.offset(u.unrecognized).toBytes()
294 b = append(b, s...)
295 }
296 return b, errLater
297}
298
299// computeMarshalInfo initializes the marshal info.
300func (u *marshalInfo) computeMarshalInfo() {
301 u.Lock()
302 defer u.Unlock()
303 if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
304 return
305 }
306
307 t := u.typ
308 u.unrecognized = invalidField
309 u.extensions = invalidField
310 u.v1extensions = invalidField
311 u.sizecache = invalidField
312
313 // If the message can marshal itself, let it do it, for compatibility.
314 // NOTE: This is not efficient.
315 if reflect.PtrTo(t).Implements(marshalerType) {
316 u.hasmarshaler = true
317 atomic.StoreInt32(&u.initialized, 1)
318 return
319 }
320
321 // get oneof implementers
322 var oneofImplementers []interface{}
323 if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
324 _, _, _, oneofImplementers = m.XXX_OneofFuncs()
325 }
326
327 n := t.NumField()
328
329 // deal with XXX fields first
330 for i := 0; i < t.NumField(); i++ {
331 f := t.Field(i)
332 if !strings.HasPrefix(f.Name, "XXX_") {
333 continue
334 }
335 switch f.Name {
336 case "XXX_sizecache":
337 u.sizecache = toField(&f)
338 case "XXX_unrecognized":
339 u.unrecognized = toField(&f)
340 case "XXX_InternalExtensions":
341 u.extensions = toField(&f)
342 u.messageset = f.Tag.Get("protobuf_messageset") == "1"
343 case "XXX_extensions":
344 u.v1extensions = toField(&f)
345 case "XXX_NoUnkeyedLiteral":
346 // nothing to do
347 default:
348 panic("unknown XXX field: " + f.Name)
349 }
350 n--
351 }
352
353 // normal fields
354 fields := make([]marshalFieldInfo, n) // batch allocation
355 u.fields = make([]*marshalFieldInfo, 0, n)
356 for i, j := 0, 0; i < t.NumField(); i++ {
357 f := t.Field(i)
358
359 if strings.HasPrefix(f.Name, "XXX_") {
360 continue
361 }
362 field := &fields[j]
363 j++
364 field.name = f.Name
365 u.fields = append(u.fields, field)
366 if f.Tag.Get("protobuf_oneof") != "" {
367 field.computeOneofFieldInfo(&f, oneofImplementers)
368 continue
369 }
370 if f.Tag.Get("protobuf") == "" {
371 // field has no tag (not in generated message), ignore it
372 u.fields = u.fields[:len(u.fields)-1]
373 j--
374 continue
375 }
376 field.computeMarshalFieldInfo(&f)
377 }
378
379 // fields are marshaled in tag order on the wire.
380 sort.Sort(byTag(u.fields))
381
382 atomic.StoreInt32(&u.initialized, 1)
383}
384
385// helper for sorting fields by tag
386type byTag []*marshalFieldInfo
387
388func (a byTag) Len() int { return len(a) }
389func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
390func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
391
392// getExtElemInfo returns the information to marshal an extension element.
393// The info it returns is initialized.
394func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
395 // get from cache first
396 u.RLock()
397 e, ok := u.extElems[desc.Field]
398 u.RUnlock()
399 if ok {
400 return e
401 }
402
403 t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
404 tags := strings.Split(desc.Tag, ",")
405 tag, err := strconv.Atoi(tags[1])
406 if err != nil {
407 panic("tag is not an integer")
408 }
409 wt := wiretype(tags[0])
410 sizer, marshaler := typeMarshaler(t, tags, false, false)
411 e = &marshalElemInfo{
412 wiretag: uint64(tag)<<3 | wt,
413 tagsize: SizeVarint(uint64(tag) << 3),
414 sizer: sizer,
415 marshaler: marshaler,
416 isptr: t.Kind() == reflect.Ptr,
417 }
418
419 // update cache
420 u.Lock()
421 if u.extElems == nil {
422 u.extElems = make(map[int32]*marshalElemInfo)
423 }
424 u.extElems[desc.Field] = e
425 u.Unlock()
426 return e
427}
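// Editor's note (illustrative sketch, not part of the upstream file): the wiretag
// and tagsize computed above follow the standard protobuf key encoding,
// key = field_number<<3 | wire_type. For a hypothetical extension with field
// number 100 and encoding "bytes" (WireBytes == 2):
//   wiretag = 100<<3 | 2 = 802
//   tagsize = SizeVarint(uint64(100) << 3) = SizeVarint(800) = 2
// so each occurrence of that extension costs two key bytes plus its payload.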
428
429 // computeMarshalFieldInfo fills in the information needed to marshal a field.
430func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
431 // Parse the protobuf tag of the field.
432 // The tag has the format "bytes,49,opt,name=foo,def=hello!".
433 tags := strings.Split(f.Tag.Get("protobuf"), ",")
434 if tags[0] == "" {
435 return
436 }
437 tag, err := strconv.Atoi(tags[1])
438 if err != nil {
439 panic("tag is not an integer")
440 }
441 wt := wiretype(tags[0])
442 if tags[2] == "req" {
443 fi.required = true
444 }
445 fi.setTag(f, tag, wt)
446 fi.setMarshaler(f, tags)
447}
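// Editor's note (illustrative sketch, not part of the upstream file): how a generated
// struct tag feeds this function; the field name "Label" is hypothetical.
//   Label *string `protobuf:"bytes,3,opt,name=label"`
// splits into tags = ["bytes", "3", "opt", "name=label"], so tags[0] picks the wire
// encoding (wiretype("bytes") == WireBytes), tags[1] is the field number (3), and a
// tags[2] of "req" would mark the field required (here it is "opt").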
448
449func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
450 fi.field = toField(f)
451 fi.wiretag = 1<<31 - 1 // Use a large tag number so that oneofs sort at the end. This tag will not appear on the wire.
452 fi.isPointer = true
453 fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
454 fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
455
456 ityp := f.Type // interface type
457 for _, o := range oneofImplementers {
458 t := reflect.TypeOf(o)
459 if !t.Implements(ityp) {
460 continue
461 }
462 sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
463 tags := strings.Split(sf.Tag.Get("protobuf"), ",")
464 tag, err := strconv.Atoi(tags[1])
465 if err != nil {
466 panic("tag is not an integer")
467 }
468 wt := wiretype(tags[0])
469 sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
470 fi.oneofElems[t.Elem()] = &marshalElemInfo{
471 wiretag: uint64(tag)<<3 | wt,
472 tagsize: SizeVarint(uint64(tag) << 3),
473 sizer: sizer,
474 marshaler: marshaler,
475 }
476 }
477}
478
479type oneofMessage interface {
480 XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
481}
482
483// wiretype returns the wire encoding of the type.
484func wiretype(encoding string) uint64 {
485 switch encoding {
486 case "fixed32":
487 return WireFixed32
488 case "fixed64":
489 return WireFixed64
490 case "varint", "zigzag32", "zigzag64":
491 return WireVarint
492 case "bytes":
493 return WireBytes
494 case "group":
495 return WireStartGroup
496 }
497 panic("unknown wire type " + encoding)
498}
499
500 // setTag fills in the tag (in wire format) and its size in the field's info.
501func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
502 fi.field = toField(f)
503 fi.wiretag = uint64(tag)<<3 | wt
504 fi.tagsize = SizeVarint(uint64(tag) << 3)
505}
506
507 // setMarshaler fills in the sizer and marshaler in the field's info.
508func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
509 switch f.Type.Kind() {
510 case reflect.Map:
511 // map field
512 fi.isPointer = true
513 fi.sizer, fi.marshaler = makeMapMarshaler(f)
514 return
515 case reflect.Ptr, reflect.Slice:
516 fi.isPointer = true
517 }
518 fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
519}
520
521// typeMarshaler returns the sizer and marshaler of a given field.
522// t is the type of the field.
523// tags is the generated "protobuf" tag of the field.
524 // If nozero is true, the zero value is not marshaled to the wire.
525// If oneof is true, it is a oneof field.
526func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
527 encoding := tags[0]
528
529 pointer := false
530 slice := false
531 if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
532 slice = true
533 t = t.Elem()
534 }
535 if t.Kind() == reflect.Ptr {
536 pointer = true
537 t = t.Elem()
538 }
539
540 packed := false
541 proto3 := false
542 validateUTF8 := true
543 for i := 2; i < len(tags); i++ {
544 if tags[i] == "packed" {
545 packed = true
546 }
547 if tags[i] == "proto3" {
548 proto3 = true
549 }
550 }
551 validateUTF8 = validateUTF8 && proto3
552
553 switch t.Kind() {
554 case reflect.Bool:
555 if pointer {
556 return sizeBoolPtr, appendBoolPtr
557 }
558 if slice {
559 if packed {
560 return sizeBoolPackedSlice, appendBoolPackedSlice
561 }
562 return sizeBoolSlice, appendBoolSlice
563 }
564 if nozero {
565 return sizeBoolValueNoZero, appendBoolValueNoZero
566 }
567 return sizeBoolValue, appendBoolValue
568 case reflect.Uint32:
569 switch encoding {
570 case "fixed32":
571 if pointer {
572 return sizeFixed32Ptr, appendFixed32Ptr
573 }
574 if slice {
575 if packed {
576 return sizeFixed32PackedSlice, appendFixed32PackedSlice
577 }
578 return sizeFixed32Slice, appendFixed32Slice
579 }
580 if nozero {
581 return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
582 }
583 return sizeFixed32Value, appendFixed32Value
584 case "varint":
585 if pointer {
586 return sizeVarint32Ptr, appendVarint32Ptr
587 }
588 if slice {
589 if packed {
590 return sizeVarint32PackedSlice, appendVarint32PackedSlice
591 }
592 return sizeVarint32Slice, appendVarint32Slice
593 }
594 if nozero {
595 return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
596 }
597 return sizeVarint32Value, appendVarint32Value
598 }
599 case reflect.Int32:
600 switch encoding {
601 case "fixed32":
602 if pointer {
603 return sizeFixedS32Ptr, appendFixedS32Ptr
604 }
605 if slice {
606 if packed {
607 return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
608 }
609 return sizeFixedS32Slice, appendFixedS32Slice
610 }
611 if nozero {
612 return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
613 }
614 return sizeFixedS32Value, appendFixedS32Value
615 case "varint":
616 if pointer {
617 return sizeVarintS32Ptr, appendVarintS32Ptr
618 }
619 if slice {
620 if packed {
621 return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
622 }
623 return sizeVarintS32Slice, appendVarintS32Slice
624 }
625 if nozero {
626 return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
627 }
628 return sizeVarintS32Value, appendVarintS32Value
629 case "zigzag32":
630 if pointer {
631 return sizeZigzag32Ptr, appendZigzag32Ptr
632 }
633 if slice {
634 if packed {
635 return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
636 }
637 return sizeZigzag32Slice, appendZigzag32Slice
638 }
639 if nozero {
640 return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
641 }
642 return sizeZigzag32Value, appendZigzag32Value
643 }
644 case reflect.Uint64:
645 switch encoding {
646 case "fixed64":
647 if pointer {
648 return sizeFixed64Ptr, appendFixed64Ptr
649 }
650 if slice {
651 if packed {
652 return sizeFixed64PackedSlice, appendFixed64PackedSlice
653 }
654 return sizeFixed64Slice, appendFixed64Slice
655 }
656 if nozero {
657 return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
658 }
659 return sizeFixed64Value, appendFixed64Value
660 case "varint":
661 if pointer {
662 return sizeVarint64Ptr, appendVarint64Ptr
663 }
664 if slice {
665 if packed {
666 return sizeVarint64PackedSlice, appendVarint64PackedSlice
667 }
668 return sizeVarint64Slice, appendVarint64Slice
669 }
670 if nozero {
671 return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
672 }
673 return sizeVarint64Value, appendVarint64Value
674 }
675 case reflect.Int64:
676 switch encoding {
677 case "fixed64":
678 if pointer {
679 return sizeFixedS64Ptr, appendFixedS64Ptr
680 }
681 if slice {
682 if packed {
683 return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
684 }
685 return sizeFixedS64Slice, appendFixedS64Slice
686 }
687 if nozero {
688 return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
689 }
690 return sizeFixedS64Value, appendFixedS64Value
691 case "varint":
692 if pointer {
693 return sizeVarintS64Ptr, appendVarintS64Ptr
694 }
695 if slice {
696 if packed {
697 return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
698 }
699 return sizeVarintS64Slice, appendVarintS64Slice
700 }
701 if nozero {
702 return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
703 }
704 return sizeVarintS64Value, appendVarintS64Value
705 case "zigzag64":
706 if pointer {
707 return sizeZigzag64Ptr, appendZigzag64Ptr
708 }
709 if slice {
710 if packed {
711 return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
712 }
713 return sizeZigzag64Slice, appendZigzag64Slice
714 }
715 if nozero {
716 return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
717 }
718 return sizeZigzag64Value, appendZigzag64Value
719 }
720 case reflect.Float32:
721 if pointer {
722 return sizeFloat32Ptr, appendFloat32Ptr
723 }
724 if slice {
725 if packed {
726 return sizeFloat32PackedSlice, appendFloat32PackedSlice
727 }
728 return sizeFloat32Slice, appendFloat32Slice
729 }
730 if nozero {
731 return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
732 }
733 return sizeFloat32Value, appendFloat32Value
734 case reflect.Float64:
735 if pointer {
736 return sizeFloat64Ptr, appendFloat64Ptr
737 }
738 if slice {
739 if packed {
740 return sizeFloat64PackedSlice, appendFloat64PackedSlice
741 }
742 return sizeFloat64Slice, appendFloat64Slice
743 }
744 if nozero {
745 return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
746 }
747 return sizeFloat64Value, appendFloat64Value
748 case reflect.String:
749 if validateUTF8 {
750 if pointer {
751 return sizeStringPtr, appendUTF8StringPtr
752 }
753 if slice {
754 return sizeStringSlice, appendUTF8StringSlice
755 }
756 if nozero {
757 return sizeStringValueNoZero, appendUTF8StringValueNoZero
758 }
759 return sizeStringValue, appendUTF8StringValue
760 }
761 if pointer {
762 return sizeStringPtr, appendStringPtr
763 }
764 if slice {
765 return sizeStringSlice, appendStringSlice
766 }
767 if nozero {
768 return sizeStringValueNoZero, appendStringValueNoZero
769 }
770 return sizeStringValue, appendStringValue
771 case reflect.Slice:
772 if slice {
773 return sizeBytesSlice, appendBytesSlice
774 }
775 if oneof {
776 // Oneof bytes field may also have "proto3" tag.
777 // We want to marshal it as a oneof field. Do this
778 // check before the proto3 check.
779 return sizeBytesOneof, appendBytesOneof
780 }
781 if proto3 {
782 return sizeBytes3, appendBytes3
783 }
784 return sizeBytes, appendBytes
785 case reflect.Struct:
786 switch encoding {
787 case "group":
788 if slice {
789 return makeGroupSliceMarshaler(getMarshalInfo(t))
790 }
791 return makeGroupMarshaler(getMarshalInfo(t))
792 case "bytes":
793 if slice {
794 return makeMessageSliceMarshaler(getMarshalInfo(t))
795 }
796 return makeMessageMarshaler(getMarshalInfo(t))
797 }
798 }
799 panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
800}
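// Editor's note (illustrative sketch, not part of the upstream file): examples of the
// sizer/marshaler pairs the switch above selects, assuming the listed Go types and
// tags ("SomeMsg" is a placeholder message type):
//   *uint32 with tags ["varint", ...]            -> sizeVarint32Ptr, appendVarint32Ptr
//   []int32 with tags ["varint", ..., "packed"]  -> sizeVarintS32PackedSlice, appendVarintS32PackedSlice
//   string (proto3, nozero=true) with ["bytes"]  -> sizeStringValueNoZero, appendUTF8StringValueNoZero
//   *SomeMsg with tags ["bytes", ...]            -> makeMessageMarshaler(getMarshalInfo(SomeMsg))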
801
802 // Below are the functions that size/marshal a specific type of field.
803 // They are stored in the field's info and called through function pointers.
804 // They have type sizer or marshaler.
805
806func sizeFixed32Value(_ pointer, tagsize int) int {
807 return 4 + tagsize
808}
809func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
810 v := *ptr.toUint32()
811 if v == 0 {
812 return 0
813 }
814 return 4 + tagsize
815}
816func sizeFixed32Ptr(ptr pointer, tagsize int) int {
817 p := *ptr.toUint32Ptr()
818 if p == nil {
819 return 0
820 }
821 return 4 + tagsize
822}
823func sizeFixed32Slice(ptr pointer, tagsize int) int {
824 s := *ptr.toUint32Slice()
825 return (4 + tagsize) * len(s)
826}
827func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
828 s := *ptr.toUint32Slice()
829 if len(s) == 0 {
830 return 0
831 }
832 return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
833}
834func sizeFixedS32Value(_ pointer, tagsize int) int {
835 return 4 + tagsize
836}
837func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
838 v := *ptr.toInt32()
839 if v == 0 {
840 return 0
841 }
842 return 4 + tagsize
843}
844func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
845 p := ptr.getInt32Ptr()
846 if p == nil {
847 return 0
848 }
849 return 4 + tagsize
850}
851func sizeFixedS32Slice(ptr pointer, tagsize int) int {
852 s := ptr.getInt32Slice()
853 return (4 + tagsize) * len(s)
854}
855func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
856 s := ptr.getInt32Slice()
857 if len(s) == 0 {
858 return 0
859 }
860 return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
861}
862func sizeFloat32Value(_ pointer, tagsize int) int {
863 return 4 + tagsize
864}
865func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
866 v := math.Float32bits(*ptr.toFloat32())
867 if v == 0 {
868 return 0
869 }
870 return 4 + tagsize
871}
872func sizeFloat32Ptr(ptr pointer, tagsize int) int {
873 p := *ptr.toFloat32Ptr()
874 if p == nil {
875 return 0
876 }
877 return 4 + tagsize
878}
879func sizeFloat32Slice(ptr pointer, tagsize int) int {
880 s := *ptr.toFloat32Slice()
881 return (4 + tagsize) * len(s)
882}
883func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
884 s := *ptr.toFloat32Slice()
885 if len(s) == 0 {
886 return 0
887 }
888 return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
889}
890func sizeFixed64Value(_ pointer, tagsize int) int {
891 return 8 + tagsize
892}
893func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
894 v := *ptr.toUint64()
895 if v == 0 {
896 return 0
897 }
898 return 8 + tagsize
899}
900func sizeFixed64Ptr(ptr pointer, tagsize int) int {
901 p := *ptr.toUint64Ptr()
902 if p == nil {
903 return 0
904 }
905 return 8 + tagsize
906}
907func sizeFixed64Slice(ptr pointer, tagsize int) int {
908 s := *ptr.toUint64Slice()
909 return (8 + tagsize) * len(s)
910}
911func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
912 s := *ptr.toUint64Slice()
913 if len(s) == 0 {
914 return 0
915 }
916 return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
917}
918func sizeFixedS64Value(_ pointer, tagsize int) int {
919 return 8 + tagsize
920}
921func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
922 v := *ptr.toInt64()
923 if v == 0 {
924 return 0
925 }
926 return 8 + tagsize
927}
928func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
929 p := *ptr.toInt64Ptr()
930 if p == nil {
931 return 0
932 }
933 return 8 + tagsize
934}
935func sizeFixedS64Slice(ptr pointer, tagsize int) int {
936 s := *ptr.toInt64Slice()
937 return (8 + tagsize) * len(s)
938}
939func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
940 s := *ptr.toInt64Slice()
941 if len(s) == 0 {
942 return 0
943 }
944 return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
945}
946func sizeFloat64Value(_ pointer, tagsize int) int {
947 return 8 + tagsize
948}
949func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
950 v := math.Float64bits(*ptr.toFloat64())
951 if v == 0 {
952 return 0
953 }
954 return 8 + tagsize
955}
956func sizeFloat64Ptr(ptr pointer, tagsize int) int {
957 p := *ptr.toFloat64Ptr()
958 if p == nil {
959 return 0
960 }
961 return 8 + tagsize
962}
963func sizeFloat64Slice(ptr pointer, tagsize int) int {
964 s := *ptr.toFloat64Slice()
965 return (8 + tagsize) * len(s)
966}
967func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
968 s := *ptr.toFloat64Slice()
969 if len(s) == 0 {
970 return 0
971 }
972 return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
973}
974func sizeVarint32Value(ptr pointer, tagsize int) int {
975 v := *ptr.toUint32()
976 return SizeVarint(uint64(v)) + tagsize
977}
978func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
979 v := *ptr.toUint32()
980 if v == 0 {
981 return 0
982 }
983 return SizeVarint(uint64(v)) + tagsize
984}
985func sizeVarint32Ptr(ptr pointer, tagsize int) int {
986 p := *ptr.toUint32Ptr()
987 if p == nil {
988 return 0
989 }
990 return SizeVarint(uint64(*p)) + tagsize
991}
992func sizeVarint32Slice(ptr pointer, tagsize int) int {
993 s := *ptr.toUint32Slice()
994 n := 0
995 for _, v := range s {
996 n += SizeVarint(uint64(v)) + tagsize
997 }
998 return n
999}
1000func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
1001 s := *ptr.toUint32Slice()
1002 if len(s) == 0 {
1003 return 0
1004 }
1005 n := 0
1006 for _, v := range s {
1007 n += SizeVarint(uint64(v))
1008 }
1009 return n + SizeVarint(uint64(n)) + tagsize
1010}
1011func sizeVarintS32Value(ptr pointer, tagsize int) int {
1012 v := *ptr.toInt32()
1013 return SizeVarint(uint64(v)) + tagsize
1014}
1015func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
1016 v := *ptr.toInt32()
1017 if v == 0 {
1018 return 0
1019 }
1020 return SizeVarint(uint64(v)) + tagsize
1021}
1022func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
1023 p := ptr.getInt32Ptr()
1024 if p == nil {
1025 return 0
1026 }
1027 return SizeVarint(uint64(*p)) + tagsize
1028}
1029func sizeVarintS32Slice(ptr pointer, tagsize int) int {
1030 s := ptr.getInt32Slice()
1031 n := 0
1032 for _, v := range s {
1033 n += SizeVarint(uint64(v)) + tagsize
1034 }
1035 return n
1036}
1037func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
1038 s := ptr.getInt32Slice()
1039 if len(s) == 0 {
1040 return 0
1041 }
1042 n := 0
1043 for _, v := range s {
1044 n += SizeVarint(uint64(v))
1045 }
1046 return n + SizeVarint(uint64(n)) + tagsize
1047}
1048func sizeVarint64Value(ptr pointer, tagsize int) int {
1049 v := *ptr.toUint64()
1050 return SizeVarint(v) + tagsize
1051}
1052func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
1053 v := *ptr.toUint64()
1054 if v == 0 {
1055 return 0
1056 }
1057 return SizeVarint(v) + tagsize
1058}
1059func sizeVarint64Ptr(ptr pointer, tagsize int) int {
1060 p := *ptr.toUint64Ptr()
1061 if p == nil {
1062 return 0
1063 }
1064 return SizeVarint(*p) + tagsize
1065}
1066func sizeVarint64Slice(ptr pointer, tagsize int) int {
1067 s := *ptr.toUint64Slice()
1068 n := 0
1069 for _, v := range s {
1070 n += SizeVarint(v) + tagsize
1071 }
1072 return n
1073}
1074func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
1075 s := *ptr.toUint64Slice()
1076 if len(s) == 0 {
1077 return 0
1078 }
1079 n := 0
1080 for _, v := range s {
1081 n += SizeVarint(v)
1082 }
1083 return n + SizeVarint(uint64(n)) + tagsize
1084}
1085func sizeVarintS64Value(ptr pointer, tagsize int) int {
1086 v := *ptr.toInt64()
1087 return SizeVarint(uint64(v)) + tagsize
1088}
1089func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
1090 v := *ptr.toInt64()
1091 if v == 0 {
1092 return 0
1093 }
1094 return SizeVarint(uint64(v)) + tagsize
1095}
1096func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
1097 p := *ptr.toInt64Ptr()
1098 if p == nil {
1099 return 0
1100 }
1101 return SizeVarint(uint64(*p)) + tagsize
1102}
1103func sizeVarintS64Slice(ptr pointer, tagsize int) int {
1104 s := *ptr.toInt64Slice()
1105 n := 0
1106 for _, v := range s {
1107 n += SizeVarint(uint64(v)) + tagsize
1108 }
1109 return n
1110}
1111func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
1112 s := *ptr.toInt64Slice()
1113 if len(s) == 0 {
1114 return 0
1115 }
1116 n := 0
1117 for _, v := range s {
1118 n += SizeVarint(uint64(v))
1119 }
1120 return n + SizeVarint(uint64(n)) + tagsize
1121}
1122func sizeZigzag32Value(ptr pointer, tagsize int) int {
1123 v := *ptr.toInt32()
1124 return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
1125}
1126func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
1127 v := *ptr.toInt32()
1128 if v == 0 {
1129 return 0
1130 }
1131 return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
1132}
1133func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
1134 p := ptr.getInt32Ptr()
1135 if p == nil {
1136 return 0
1137 }
1138 v := *p
1139 return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
1140}
1141func sizeZigzag32Slice(ptr pointer, tagsize int) int {
1142 s := ptr.getInt32Slice()
1143 n := 0
1144 for _, v := range s {
1145 n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
1146 }
1147 return n
1148}
1149func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
1150 s := ptr.getInt32Slice()
1151 if len(s) == 0 {
1152 return 0
1153 }
1154 n := 0
1155 for _, v := range s {
1156 n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
1157 }
1158 return n + SizeVarint(uint64(n)) + tagsize
1159}
1160func sizeZigzag64Value(ptr pointer, tagsize int) int {
1161 v := *ptr.toInt64()
1162 return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
1163}
1164func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
1165 v := *ptr.toInt64()
1166 if v == 0 {
1167 return 0
1168 }
1169 return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
1170}
1171func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
1172 p := *ptr.toInt64Ptr()
1173 if p == nil {
1174 return 0
1175 }
1176 v := *p
1177 return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
1178}
1179func sizeZigzag64Slice(ptr pointer, tagsize int) int {
1180 s := *ptr.toInt64Slice()
1181 n := 0
1182 for _, v := range s {
1183 n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
1184 }
1185 return n
1186}
1187func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
1188 s := *ptr.toInt64Slice()
1189 if len(s) == 0 {
1190 return 0
1191 }
1192 n := 0
1193 for _, v := range s {
1194 n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
1195 }
1196 return n + SizeVarint(uint64(n)) + tagsize
1197}
1198func sizeBoolValue(_ pointer, tagsize int) int {
1199 return 1 + tagsize
1200}
1201func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
1202 v := *ptr.toBool()
1203 if !v {
1204 return 0
1205 }
1206 return 1 + tagsize
1207}
1208func sizeBoolPtr(ptr pointer, tagsize int) int {
1209 p := *ptr.toBoolPtr()
1210 if p == nil {
1211 return 0
1212 }
1213 return 1 + tagsize
1214}
1215func sizeBoolSlice(ptr pointer, tagsize int) int {
1216 s := *ptr.toBoolSlice()
1217 return (1 + tagsize) * len(s)
1218}
1219func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
1220 s := *ptr.toBoolSlice()
1221 if len(s) == 0 {
1222 return 0
1223 }
1224 return len(s) + SizeVarint(uint64(len(s))) + tagsize
1225}
1226func sizeStringValue(ptr pointer, tagsize int) int {
1227 v := *ptr.toString()
1228 return len(v) + SizeVarint(uint64(len(v))) + tagsize
1229}
1230func sizeStringValueNoZero(ptr pointer, tagsize int) int {
1231 v := *ptr.toString()
1232 if v == "" {
1233 return 0
1234 }
1235 return len(v) + SizeVarint(uint64(len(v))) + tagsize
1236}
1237func sizeStringPtr(ptr pointer, tagsize int) int {
1238 p := *ptr.toStringPtr()
1239 if p == nil {
1240 return 0
1241 }
1242 v := *p
1243 return len(v) + SizeVarint(uint64(len(v))) + tagsize
1244}
1245func sizeStringSlice(ptr pointer, tagsize int) int {
1246 s := *ptr.toStringSlice()
1247 n := 0
1248 for _, v := range s {
1249 n += len(v) + SizeVarint(uint64(len(v))) + tagsize
1250 }
1251 return n
1252}
1253func sizeBytes(ptr pointer, tagsize int) int {
1254 v := *ptr.toBytes()
1255 if v == nil {
1256 return 0
1257 }
1258 return len(v) + SizeVarint(uint64(len(v))) + tagsize
1259}
1260func sizeBytes3(ptr pointer, tagsize int) int {
1261 v := *ptr.toBytes()
1262 if len(v) == 0 {
1263 return 0
1264 }
1265 return len(v) + SizeVarint(uint64(len(v))) + tagsize
1266}
1267func sizeBytesOneof(ptr pointer, tagsize int) int {
1268 v := *ptr.toBytes()
1269 return len(v) + SizeVarint(uint64(len(v))) + tagsize
1270}
1271func sizeBytesSlice(ptr pointer, tagsize int) int {
1272 s := *ptr.toBytesSlice()
1273 n := 0
1274 for _, v := range s {
1275 n += len(v) + SizeVarint(uint64(len(v))) + tagsize
1276 }
1277 return n
1278}
1279
1280// appendFixed32 appends an encoded fixed32 to b.
1281func appendFixed32(b []byte, v uint32) []byte {
1282 b = append(b,
1283 byte(v),
1284 byte(v>>8),
1285 byte(v>>16),
1286 byte(v>>24))
1287 return b
1288}
1289
1290// appendFixed64 appends an encoded fixed64 to b.
1291func appendFixed64(b []byte, v uint64) []byte {
1292 b = append(b,
1293 byte(v),
1294 byte(v>>8),
1295 byte(v>>16),
1296 byte(v>>24),
1297 byte(v>>32),
1298 byte(v>>40),
1299 byte(v>>48),
1300 byte(v>>56))
1301 return b
1302}
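// Editor's note (illustrative, not part of the upstream file): fixed32 and fixed64
// values are written little-endian, least-significant byte first, e.g.
//   appendFixed32(nil, 0x12345678) -> [0x78, 0x56, 0x34, 0x12]
//   appendFixed64(nil, 1)          -> [0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]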
1303
1304// appendVarint appends an encoded varint to b.
1305func appendVarint(b []byte, v uint64) []byte {
1306 // TODO: make the 1-byte (maybe 2-byte) case inlineable, once we
1307 // have a non-leaf inliner.
1308 switch {
1309 case v < 1<<7:
1310 b = append(b, byte(v))
1311 case v < 1<<14:
1312 b = append(b,
1313 byte(v&0x7f|0x80),
1314 byte(v>>7))
1315 case v < 1<<21:
1316 b = append(b,
1317 byte(v&0x7f|0x80),
1318 byte((v>>7)&0x7f|0x80),
1319 byte(v>>14))
1320 case v < 1<<28:
1321 b = append(b,
1322 byte(v&0x7f|0x80),
1323 byte((v>>7)&0x7f|0x80),
1324 byte((v>>14)&0x7f|0x80),
1325 byte(v>>21))
1326 case v < 1<<35:
1327 b = append(b,
1328 byte(v&0x7f|0x80),
1329 byte((v>>7)&0x7f|0x80),
1330 byte((v>>14)&0x7f|0x80),
1331 byte((v>>21)&0x7f|0x80),
1332 byte(v>>28))
1333 case v < 1<<42:
1334 b = append(b,
1335 byte(v&0x7f|0x80),
1336 byte((v>>7)&0x7f|0x80),
1337 byte((v>>14)&0x7f|0x80),
1338 byte((v>>21)&0x7f|0x80),
1339 byte((v>>28)&0x7f|0x80),
1340 byte(v>>35))
1341 case v < 1<<49:
1342 b = append(b,
1343 byte(v&0x7f|0x80),
1344 byte((v>>7)&0x7f|0x80),
1345 byte((v>>14)&0x7f|0x80),
1346 byte((v>>21)&0x7f|0x80),
1347 byte((v>>28)&0x7f|0x80),
1348 byte((v>>35)&0x7f|0x80),
1349 byte(v>>42))
1350 case v < 1<<56:
1351 b = append(b,
1352 byte(v&0x7f|0x80),
1353 byte((v>>7)&0x7f|0x80),
1354 byte((v>>14)&0x7f|0x80),
1355 byte((v>>21)&0x7f|0x80),
1356 byte((v>>28)&0x7f|0x80),
1357 byte((v>>35)&0x7f|0x80),
1358 byte((v>>42)&0x7f|0x80),
1359 byte(v>>49))
1360 case v < 1<<63:
1361 b = append(b,
1362 byte(v&0x7f|0x80),
1363 byte((v>>7)&0x7f|0x80),
1364 byte((v>>14)&0x7f|0x80),
1365 byte((v>>21)&0x7f|0x80),
1366 byte((v>>28)&0x7f|0x80),
1367 byte((v>>35)&0x7f|0x80),
1368 byte((v>>42)&0x7f|0x80),
1369 byte((v>>49)&0x7f|0x80),
1370 byte(v>>56))
1371 default:
1372 b = append(b,
1373 byte(v&0x7f|0x80),
1374 byte((v>>7)&0x7f|0x80),
1375 byte((v>>14)&0x7f|0x80),
1376 byte((v>>21)&0x7f|0x80),
1377 byte((v>>28)&0x7f|0x80),
1378 byte((v>>35)&0x7f|0x80),
1379 byte((v>>42)&0x7f|0x80),
1380 byte((v>>49)&0x7f|0x80),
1381 byte((v>>56)&0x7f|0x80),
1382 1)
1383 }
1384 return b
1385}
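// Editor's note (illustrative, not part of the upstream file): each varint byte
// carries 7 payload bits, with the high bit set on every byte except the last.
// Worked examples:
//   appendVarint(nil, 1)   -> [0x01]
//   appendVarint(nil, 300) -> [0xAC, 0x02]   // 300 = 0b10_0101100: 0x2C|0x80, then 0x02
//   values >= 1<<63 take the default branch and always emit the full 10 bytes.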
1386
1387func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1388 v := *ptr.toUint32()
1389 b = appendVarint(b, wiretag)
1390 b = appendFixed32(b, v)
1391 return b, nil
1392}
1393func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1394 v := *ptr.toUint32()
1395 if v == 0 {
1396 return b, nil
1397 }
1398 b = appendVarint(b, wiretag)
1399 b = appendFixed32(b, v)
1400 return b, nil
1401}
1402func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1403 p := *ptr.toUint32Ptr()
1404 if p == nil {
1405 return b, nil
1406 }
1407 b = appendVarint(b, wiretag)
1408 b = appendFixed32(b, *p)
1409 return b, nil
1410}
1411func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1412 s := *ptr.toUint32Slice()
1413 for _, v := range s {
1414 b = appendVarint(b, wiretag)
1415 b = appendFixed32(b, v)
1416 }
1417 return b, nil
1418}
1419func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1420 s := *ptr.toUint32Slice()
1421 if len(s) == 0 {
1422 return b, nil
1423 }
1424 b = appendVarint(b, wiretag&^7|WireBytes)
1425 b = appendVarint(b, uint64(4*len(s)))
1426 for _, v := range s {
1427 b = appendFixed32(b, v)
1428 }
1429 return b, nil
1430}
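// Editor's note (illustrative, not part of the upstream file): for packed slices the
// key's wire type is rewritten to length-delimited (wiretag&^7 | WireBytes), followed
// by the payload length and then the raw elements. A hypothetical repeated fixed32
// field number 4 holding {1, 2} encodes as
//   0x22 (4<<3 | WireBytes), 0x08 (8 payload bytes),
//   0x01 0x00 0x00 0x00, 0x02 0x00 0x00 0x00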
1431func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1432 v := *ptr.toInt32()
1433 b = appendVarint(b, wiretag)
1434 b = appendFixed32(b, uint32(v))
1435 return b, nil
1436}
1437func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1438 v := *ptr.toInt32()
1439 if v == 0 {
1440 return b, nil
1441 }
1442 b = appendVarint(b, wiretag)
1443 b = appendFixed32(b, uint32(v))
1444 return b, nil
1445}
1446func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1447 p := ptr.getInt32Ptr()
1448 if p == nil {
1449 return b, nil
1450 }
1451 b = appendVarint(b, wiretag)
1452 b = appendFixed32(b, uint32(*p))
1453 return b, nil
1454}
1455func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1456 s := ptr.getInt32Slice()
1457 for _, v := range s {
1458 b = appendVarint(b, wiretag)
1459 b = appendFixed32(b, uint32(v))
1460 }
1461 return b, nil
1462}
1463func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1464 s := ptr.getInt32Slice()
1465 if len(s) == 0 {
1466 return b, nil
1467 }
1468 b = appendVarint(b, wiretag&^7|WireBytes)
1469 b = appendVarint(b, uint64(4*len(s)))
1470 for _, v := range s {
1471 b = appendFixed32(b, uint32(v))
1472 }
1473 return b, nil
1474}
1475func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1476 v := math.Float32bits(*ptr.toFloat32())
1477 b = appendVarint(b, wiretag)
1478 b = appendFixed32(b, v)
1479 return b, nil
1480}
1481func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1482 v := math.Float32bits(*ptr.toFloat32())
1483 if v == 0 {
1484 return b, nil
1485 }
1486 b = appendVarint(b, wiretag)
1487 b = appendFixed32(b, v)
1488 return b, nil
1489}
1490func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1491 p := *ptr.toFloat32Ptr()
1492 if p == nil {
1493 return b, nil
1494 }
1495 b = appendVarint(b, wiretag)
1496 b = appendFixed32(b, math.Float32bits(*p))
1497 return b, nil
1498}
1499func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1500 s := *ptr.toFloat32Slice()
1501 for _, v := range s {
1502 b = appendVarint(b, wiretag)
1503 b = appendFixed32(b, math.Float32bits(v))
1504 }
1505 return b, nil
1506}
1507func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1508 s := *ptr.toFloat32Slice()
1509 if len(s) == 0 {
1510 return b, nil
1511 }
1512 b = appendVarint(b, wiretag&^7|WireBytes)
1513 b = appendVarint(b, uint64(4*len(s)))
1514 for _, v := range s {
1515 b = appendFixed32(b, math.Float32bits(v))
1516 }
1517 return b, nil
1518}
1519func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1520 v := *ptr.toUint64()
1521 b = appendVarint(b, wiretag)
1522 b = appendFixed64(b, v)
1523 return b, nil
1524}
1525func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1526 v := *ptr.toUint64()
1527 if v == 0 {
1528 return b, nil
1529 }
1530 b = appendVarint(b, wiretag)
1531 b = appendFixed64(b, v)
1532 return b, nil
1533}
1534func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1535 p := *ptr.toUint64Ptr()
1536 if p == nil {
1537 return b, nil
1538 }
1539 b = appendVarint(b, wiretag)
1540 b = appendFixed64(b, *p)
1541 return b, nil
1542}
1543func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1544 s := *ptr.toUint64Slice()
1545 for _, v := range s {
1546 b = appendVarint(b, wiretag)
1547 b = appendFixed64(b, v)
1548 }
1549 return b, nil
1550}
1551func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1552 s := *ptr.toUint64Slice()
1553 if len(s) == 0 {
1554 return b, nil
1555 }
1556 b = appendVarint(b, wiretag&^7|WireBytes)
1557 b = appendVarint(b, uint64(8*len(s)))
1558 for _, v := range s {
1559 b = appendFixed64(b, v)
1560 }
1561 return b, nil
1562}
1563func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1564 v := *ptr.toInt64()
1565 b = appendVarint(b, wiretag)
1566 b = appendFixed64(b, uint64(v))
1567 return b, nil
1568}
1569func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1570 v := *ptr.toInt64()
1571 if v == 0 {
1572 return b, nil
1573 }
1574 b = appendVarint(b, wiretag)
1575 b = appendFixed64(b, uint64(v))
1576 return b, nil
1577}
1578func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1579 p := *ptr.toInt64Ptr()
1580 if p == nil {
1581 return b, nil
1582 }
1583 b = appendVarint(b, wiretag)
1584 b = appendFixed64(b, uint64(*p))
1585 return b, nil
1586}
1587func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1588 s := *ptr.toInt64Slice()
1589 for _, v := range s {
1590 b = appendVarint(b, wiretag)
1591 b = appendFixed64(b, uint64(v))
1592 }
1593 return b, nil
1594}
1595func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1596 s := *ptr.toInt64Slice()
1597 if len(s) == 0 {
1598 return b, nil
1599 }
1600 b = appendVarint(b, wiretag&^7|WireBytes)
1601 b = appendVarint(b, uint64(8*len(s)))
1602 for _, v := range s {
1603 b = appendFixed64(b, uint64(v))
1604 }
1605 return b, nil
1606}
1607func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1608 v := math.Float64bits(*ptr.toFloat64())
1609 b = appendVarint(b, wiretag)
1610 b = appendFixed64(b, v)
1611 return b, nil
1612}
1613func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1614 v := math.Float64bits(*ptr.toFloat64())
1615 if v == 0 {
1616 return b, nil
1617 }
1618 b = appendVarint(b, wiretag)
1619 b = appendFixed64(b, v)
1620 return b, nil
1621}
1622func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1623 p := *ptr.toFloat64Ptr()
1624 if p == nil {
1625 return b, nil
1626 }
1627 b = appendVarint(b, wiretag)
1628 b = appendFixed64(b, math.Float64bits(*p))
1629 return b, nil
1630}
1631func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1632 s := *ptr.toFloat64Slice()
1633 for _, v := range s {
1634 b = appendVarint(b, wiretag)
1635 b = appendFixed64(b, math.Float64bits(v))
1636 }
1637 return b, nil
1638}
1639func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1640 s := *ptr.toFloat64Slice()
1641 if len(s) == 0 {
1642 return b, nil
1643 }
1644 b = appendVarint(b, wiretag&^7|WireBytes)
1645 b = appendVarint(b, uint64(8*len(s)))
1646 for _, v := range s {
1647 b = appendFixed64(b, math.Float64bits(v))
1648 }
1649 return b, nil
1650}
1651func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1652 v := *ptr.toUint32()
1653 b = appendVarint(b, wiretag)
1654 b = appendVarint(b, uint64(v))
1655 return b, nil
1656}
1657func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1658 v := *ptr.toUint32()
1659 if v == 0 {
1660 return b, nil
1661 }
1662 b = appendVarint(b, wiretag)
1663 b = appendVarint(b, uint64(v))
1664 return b, nil
1665}
1666func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1667 p := *ptr.toUint32Ptr()
1668 if p == nil {
1669 return b, nil
1670 }
1671 b = appendVarint(b, wiretag)
1672 b = appendVarint(b, uint64(*p))
1673 return b, nil
1674}
1675func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1676 s := *ptr.toUint32Slice()
1677 for _, v := range s {
1678 b = appendVarint(b, wiretag)
1679 b = appendVarint(b, uint64(v))
1680 }
1681 return b, nil
1682}
1683func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1684 s := *ptr.toUint32Slice()
1685 if len(s) == 0 {
1686 return b, nil
1687 }
1688 b = appendVarint(b, wiretag&^7|WireBytes)
1689 // compute size
1690 n := 0
1691 for _, v := range s {
1692 n += SizeVarint(uint64(v))
1693 }
1694 b = appendVarint(b, uint64(n))
1695 for _, v := range s {
1696 b = appendVarint(b, uint64(v))
1697 }
1698 return b, nil
1699}
1700func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1701 v := *ptr.toInt32()
1702 b = appendVarint(b, wiretag)
1703 b = appendVarint(b, uint64(v))
1704 return b, nil
1705}
1706func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1707 v := *ptr.toInt32()
1708 if v == 0 {
1709 return b, nil
1710 }
1711 b = appendVarint(b, wiretag)
1712 b = appendVarint(b, uint64(v))
1713 return b, nil
1714}
1715func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1716 p := ptr.getInt32Ptr()
1717 if p == nil {
1718 return b, nil
1719 }
1720 b = appendVarint(b, wiretag)
1721 b = appendVarint(b, uint64(*p))
1722 return b, nil
1723}
1724func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1725 s := ptr.getInt32Slice()
1726 for _, v := range s {
1727 b = appendVarint(b, wiretag)
1728 b = appendVarint(b, uint64(v))
1729 }
1730 return b, nil
1731}
1732func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1733 s := ptr.getInt32Slice()
1734 if len(s) == 0 {
1735 return b, nil
1736 }
1737 b = appendVarint(b, wiretag&^7|WireBytes)
1738 // compute size
1739 n := 0
1740 for _, v := range s {
1741 n += SizeVarint(uint64(v))
1742 }
1743 b = appendVarint(b, uint64(n))
1744 for _, v := range s {
1745 b = appendVarint(b, uint64(v))
1746 }
1747 return b, nil
1748}
1749func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1750 v := *ptr.toUint64()
1751 b = appendVarint(b, wiretag)
1752 b = appendVarint(b, v)
1753 return b, nil
1754}
1755func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1756 v := *ptr.toUint64()
1757 if v == 0 {
1758 return b, nil
1759 }
1760 b = appendVarint(b, wiretag)
1761 b = appendVarint(b, v)
1762 return b, nil
1763}
1764func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1765 p := *ptr.toUint64Ptr()
1766 if p == nil {
1767 return b, nil
1768 }
1769 b = appendVarint(b, wiretag)
1770 b = appendVarint(b, *p)
1771 return b, nil
1772}
1773func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1774 s := *ptr.toUint64Slice()
1775 for _, v := range s {
1776 b = appendVarint(b, wiretag)
1777 b = appendVarint(b, v)
1778 }
1779 return b, nil
1780}
1781func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1782 s := *ptr.toUint64Slice()
1783 if len(s) == 0 {
1784 return b, nil
1785 }
1786 b = appendVarint(b, wiretag&^7|WireBytes)
1787 // compute size
1788 n := 0
1789 for _, v := range s {
1790 n += SizeVarint(v)
1791 }
1792 b = appendVarint(b, uint64(n))
1793 for _, v := range s {
1794 b = appendVarint(b, v)
1795 }
1796 return b, nil
1797}
1798func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1799 v := *ptr.toInt64()
1800 b = appendVarint(b, wiretag)
1801 b = appendVarint(b, uint64(v))
1802 return b, nil
1803}
1804func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1805 v := *ptr.toInt64()
1806 if v == 0 {
1807 return b, nil
1808 }
1809 b = appendVarint(b, wiretag)
1810 b = appendVarint(b, uint64(v))
1811 return b, nil
1812}
1813func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1814 p := *ptr.toInt64Ptr()
1815 if p == nil {
1816 return b, nil
1817 }
1818 b = appendVarint(b, wiretag)
1819 b = appendVarint(b, uint64(*p))
1820 return b, nil
1821}
1822func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1823 s := *ptr.toInt64Slice()
1824 for _, v := range s {
1825 b = appendVarint(b, wiretag)
1826 b = appendVarint(b, uint64(v))
1827 }
1828 return b, nil
1829}
1830func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1831 s := *ptr.toInt64Slice()
1832 if len(s) == 0 {
1833 return b, nil
1834 }
1835 b = appendVarint(b, wiretag&^7|WireBytes)
1836 // compute size
1837 n := 0
1838 for _, v := range s {
1839 n += SizeVarint(uint64(v))
1840 }
1841 b = appendVarint(b, uint64(n))
1842 for _, v := range s {
1843 b = appendVarint(b, uint64(v))
1844 }
1845 return b, nil
1846}
1847func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1848 v := *ptr.toInt32()
1849 b = appendVarint(b, wiretag)
1850 b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
1851 return b, nil
1852}
1853func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1854 v := *ptr.toInt32()
1855 if v == 0 {
1856 return b, nil
1857 }
1858 b = appendVarint(b, wiretag)
1859 b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
1860 return b, nil
1861}
1862func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1863 p := ptr.getInt32Ptr()
1864 if p == nil {
1865 return b, nil
1866 }
1867 b = appendVarint(b, wiretag)
1868 v := *p
1869 b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
1870 return b, nil
1871}
1872func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1873 s := ptr.getInt32Slice()
1874 for _, v := range s {
1875 b = appendVarint(b, wiretag)
1876 b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
1877 }
1878 return b, nil
1879}
1880func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1881 s := ptr.getInt32Slice()
1882 if len(s) == 0 {
1883 return b, nil
1884 }
1885 b = appendVarint(b, wiretag&^7|WireBytes)
1886 // compute size
1887 n := 0
1888 for _, v := range s {
1889 n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
1890 }
1891 b = appendVarint(b, uint64(n))
1892 for _, v := range s {
1893 b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
1894 }
1895 return b, nil
1896}
1897func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1898 v := *ptr.toInt64()
1899 b = appendVarint(b, wiretag)
1900 b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
1901 return b, nil
1902}
1903func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1904 v := *ptr.toInt64()
1905 if v == 0 {
1906 return b, nil
1907 }
1908 b = appendVarint(b, wiretag)
1909 b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
1910 return b, nil
1911}
1912func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1913 p := *ptr.toInt64Ptr()
1914 if p == nil {
1915 return b, nil
1916 }
1917 b = appendVarint(b, wiretag)
1918 v := *p
1919 b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
1920 return b, nil
1921}
1922func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1923 s := *ptr.toInt64Slice()
1924 for _, v := range s {
1925 b = appendVarint(b, wiretag)
1926 b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
1927 }
1928 return b, nil
1929}
1930func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1931 s := *ptr.toInt64Slice()
1932 if len(s) == 0 {
1933 return b, nil
1934 }
1935 b = appendVarint(b, wiretag&^7|WireBytes)
1936 // compute size
1937 n := 0
1938 for _, v := range s {
1939 n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
1940 }
1941 b = appendVarint(b, uint64(n))
1942 for _, v := range s {
1943 b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
1944 }
1945 return b, nil
1946}
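// Editor's note (illustrative, not part of the upstream file): the zigzag mapping used
// above, (uint32(v)<<1) ^ uint32(v>>31) for 32-bit values (shift by 63 for 64-bit),
// folds negative numbers into small unsigned values so they stay short as varints:
//   0 -> 0,  -1 -> 1,  1 -> 2,  -2 -> 3,  2 -> 4, ...
// e.g. for v = int32(-1): uint32(v)<<1 = 0xFFFFFFFE, uint32(v>>31) = 0xFFFFFFFF,
// and their XOR is 1.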
1947func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1948 v := *ptr.toBool()
1949 b = appendVarint(b, wiretag)
1950 if v {
1951 b = append(b, 1)
1952 } else {
1953 b = append(b, 0)
1954 }
1955 return b, nil
1956}
1957func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1958 v := *ptr.toBool()
1959 if !v {
1960 return b, nil
1961 }
1962 b = appendVarint(b, wiretag)
1963 b = append(b, 1)
1964 return b, nil
1965}
1966
1967func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1968 p := *ptr.toBoolPtr()
1969 if p == nil {
1970 return b, nil
1971 }
1972 b = appendVarint(b, wiretag)
1973 if *p {
1974 b = append(b, 1)
1975 } else {
1976 b = append(b, 0)
1977 }
1978 return b, nil
1979}
1980func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1981 s := *ptr.toBoolSlice()
1982 for _, v := range s {
1983 b = appendVarint(b, wiretag)
1984 if v {
1985 b = append(b, 1)
1986 } else {
1987 b = append(b, 0)
1988 }
1989 }
1990 return b, nil
1991}
1992func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
1993 s := *ptr.toBoolSlice()
1994 if len(s) == 0 {
1995 return b, nil
1996 }
1997 b = appendVarint(b, wiretag&^7|WireBytes)
1998 b = appendVarint(b, uint64(len(s)))
1999 for _, v := range s {
2000 if v {
2001 b = append(b, 1)
2002 } else {
2003 b = append(b, 0)
2004 }
2005 }
2006 return b, nil
2007}
2008func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2009 v := *ptr.toString()
2010 b = appendVarint(b, wiretag)
2011 b = appendVarint(b, uint64(len(v)))
2012 b = append(b, v...)
2013 return b, nil
2014}
2015func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2016 v := *ptr.toString()
2017 if v == "" {
2018 return b, nil
2019 }
2020 b = appendVarint(b, wiretag)
2021 b = appendVarint(b, uint64(len(v)))
2022 b = append(b, v...)
2023 return b, nil
2024}
2025func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2026 p := *ptr.toStringPtr()
2027 if p == nil {
2028 return b, nil
2029 }
2030 v := *p
2031 b = appendVarint(b, wiretag)
2032 b = appendVarint(b, uint64(len(v)))
2033 b = append(b, v...)
2034 return b, nil
2035}
2036func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2037 s := *ptr.toStringSlice()
2038 for _, v := range s {
2039 b = appendVarint(b, wiretag)
2040 b = appendVarint(b, uint64(len(v)))
2041 b = append(b, v...)
2042 }
2043 return b, nil
2044}
2045func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2046 var invalidUTF8 bool
2047 v := *ptr.toString()
2048 if !utf8.ValidString(v) {
2049 invalidUTF8 = true
2050 }
2051 b = appendVarint(b, wiretag)
2052 b = appendVarint(b, uint64(len(v)))
2053 b = append(b, v...)
2054 if invalidUTF8 {
2055 return b, errInvalidUTF8
2056 }
2057 return b, nil
2058}
2059func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2060 var invalidUTF8 bool
2061 v := *ptr.toString()
2062 if v == "" {
2063 return b, nil
2064 }
2065 if !utf8.ValidString(v) {
2066 invalidUTF8 = true
2067 }
2068 b = appendVarint(b, wiretag)
2069 b = appendVarint(b, uint64(len(v)))
2070 b = append(b, v...)
2071 if invalidUTF8 {
2072 return b, errInvalidUTF8
2073 }
2074 return b, nil
2075}
2076func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2077 var invalidUTF8 bool
2078 p := *ptr.toStringPtr()
2079 if p == nil {
2080 return b, nil
2081 }
2082 v := *p
2083 if !utf8.ValidString(v) {
2084 invalidUTF8 = true
2085 }
2086 b = appendVarint(b, wiretag)
2087 b = appendVarint(b, uint64(len(v)))
2088 b = append(b, v...)
2089 if invalidUTF8 {
2090 return b, errInvalidUTF8
2091 }
2092 return b, nil
2093}
2094func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2095 var invalidUTF8 bool
2096 s := *ptr.toStringSlice()
2097 for _, v := range s {
2098 if !utf8.ValidString(v) {
2099 invalidUTF8 = true
2100 }
2101 b = appendVarint(b, wiretag)
2102 b = appendVarint(b, uint64(len(v)))
2103 b = append(b, v...)
2104 }
2105 if invalidUTF8 {
2106 return b, errInvalidUTF8
2107 }
2108 return b, nil
2109}
2110func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2111 v := *ptr.toBytes()
2112 if v == nil {
2113 return b, nil
2114 }
2115 b = appendVarint(b, wiretag)
2116 b = appendVarint(b, uint64(len(v)))
2117 b = append(b, v...)
2118 return b, nil
2119}
2120func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2121 v := *ptr.toBytes()
2122 if len(v) == 0 {
2123 return b, nil
2124 }
2125 b = appendVarint(b, wiretag)
2126 b = appendVarint(b, uint64(len(v)))
2127 b = append(b, v...)
2128 return b, nil
2129}
2130func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2131 v := *ptr.toBytes()
2132 b = appendVarint(b, wiretag)
2133 b = appendVarint(b, uint64(len(v)))
2134 b = append(b, v...)
2135 return b, nil
2136}
2137func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
2138 s := *ptr.toBytesSlice()
2139 for _, v := range s {
2140 b = appendVarint(b, wiretag)
2141 b = appendVarint(b, uint64(len(v)))
2142 b = append(b, v...)
2143 }
2144 return b, nil
2145}
2146
2147// makeGroupMarshaler returns the sizer and marshaler for a group.
2148// u is the marshal info of the underlying message.
2149func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
2150 return func(ptr pointer, tagsize int) int {
2151 p := ptr.getPointer()
2152 if p.isNil() {
2153 return 0
2154 }
2155 return u.size(p) + 2*tagsize
2156 },
2157 func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
2158 p := ptr.getPointer()
2159 if p.isNil() {
2160 return b, nil
2161 }
2162 var err error
2163 b = appendVarint(b, wiretag) // start group
2164 b, err = u.marshal(b, p, deterministic)
2165 b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
2166 return b, err
2167 }
2168}
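// Editor's note (illustrative, not part of the upstream file): groups are framed by a
// start-group and an end-group key rather than a length prefix. Because
// WireEndGroup == WireStartGroup+1, wiretag+(WireEndGroup-WireStartGroup) turns the
// start key into the matching end key; e.g. a group on field 3 is bracketed by
// 3<<3|WireStartGroup = 0x1B and 3<<3|WireEndGroup = 0x1C.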
2169
2170// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
2171// u is the marshal info of the underlying message.
2172func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
2173 return func(ptr pointer, tagsize int) int {
2174 s := ptr.getPointerSlice()
2175 n := 0
2176 for _, v := range s {
2177 if v.isNil() {
2178 continue
2179 }
2180 n += u.size(v) + 2*tagsize
2181 }
2182 return n
2183 },
2184 func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
2185 s := ptr.getPointerSlice()
2186 var err error
2187 var nerr nonFatal
2188 for _, v := range s {
2189 if v.isNil() {
2190 return b, errRepeatedHasNil
2191 }
2192 b = appendVarint(b, wiretag) // start group
2193 b, err = u.marshal(b, v, deterministic)
2194 b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
2195 if !nerr.Merge(err) {
2196 if err == ErrNil {
2197 err = errRepeatedHasNil
2198 }
2199 return b, err
2200 }
2201 }
2202 return b, nerr.E
2203 }
2204}
2205
2206// makeMessageMarshaler returns the sizer and marshaler for a message field.
2207// u is the marshal info of the message.
2208func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
2209 return func(ptr pointer, tagsize int) int {
2210 p := ptr.getPointer()
2211 if p.isNil() {
2212 return 0
2213 }
2214 siz := u.size(p)
2215 return siz + SizeVarint(uint64(siz)) + tagsize
2216 },
2217 func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
2218 p := ptr.getPointer()
2219 if p.isNil() {
2220 return b, nil
2221 }
2222 b = appendVarint(b, wiretag)
2223 siz := u.cachedsize(p)
2224 b = appendVarint(b, uint64(siz))
2225 return u.marshal(b, p, deterministic)
2226 }
2227}
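// Editor's note (illustrative, not part of the upstream file): an embedded message is
// written as key, varint length, then the body produced by u.marshal. The marshaler
// reads u.cachedsize rather than recomputing u.size because the enclosing size pass
// runs first and caches each submessage's size (in its XXX_sizecache field, when the
// generated type has one), so re-running the sizer here would repeat that work.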
2228
2229// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
2230// u is the marshal info of the message.
2231func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
2232 return func(ptr pointer, tagsize int) int {
2233 s := ptr.getPointerSlice()
2234 n := 0
2235 for _, v := range s {
2236 if v.isNil() {
2237 continue
2238 }
2239 siz := u.size(v)
2240 n += siz + SizeVarint(uint64(siz)) + tagsize
2241 }
2242 return n
2243 },
2244 func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
2245 s := ptr.getPointerSlice()
2246 var err error
2247 var nerr nonFatal
2248 for _, v := range s {
2249 if v.isNil() {
2250 return b, errRepeatedHasNil
2251 }
2252 b = appendVarint(b, wiretag)
2253 siz := u.cachedsize(v)
2254 b = appendVarint(b, uint64(siz))
2255 b, err = u.marshal(b, v, deterministic)
2256
2257 if !nerr.Merge(err) {
2258 if err == ErrNil {
2259 err = errRepeatedHasNil
2260 }
2261 return b, err
2262 }
2263 }
2264 return b, nerr.E
2265 }
2266}
2267
2268// makeMapMarshaler returns the sizer and marshaler for a map field.
2269// f is the pointer to the reflect data structure of the field.
2270func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
2271 // figure out key and value type
2272 t := f.Type
2273 keyType := t.Key()
2274 valType := t.Elem()
2275 keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
2276 valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
2277 keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
2278 valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
2279 keyWireTag := 1<<3 | wiretype(keyTags[0])
2280 valWireTag := 2<<3 | wiretype(valTags[0])
2281
2282 // We create an interface to get the addresses of the map key and value.
2283 // If the value is pointer-typed, the interface is a direct interface and the
2284 // idata itself is the value. Otherwise, the idata is a pointer to the
2285 // value.
2286 // The key cannot be pointer-typed.
2287 valIsPtr := valType.Kind() == reflect.Ptr
2288
2289 // If the value is a message with nested maps, calling
2290 // valSizer in marshal may be quadratic. We should use the
2291 // cached version in marshal (but not in size).
2292 // If the value is not a message type, we have no size cache,
2293 // but it cannot be nested either. Just use valSizer.
2294 valCachedSizer := valSizer
2295 if valIsPtr && valType.Elem().Kind() == reflect.Struct {
2296 u := getMarshalInfo(valType.Elem())
2297 valCachedSizer = func(ptr pointer, tagsize int) int {
2298 // Same as message sizer, but use cache.
2299 p := ptr.getPointer()
2300 if p.isNil() {
2301 return 0
2302 }
2303 siz := u.cachedsize(p)
2304 return siz + SizeVarint(uint64(siz)) + tagsize
2305 }
2306 }
2307 return func(ptr pointer, tagsize int) int {
2308 m := ptr.asPointerTo(t).Elem() // the map
2309 n := 0
2310 for _, k := range m.MapKeys() {
2311 ki := k.Interface()
2312 vi := m.MapIndex(k).Interface()
2313 kaddr := toAddrPointer(&ki, false) // pointer to key
2314 vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
2315 siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
2316 n += siz + SizeVarint(uint64(siz)) + tagsize
2317 }
2318 return n
2319 },
2320 func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
2321 m := ptr.asPointerTo(t).Elem() // the map
2322 var err error
2323 keys := m.MapKeys()
2324 if len(keys) > 1 && deterministic {
2325 sort.Sort(mapKeys(keys))
2326 }
2327
2328 var nerr nonFatal
2329 for _, k := range keys {
2330 ki := k.Interface()
2331 vi := m.MapIndex(k).Interface()
2332 kaddr := toAddrPointer(&ki, false) // pointer to key
2333 vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
2334 b = appendVarint(b, tag)
2335 siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
2336 b = appendVarint(b, uint64(siz))
2337 b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
2338 if !nerr.Merge(err) {
2339 return b, err
2340 }
2341 b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
2342 if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
2343 return b, err
2344 }
2345 }
2346 return b, nerr.E
2347 }
2348}
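// Editor's note (illustrative, not part of the upstream file): each map entry is
// encoded like a small message with the key on field 1 and the value on field 2,
// matching keyWireTag = 1<<3|... and valWireTag = 2<<3|... above. A hypothetical
// map<string, int32> entry {"a": 1} on field 7 encodes as
//   0x3A (7<<3 | WireBytes), 0x05 (entry length),
//   0x0A 0x01 0x61   (key:   field 1, bytes, "a"),
//   0x10 0x01        (value: field 2, varint, 1)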
2349
2350// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
2351// fi is the marshal info of the field.
2352// f is the pointer to the reflect data structure of the field.
2353func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
2354 // A oneof field is an interface; we need to determine the actual data type on the fly.
2355 t := f.Type
2356 return func(ptr pointer, _ int) int {
2357 p := ptr.getInterfacePointer()
2358 if p.isNil() {
2359 return 0
2360 }
2361 v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
2362 telem := v.Type()
2363 e := fi.oneofElems[telem]
2364 return e.sizer(p, e.tagsize)
2365 },
2366 func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
2367 p := ptr.getInterfacePointer()
2368 if p.isNil() {
2369 return b, nil
2370 }
2371 v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
2372 telem := v.Type()
2373 if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
2374 return b, errOneofHasNil
2375 }
2376 e := fi.oneofElems[telem]
2377 return e.marshaler(b, p, e.wiretag, deterministic)
2378 }
2379}
2380
2381// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
2382func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
2383 m, mu := ext.extensionsRead()
2384 if m == nil {
2385 return 0
2386 }
2387 mu.Lock()
2388
2389 n := 0
2390 for _, e := range m {
2391 if e.value == nil || e.desc == nil {
2392 // Extension is only in its encoded form.
2393 n += len(e.enc)
2394 continue
2395 }
2396
2397 // We don't skip extensions that have an encoded form set,
2398 // because the extension value may have been mutated after
2399 // the last time this function was called.
2400 ei := u.getExtElemInfo(e.desc)
2401 v := e.value
2402 p := toAddrPointer(&v, ei.isptr)
2403 n += ei.sizer(p, ei.tagsize)
2404 }
2405 mu.Unlock()
2406 return n
2407}
2408
2409// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
2410func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
2411 m, mu := ext.extensionsRead()
2412 if m == nil {
2413 return b, nil
2414 }
2415 mu.Lock()
2416 defer mu.Unlock()
2417
2418 var err error
2419 var nerr nonFatal
2420
2421 // Fast-path for common cases: zero or one extensions.
2422 // Don't bother sorting the keys.
2423 if len(m) <= 1 {
2424 for _, e := range m {
2425 if e.value == nil || e.desc == nil {
2426 // Extension is only in its encoded form.
2427 b = append(b, e.enc...)
2428 continue
2429 }
2430
2431 // We don't skip extensions that have an encoded form set,
2432 // because the extension value may have been mutated after
2433 // the last time this function was called.
2434
2435 ei := u.getExtElemInfo(e.desc)
2436 v := e.value
2437 p := toAddrPointer(&v, ei.isptr)
2438 b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
2439 if !nerr.Merge(err) {
2440 return b, err
2441 }
2442 }
2443 return b, nerr.E
2444 }
2445
2446 // Sort the keys to provide a deterministic encoding.
2447 // Not sure this is required, but the old code does it.
2448 keys := make([]int, 0, len(m))
2449 for k := range m {
2450 keys = append(keys, int(k))
2451 }
2452 sort.Ints(keys)
2453
2454 for _, k := range keys {
2455 e := m[int32(k)]
2456 if e.value == nil || e.desc == nil {
2457 // Extension is only in its encoded form.
2458 b = append(b, e.enc...)
2459 continue
2460 }
2461
2462 // We don't skip extensions that have an encoded form set,
2463 // because the extension value may have been mutated after
2464 // the last time this function was called.
2465
2466 ei := u.getExtElemInfo(e.desc)
2467 v := e.value
2468 p := toAddrPointer(&v, ei.isptr)
2469 b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
2470 if !nerr.Merge(err) {
2471 return b, err
2472 }
2473 }
2474 return b, nerr.E
2475}
2476
2477// message set format is:
2478// message MessageSet {
2479// repeated group Item = 1 {
2480// required int32 type_id = 2;
2481// required string message = 3;
2482// };
2483// }
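// A worked illustration of this framing, matching what appendMessageSet below
// emits (a sketch; the type_id 100 and the message body bytes are made up for
// the example):
//
//	0x0b               // 1<<3|WireStartGroup: begin Item
//	0x10 0x64          // 2<<3|WireVarint, then type_id = 100
//	0x1a <len> <body>  // 3<<3|WireBytes, then the length-delimited message
//	0x0c               // 1<<3|WireEndGroup: end Item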
2484
2485// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
2486// in message set format (above).
2487func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
2488 m, mu := ext.extensionsRead()
2489 if m == nil {
2490 return 0
2491 }
2492 mu.Lock()
2493
2494 n := 0
2495 for id, e := range m {
2496 n += 2 // start group, end group. tag = 1 (size=1)
2497 n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
2498
2499 if e.value == nil || e.desc == nil {
2500 // Extension is only in its encoded form.
2501 msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
2502 siz := len(msgWithLen)
2503 n += siz + 1 // message, tag = 3 (size=1)
2504 continue
2505 }
2506
2507 // We don't skip extensions that have an encoded form set,
2508 // because the extension value may have been mutated after
2509 // the last time this function was called.
2510
2511 ei := u.getExtElemInfo(e.desc)
2512 v := e.value
2513 p := toAddrPointer(&v, ei.isptr)
2514 n += ei.sizer(p, 1) // message, tag = 3 (size=1)
2515 }
2516 mu.Unlock()
2517 return n
2518}
2519
2520// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
2521// to the end of byte slice b.
2522func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
2523 m, mu := ext.extensionsRead()
2524 if m == nil {
2525 return b, nil
2526 }
2527 mu.Lock()
2528 defer mu.Unlock()
2529
2530 var err error
2531 var nerr nonFatal
2532
2533 // Fast-path for common cases: zero or one extensions.
2534 // Don't bother sorting the keys.
2535 if len(m) <= 1 {
2536 for id, e := range m {
2537 b = append(b, 1<<3|WireStartGroup)
2538 b = append(b, 2<<3|WireVarint)
2539 b = appendVarint(b, uint64(id))
2540
2541 if e.value == nil || e.desc == nil {
2542 // Extension is only in its encoded form.
2543 msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
2544 b = append(b, 3<<3|WireBytes)
2545 b = append(b, msgWithLen...)
2546 b = append(b, 1<<3|WireEndGroup)
2547 continue
2548 }
2549
2550 // We don't skip extensions that have an encoded form set,
2551 // because the extension value may have been mutated after
2552 // the last time this function was called.
2553
2554 ei := u.getExtElemInfo(e.desc)
2555 v := e.value
2556 p := toAddrPointer(&v, ei.isptr)
2557 b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
2558 if !nerr.Merge(err) {
2559 return b, err
2560 }
2561 b = append(b, 1<<3|WireEndGroup)
2562 }
2563 return b, nerr.E
2564 }
2565
2566 // Sort the keys to provide a deterministic encoding.
2567 keys := make([]int, 0, len(m))
2568 for k := range m {
2569 keys = append(keys, int(k))
2570 }
2571 sort.Ints(keys)
2572
2573 for _, id := range keys {
2574 e := m[int32(id)]
2575 b = append(b, 1<<3|WireStartGroup)
2576 b = append(b, 2<<3|WireVarint)
2577 b = appendVarint(b, uint64(id))
2578
2579 if e.value == nil || e.desc == nil {
2580 // Extension is only in its encoded form.
2581 msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
2582 b = append(b, 3<<3|WireBytes)
2583 b = append(b, msgWithLen...)
2584 b = append(b, 1<<3|WireEndGroup)
2585 continue
2586 }
2587
2588 // We don't skip extensions that have an encoded form set,
2589 // because the extension value may have been mutated after
2590 // the last time this function was called.
2591
2592 ei := u.getExtElemInfo(e.desc)
2593 v := e.value
2594 p := toAddrPointer(&v, ei.isptr)
2595 b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
2596 b = append(b, 1<<3|WireEndGroup)
2597 if !nerr.Merge(err) {
2598 return b, err
2599 }
2600 }
2601 return b, nerr.E
2602}
2603
2604// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
2605func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
2606 if m == nil {
2607 return 0
2608 }
2609
2610 n := 0
2611 for _, e := range m {
2612 if e.value == nil || e.desc == nil {
2613 // Extension is only in its encoded form.
2614 n += len(e.enc)
2615 continue
2616 }
2617
2618 // We don't skip extensions that have an encoded form set,
2619 // because the extension value may have been mutated after
2620 // the last time this function was called.
2621
2622 ei := u.getExtElemInfo(e.desc)
2623 v := e.value
2624 p := toAddrPointer(&v, ei.isptr)
2625 n += ei.sizer(p, ei.tagsize)
2626 }
2627 return n
2628}
2629
2630// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
2631func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
2632 if m == nil {
2633 return b, nil
2634 }
2635
2636 // Sort the keys to provide a deterministic encoding.
2637 keys := make([]int, 0, len(m))
2638 for k := range m {
2639 keys = append(keys, int(k))
2640 }
2641 sort.Ints(keys)
2642
2643 var err error
2644 var nerr nonFatal
2645 for _, k := range keys {
2646 e := m[int32(k)]
2647 if e.value == nil || e.desc == nil {
2648 // Extension is only in its encoded form.
2649 b = append(b, e.enc...)
2650 continue
2651 }
2652
2653 // We don't skip extensions that have an encoded form set,
2654 // because the extension value may have been mutated after
2655 // the last time this function was called.
2656
2657 ei := u.getExtElemInfo(e.desc)
2658 v := e.value
2659 p := toAddrPointer(&v, ei.isptr)
2660 b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
2661 if !nerr.Merge(err) {
2662 return b, err
2663 }
2664 }
2665 return b, nerr.E
2666}
2667
2668// newMarshaler is the interface representing objects that can marshal themselves.
2669//
2670// This exists to support protoc-gen-go generated messages.
2671// The proto package will stop type-asserting to this interface in the future.
2672//
2673// DO NOT DEPEND ON THIS.
2674type newMarshaler interface {
2675 XXX_Size() int
2676 XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
2677}
2678
2679// Size returns the encoded size of a protocol buffer message.
2680// This is the main entry point.
2681func Size(pb Message) int {
2682 if m, ok := pb.(newMarshaler); ok {
2683 return m.XXX_Size()
2684 }
2685 if m, ok := pb.(Marshaler); ok {
2686 // If the message can marshal itself, let it do it, for compatibility.
2687 // NOTE: This is not efficient.
2688 b, _ := m.Marshal()
2689 return len(b)
2690 }
2691 // in case somehow we didn't generate the wrapper
2692 if pb == nil {
2693 return 0
2694 }
2695 var info InternalMessageInfo
2696 return info.Size(pb)
2697}
2698
2699// Marshal takes a protocol buffer message
2700// and encodes it into the wire format, returning the data.
2701// This is the main entry point.
2702func Marshal(pb Message) ([]byte, error) {
2703 if m, ok := pb.(newMarshaler); ok {
2704 siz := m.XXX_Size()
2705 b := make([]byte, 0, siz)
2706 return m.XXX_Marshal(b, false)
2707 }
2708 if m, ok := pb.(Marshaler); ok {
2709 // If the message can marshal itself, let it do it, for compatibility.
2710 // NOTE: This is not efficient.
2711 return m.Marshal()
2712 }
2713 // in case somehow we didn't generate the wrapper
2714 if pb == nil {
2715 return nil, ErrNil
2716 }
2717 var info InternalMessageInfo
2718 siz := info.Size(pb)
2719 b := make([]byte, 0, siz)
2720 return info.Marshal(b, pb, false)
2721}
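// A minimal usage sketch for Size and Marshal from calling code (illustrative
// only; pb.Example and its Name field are hypothetical generated types):
//
//	msg := &pb.Example{Name: proto.String("hi")}
//	n := proto.Size(msg)            // encoded size of msg
//	data, err := proto.Marshal(msg)
//	if err != nil {
//		// handle the error
//	}
//	_ = n == len(data)              // for a valid message, Size matches the output length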
2722
2723// Marshal takes a protocol buffer message
2724// and encodes it into the wire format, writing the result to the
2725// Buffer.
2726// This is an alternative entry point. It is not necessary to use
2727// a Buffer for most applications.
2728func (p *Buffer) Marshal(pb Message) error {
2729 var err error
2730 if m, ok := pb.(newMarshaler); ok {
2731 siz := m.XXX_Size()
2732 p.grow(siz) // make sure buf has enough capacity
2733 p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
2734 return err
2735 }
2736 if m, ok := pb.(Marshaler); ok {
2737 // If the message can marshal itself, let it do it, for compatibility.
2738 // NOTE: This is not efficient.
2739 b, err := m.Marshal()
2740 p.buf = append(p.buf, b...)
2741 return err
2742 }
2743 // in case somehow we didn't generate the wrapper
2744 if pb == nil {
2745 return ErrNil
2746 }
2747 var info InternalMessageInfo
2748 siz := info.Size(pb)
2749 p.grow(siz) // make sure buf has enough capacity
2750 p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
2751 return err
2752}
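// A minimal sketch of the Buffer entry point (same hypothetical pb.Example),
// useful when appending several encodings into one reusable byte slice:
//
//	buf := proto.NewBuffer(nil)
//	if err := buf.Marshal(msg); err != nil {
//		// handle the error
//	}
//	wire := buf.Bytes() // the encoded message, appended to the buffer's contents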
2753
2754// grow grows the buffer's capacity, if necessary, to guarantee space for
2755// another n bytes. After grow(n), at least n bytes can be written to the
2756// buffer without another allocation.
2757func (p *Buffer) grow(n int) {
2758 need := len(p.buf) + n
2759 if need <= cap(p.buf) {
2760 return
2761 }
2762 newCap := len(p.buf) * 2
2763 if newCap < need {
2764 newCap = need
2765 }
2766 p.buf = append(make([]byte, 0, newCap), p.buf...)
2767}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34import (
35 "fmt"
36 "reflect"
37 "strings"
38 "sync"
39 "sync/atomic"
40)
41
42// Merge merges the src message into dst.
43// This assumes that dst and src are of the same type and are non-nil.
44func (a *InternalMessageInfo) Merge(dst, src Message) {
45 mi := atomicLoadMergeInfo(&a.merge)
46 if mi == nil {
47 mi = getMergeInfo(reflect.TypeOf(dst).Elem())
48 atomicStoreMergeInfo(&a.merge, mi)
49 }
50 mi.merge(toPointer(&dst), toPointer(&src))
51}
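// Application code normally reaches this through the package-level Merge
// helper rather than calling it directly. A minimal sketch (pb.Example and
// its Name field are hypothetical generated types):
//
//	dst := &pb.Example{}
//	src := &pb.Example{Name: proto.String("hi")}
//	proto.Merge(dst, src) // dst.Name now holds its own copy of "hi"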
52
53type mergeInfo struct {
54 typ reflect.Type
55
56 initialized int32 // 0: only typ is valid, 1: everything is valid
57 lock sync.Mutex
58
59 fields []mergeFieldInfo
60 unrecognized field // Offset of XXX_unrecognized
61}
62
63type mergeFieldInfo struct {
64 field field // Offset of field, guaranteed to be valid
65
66 // isPointer reports whether the value in the field is a pointer.
67 // This is true for the following situations:
68 // * Pointer to struct
69 // * Pointer to basic type (proto2 only)
70 // * Slice (first value in slice header is a pointer)
71 // * String (first value in string header is a pointer)
72 isPointer bool
73
74 // basicWidth reports the width of the field assuming that it is directly
75 // embedded in the struct (as is the case for basic types in proto3).
76 // The possible values are:
77 // 0: invalid
78 // 1: bool
79 // 4: int32, uint32, float32
80 // 8: int64, uint64, float64
81 basicWidth int
82
83 // Where dst and src are pointers to the types being merged.
84 merge func(dst, src pointer)
85}
86
87var (
88 mergeInfoMap = map[reflect.Type]*mergeInfo{}
89 mergeInfoLock sync.Mutex
90)
91
92func getMergeInfo(t reflect.Type) *mergeInfo {
93 mergeInfoLock.Lock()
94 defer mergeInfoLock.Unlock()
95 mi := mergeInfoMap[t]
96 if mi == nil {
97 mi = &mergeInfo{typ: t}
98 mergeInfoMap[t] = mi
99 }
100 return mi
101}
102
103// merge merges src into dst assuming they are both of type *mi.typ.
104func (mi *mergeInfo) merge(dst, src pointer) {
105 if dst.isNil() {
106 panic("proto: nil destination")
107 }
108 if src.isNil() {
109 return // Nothing to do.
110 }
111
112 if atomic.LoadInt32(&mi.initialized) == 0 {
113 mi.computeMergeInfo()
114 }
115
116 for _, fi := range mi.fields {
117 sfp := src.offset(fi.field)
118
119 // As an optimization, we can avoid the merge function call cost
120 // if we know for sure that the source will have no effect
121 // by checking if it is the zero value.
122 if unsafeAllowed {
123 if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
124 continue
125 }
126 if fi.basicWidth > 0 {
127 switch {
128 case fi.basicWidth == 1 && !*sfp.toBool():
129 continue
130 case fi.basicWidth == 4 && *sfp.toUint32() == 0:
131 continue
132 case fi.basicWidth == 8 && *sfp.toUint64() == 0:
133 continue
134 }
135 }
136 }
137
138 dfp := dst.offset(fi.field)
139 fi.merge(dfp, sfp)
140 }
141
142 // TODO: Make this faster?
143 out := dst.asPointerTo(mi.typ).Elem()
144 in := src.asPointerTo(mi.typ).Elem()
145 if emIn, err := extendable(in.Addr().Interface()); err == nil {
146 emOut, _ := extendable(out.Addr().Interface())
147 mIn, muIn := emIn.extensionsRead()
148 if mIn != nil {
149 mOut := emOut.extensionsWrite()
150 muIn.Lock()
151 mergeExtension(mOut, mIn)
152 muIn.Unlock()
153 }
154 }
155
156 if mi.unrecognized.IsValid() {
157 if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
158 *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
159 }
160 }
161}
162
163func (mi *mergeInfo) computeMergeInfo() {
164 mi.lock.Lock()
165 defer mi.lock.Unlock()
166 if mi.initialized != 0 {
167 return
168 }
169 t := mi.typ
170 n := t.NumField()
171
172 props := GetProperties(t)
173 for i := 0; i < n; i++ {
174 f := t.Field(i)
175 if strings.HasPrefix(f.Name, "XXX_") {
176 continue
177 }
178
179 mfi := mergeFieldInfo{field: toField(&f)}
180 tf := f.Type
181
182 // As an optimization, we can avoid the merge function call cost
183 // if we know for sure that the source will have no effect
184 // by checking if it is the zero value.
185 if unsafeAllowed {
186 switch tf.Kind() {
187 case reflect.Ptr, reflect.Slice, reflect.String:
188 // As a special case, we assume slices and strings are pointers
189 // since we know that the first field in the SliceHeader or
190 // StringHeader is a data pointer.
191 mfi.isPointer = true
192 case reflect.Bool:
193 mfi.basicWidth = 1
194 case reflect.Int32, reflect.Uint32, reflect.Float32:
195 mfi.basicWidth = 4
196 case reflect.Int64, reflect.Uint64, reflect.Float64:
197 mfi.basicWidth = 8
198 }
199 }
200
201 // Unwrap tf to get at its most basic type.
202 var isPointer, isSlice bool
203 if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
204 isSlice = true
205 tf = tf.Elem()
206 }
207 if tf.Kind() == reflect.Ptr {
208 isPointer = true
209 tf = tf.Elem()
210 }
211 if isPointer && isSlice && tf.Kind() != reflect.Struct {
212 panic("both pointer and slice for basic type in " + tf.Name())
213 }
214
215 switch tf.Kind() {
216 case reflect.Int32:
217 switch {
218 case isSlice: // E.g., []int32
219 mfi.merge = func(dst, src pointer) {
220 // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
221 /*
222 sfsp := src.toInt32Slice()
223 if *sfsp != nil {
224 dfsp := dst.toInt32Slice()
225 *dfsp = append(*dfsp, *sfsp...)
226 if *dfsp == nil {
227 *dfsp = []int64{}
228 }
229 }
230 */
231 sfs := src.getInt32Slice()
232 if sfs != nil {
233 dfs := dst.getInt32Slice()
234 dfs = append(dfs, sfs...)
235 if dfs == nil {
236 dfs = []int32{}
237 }
238 dst.setInt32Slice(dfs)
239 }
240 }
241 case isPointer: // E.g., *int32
242 mfi.merge = func(dst, src pointer) {
243 // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
244 /*
245 sfpp := src.toInt32Ptr()
246 if *sfpp != nil {
247 dfpp := dst.toInt32Ptr()
248 if *dfpp == nil {
249 *dfpp = Int32(**sfpp)
250 } else {
251 **dfpp = **sfpp
252 }
253 }
254 */
255 sfp := src.getInt32Ptr()
256 if sfp != nil {
257 dfp := dst.getInt32Ptr()
258 if dfp == nil {
259 dst.setInt32Ptr(*sfp)
260 } else {
261 *dfp = *sfp
262 }
263 }
264 }
265 default: // E.g., int32
266 mfi.merge = func(dst, src pointer) {
267 if v := *src.toInt32(); v != 0 {
268 *dst.toInt32() = v
269 }
270 }
271 }
272 case reflect.Int64:
273 switch {
274 case isSlice: // E.g., []int64
275 mfi.merge = func(dst, src pointer) {
276 sfsp := src.toInt64Slice()
277 if *sfsp != nil {
278 dfsp := dst.toInt64Slice()
279 *dfsp = append(*dfsp, *sfsp...)
280 if *dfsp == nil {
281 *dfsp = []int64{}
282 }
283 }
284 }
285 case isPointer: // E.g., *int64
286 mfi.merge = func(dst, src pointer) {
287 sfpp := src.toInt64Ptr()
288 if *sfpp != nil {
289 dfpp := dst.toInt64Ptr()
290 if *dfpp == nil {
291 *dfpp = Int64(**sfpp)
292 } else {
293 **dfpp = **sfpp
294 }
295 }
296 }
297 default: // E.g., int64
298 mfi.merge = func(dst, src pointer) {
299 if v := *src.toInt64(); v != 0 {
300 *dst.toInt64() = v
301 }
302 }
303 }
304 case reflect.Uint32:
305 switch {
306 case isSlice: // E.g., []uint32
307 mfi.merge = func(dst, src pointer) {
308 sfsp := src.toUint32Slice()
309 if *sfsp != nil {
310 dfsp := dst.toUint32Slice()
311 *dfsp = append(*dfsp, *sfsp...)
312 if *dfsp == nil {
313 *dfsp = []uint32{}
314 }
315 }
316 }
317 case isPointer: // E.g., *uint32
318 mfi.merge = func(dst, src pointer) {
319 sfpp := src.toUint32Ptr()
320 if *sfpp != nil {
321 dfpp := dst.toUint32Ptr()
322 if *dfpp == nil {
323 *dfpp = Uint32(**sfpp)
324 } else {
325 **dfpp = **sfpp
326 }
327 }
328 }
329 default: // E.g., uint32
330 mfi.merge = func(dst, src pointer) {
331 if v := *src.toUint32(); v != 0 {
332 *dst.toUint32() = v
333 }
334 }
335 }
336 case reflect.Uint64:
337 switch {
338 case isSlice: // E.g., []uint64
339 mfi.merge = func(dst, src pointer) {
340 sfsp := src.toUint64Slice()
341 if *sfsp != nil {
342 dfsp := dst.toUint64Slice()
343 *dfsp = append(*dfsp, *sfsp...)
344 if *dfsp == nil {
345 *dfsp = []uint64{}
346 }
347 }
348 }
349 case isPointer: // E.g., *uint64
350 mfi.merge = func(dst, src pointer) {
351 sfpp := src.toUint64Ptr()
352 if *sfpp != nil {
353 dfpp := dst.toUint64Ptr()
354 if *dfpp == nil {
355 *dfpp = Uint64(**sfpp)
356 } else {
357 **dfpp = **sfpp
358 }
359 }
360 }
361 default: // E.g., uint64
362 mfi.merge = func(dst, src pointer) {
363 if v := *src.toUint64(); v != 0 {
364 *dst.toUint64() = v
365 }
366 }
367 }
368 case reflect.Float32:
369 switch {
370 case isSlice: // E.g., []float32
371 mfi.merge = func(dst, src pointer) {
372 sfsp := src.toFloat32Slice()
373 if *sfsp != nil {
374 dfsp := dst.toFloat32Slice()
375 *dfsp = append(*dfsp, *sfsp...)
376 if *dfsp == nil {
377 *dfsp = []float32{}
378 }
379 }
380 }
381 case isPointer: // E.g., *float32
382 mfi.merge = func(dst, src pointer) {
383 sfpp := src.toFloat32Ptr()
384 if *sfpp != nil {
385 dfpp := dst.toFloat32Ptr()
386 if *dfpp == nil {
387 *dfpp = Float32(**sfpp)
388 } else {
389 **dfpp = **sfpp
390 }
391 }
392 }
393 default: // E.g., float32
394 mfi.merge = func(dst, src pointer) {
395 if v := *src.toFloat32(); v != 0 {
396 *dst.toFloat32() = v
397 }
398 }
399 }
400 case reflect.Float64:
401 switch {
402 case isSlice: // E.g., []float64
403 mfi.merge = func(dst, src pointer) {
404 sfsp := src.toFloat64Slice()
405 if *sfsp != nil {
406 dfsp := dst.toFloat64Slice()
407 *dfsp = append(*dfsp, *sfsp...)
408 if *dfsp == nil {
409 *dfsp = []float64{}
410 }
411 }
412 }
413 case isPointer: // E.g., *float64
414 mfi.merge = func(dst, src pointer) {
415 sfpp := src.toFloat64Ptr()
416 if *sfpp != nil {
417 dfpp := dst.toFloat64Ptr()
418 if *dfpp == nil {
419 *dfpp = Float64(**sfpp)
420 } else {
421 **dfpp = **sfpp
422 }
423 }
424 }
425 default: // E.g., float64
426 mfi.merge = func(dst, src pointer) {
427 if v := *src.toFloat64(); v != 0 {
428 *dst.toFloat64() = v
429 }
430 }
431 }
432 case reflect.Bool:
433 switch {
434 case isSlice: // E.g., []bool
435 mfi.merge = func(dst, src pointer) {
436 sfsp := src.toBoolSlice()
437 if *sfsp != nil {
438 dfsp := dst.toBoolSlice()
439 *dfsp = append(*dfsp, *sfsp...)
440 if *dfsp == nil {
441 *dfsp = []bool{}
442 }
443 }
444 }
445 case isPointer: // E.g., *bool
446 mfi.merge = func(dst, src pointer) {
447 sfpp := src.toBoolPtr()
448 if *sfpp != nil {
449 dfpp := dst.toBoolPtr()
450 if *dfpp == nil {
451 *dfpp = Bool(**sfpp)
452 } else {
453 **dfpp = **sfpp
454 }
455 }
456 }
457 default: // E.g., bool
458 mfi.merge = func(dst, src pointer) {
459 if v := *src.toBool(); v {
460 *dst.toBool() = v
461 }
462 }
463 }
464 case reflect.String:
465 switch {
466 case isSlice: // E.g., []string
467 mfi.merge = func(dst, src pointer) {
468 sfsp := src.toStringSlice()
469 if *sfsp != nil {
470 dfsp := dst.toStringSlice()
471 *dfsp = append(*dfsp, *sfsp...)
472 if *dfsp == nil {
473 *dfsp = []string{}
474 }
475 }
476 }
477 case isPointer: // E.g., *string
478 mfi.merge = func(dst, src pointer) {
479 sfpp := src.toStringPtr()
480 if *sfpp != nil {
481 dfpp := dst.toStringPtr()
482 if *dfpp == nil {
483 *dfpp = String(**sfpp)
484 } else {
485 **dfpp = **sfpp
486 }
487 }
488 }
489 default: // E.g., string
490 mfi.merge = func(dst, src pointer) {
491 if v := *src.toString(); v != "" {
492 *dst.toString() = v
493 }
494 }
495 }
496 case reflect.Slice:
497 isProto3 := props.Prop[i].proto3
498 switch {
499 case isPointer:
500 panic("bad pointer in byte slice case in " + tf.Name())
501 case tf.Elem().Kind() != reflect.Uint8:
502 panic("bad element kind in byte slice case in " + tf.Name())
503 case isSlice: // E.g., [][]byte
504 mfi.merge = func(dst, src pointer) {
505 sbsp := src.toBytesSlice()
506 if *sbsp != nil {
507 dbsp := dst.toBytesSlice()
508 for _, sb := range *sbsp {
509 if sb == nil {
510 *dbsp = append(*dbsp, nil)
511 } else {
512 *dbsp = append(*dbsp, append([]byte{}, sb...))
513 }
514 }
515 if *dbsp == nil {
516 *dbsp = [][]byte{}
517 }
518 }
519 }
520 default: // E.g., []byte
521 mfi.merge = func(dst, src pointer) {
522 sbp := src.toBytes()
523 if *sbp != nil {
524 dbp := dst.toBytes()
525 if !isProto3 || len(*sbp) > 0 {
526 *dbp = append([]byte{}, *sbp...)
527 }
528 }
529 }
530 }
531 case reflect.Struct:
532 switch {
533 case !isPointer:
534 panic(fmt.Sprintf("message field %s without pointer", tf))
535 case isSlice: // E.g., []*pb.T
536 mi := getMergeInfo(tf)
537 mfi.merge = func(dst, src pointer) {
538 sps := src.getPointerSlice()
539 if sps != nil {
540 dps := dst.getPointerSlice()
541 for _, sp := range sps {
542 var dp pointer
543 if !sp.isNil() {
544 dp = valToPointer(reflect.New(tf))
545 mi.merge(dp, sp)
546 }
547 dps = append(dps, dp)
548 }
549 if dps == nil {
550 dps = []pointer{}
551 }
552 dst.setPointerSlice(dps)
553 }
554 }
555 default: // E.g., *pb.T
556 mi := getMergeInfo(tf)
557 mfi.merge = func(dst, src pointer) {
558 sp := src.getPointer()
559 if !sp.isNil() {
560 dp := dst.getPointer()
561 if dp.isNil() {
562 dp = valToPointer(reflect.New(tf))
563 dst.setPointer(dp)
564 }
565 mi.merge(dp, sp)
566 }
567 }
568 }
569 case reflect.Map:
570 switch {
571 case isPointer || isSlice:
572 panic("bad pointer or slice in map case in " + tf.Name())
573 default: // E.g., map[K]V
574 mfi.merge = func(dst, src pointer) {
575 sm := src.asPointerTo(tf).Elem()
576 if sm.Len() == 0 {
577 return
578 }
579 dm := dst.asPointerTo(tf).Elem()
580 if dm.IsNil() {
581 dm.Set(reflect.MakeMap(tf))
582 }
583
584 switch tf.Elem().Kind() {
585 case reflect.Ptr: // Proto struct (e.g., *T)
586 for _, key := range sm.MapKeys() {
587 val := sm.MapIndex(key)
588 val = reflect.ValueOf(Clone(val.Interface().(Message)))
589 dm.SetMapIndex(key, val)
590 }
591 case reflect.Slice: // E.g. Bytes type (e.g., []byte)
592 for _, key := range sm.MapKeys() {
593 val := sm.MapIndex(key)
594 val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
595 dm.SetMapIndex(key, val)
596 }
597 default: // Basic type (e.g., string)
598 for _, key := range sm.MapKeys() {
599 val := sm.MapIndex(key)
600 dm.SetMapIndex(key, val)
601 }
602 }
603 }
604 }
605 case reflect.Interface:
606 // Must be oneof field.
607 switch {
608 case isPointer || isSlice:
609 panic("bad pointer or slice in interface case in " + tf.Name())
610 default: // E.g., interface{}
611 // TODO: Make this faster?
612 mfi.merge = func(dst, src pointer) {
613 su := src.asPointerTo(tf).Elem()
614 if !su.IsNil() {
615 du := dst.asPointerTo(tf).Elem()
616 typ := su.Elem().Type()
617 if du.IsNil() || du.Elem().Type() != typ {
618 du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
619 }
620 sv := su.Elem().Elem().Field(0)
621 if sv.Kind() == reflect.Ptr && sv.IsNil() {
622 return
623 }
624 dv := du.Elem().Elem().Field(0)
625 if dv.Kind() == reflect.Ptr && dv.IsNil() {
626 dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
627 }
628 switch sv.Type().Kind() {
629 case reflect.Ptr: // Proto struct (e.g., *T)
630 Merge(dv.Interface().(Message), sv.Interface().(Message))
631 case reflect.Slice: // E.g. Bytes type (e.g., []byte)
632 dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
633 default: // Basic type (e.g., string)
634 dv.Set(sv)
635 }
636 }
637 }
638 }
639 default:
640 panic(fmt.Sprintf("merger not found for type:%s", tf))
641 }
642 mi.fields = append(mi.fields, mfi)
643 }
644
645 mi.unrecognized = invalidField
646 if f, ok := t.FieldByName("XXX_unrecognized"); ok {
647 if f.Type != reflect.TypeOf([]byte{}) {
648 panic("expected XXX_unrecognized to be of type []byte")
649 }
650 mi.unrecognized = toField(&f)
651 }
652
653 atomic.StoreInt32(&mi.initialized, 1)
654}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..ebf1caa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2051 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34import (
35 "errors"
36 "fmt"
37 "io"
38 "math"
39 "reflect"
40 "strconv"
41 "strings"
42 "sync"
43 "sync/atomic"
44 "unicode/utf8"
45)
46
47// Unmarshal is the entry point from the generated .pb.go files.
48// This function is not intended to be used by non-generated code.
49// This function is not subject to any compatibility guarantee.
50// msg contains a pointer to a protocol buffer struct.
51// b is the data to be unmarshaled into the protocol buffer.
52// a is a pointer to a place to store cached unmarshal information.
53func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
54 // Load the unmarshal information for this message type.
55 // The atomic load ensures memory consistency.
56 u := atomicLoadUnmarshalInfo(&a.unmarshal)
57 if u == nil {
58 // Slow path: find unmarshal info for msg, update a with it.
59 u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
60 atomicStoreUnmarshalInfo(&a.unmarshal, u)
61 }
62 // Then do the unmarshaling.
63 err := u.unmarshal(toPointer(&msg), b)
64 return err
65}
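// Non-generated code normally reaches this path through the package-level
// Unmarshal helper. A minimal round-trip sketch (pb.Example and Name are
// hypothetical generated types):
//
//	data, _ := proto.Marshal(&pb.Example{Name: proto.String("hi")})
//	out := &pb.Example{}
//	if err := proto.Unmarshal(data, out); err != nil {
//		// handle the error
//	}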
66
67type unmarshalInfo struct {
68 typ reflect.Type // type of the protobuf struct
69
70 // 0 = only typ field is initialized
71 // 1 = completely initialized
72 initialized int32
73 lock sync.Mutex // prevents double initialization
74 dense []unmarshalFieldInfo // fields indexed by tag #
75 sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
76 reqFields []string // names of required fields
77 reqMask uint64 // 1<<len(reqFields)-1
78 unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
79 extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
80 oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
81 extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
82 isMessageSet bool // if true, implies extensions field is valid
83}
84
85// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
86// It decodes the field, stores it at f, and returns the unused bytes.
87// w is the wire encoding.
88// b is the data after the tag and wire encoding have been read.
89type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
90
91type unmarshalFieldInfo struct {
92 // location of the field in the proto message structure.
93 field field
94
95 // function to unmarshal the data for the field.
96 unmarshal unmarshaler
97
98 // if a required field, contains a single set bit at this field's index in the required field list.
99 reqMask uint64
100
101 name string // name of the field, for error reporting
102}
103
104var (
105 unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
106 unmarshalInfoLock sync.Mutex
107)
108
109// getUnmarshalInfo returns the data structure which can be
110// subsequently used to unmarshal a message of the given type.
111// t is the type of the message (note: not pointer to message).
112func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
113 // It would be correct to return a new unmarshalInfo
114 // unconditionally. We would end up allocating one
115 // per occurrence of that type as a message or submessage.
116 // We use a cache here just to reduce memory usage.
117 unmarshalInfoLock.Lock()
118 defer unmarshalInfoLock.Unlock()
119 u := unmarshalInfoMap[t]
120 if u == nil {
121 u = &unmarshalInfo{typ: t}
122 // Note: we just set the type here. The rest of the fields
123 // will be initialized on first use.
124 unmarshalInfoMap[t] = u
125 }
126 return u
127}
128
129// unmarshal does the main work of unmarshaling a message.
130// u provides type information used to unmarshal the message.
131// m is a pointer to a protocol buffer message.
132// b is a byte stream to unmarshal into m.
133// This is the top routine used when recursively unmarshaling submessages.
134func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
135 if atomic.LoadInt32(&u.initialized) == 0 {
136 u.computeUnmarshalInfo()
137 }
138 if u.isMessageSet {
139 return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
140 }
141 var reqMask uint64 // bitmask of required fields we've seen.
142 var errLater error
143 for len(b) > 0 {
144 // Read tag and wire type.
145 // Special case 1 and 2 byte varints.
146 var x uint64
147 if b[0] < 128 {
148 x = uint64(b[0])
149 b = b[1:]
150 } else if len(b) >= 2 && b[1] < 128 {
151 x = uint64(b[0]&0x7f) + uint64(b[1])<<7
152 b = b[2:]
153 } else {
154 var n int
155 x, n = decodeVarint(b)
156 if n == 0 {
157 return io.ErrUnexpectedEOF
158 }
159 b = b[n:]
160 }
161 tag := x >> 3
162 wire := int(x) & 7
163
164 // Dispatch on the tag to one of the unmarshal* functions below.
165 var f unmarshalFieldInfo
166 if tag < uint64(len(u.dense)) {
167 f = u.dense[tag]
168 } else {
169 f = u.sparse[tag]
170 }
171 if fn := f.unmarshal; fn != nil {
172 var err error
173 b, err = fn(b, m.offset(f.field), wire)
174 if err == nil {
175 reqMask |= f.reqMask
176 continue
177 }
178 if r, ok := err.(*RequiredNotSetError); ok {
179 // Remember this error, but keep parsing. We need to produce
180 // a full parse even if a required field is missing.
181 if errLater == nil {
182 errLater = r
183 }
184 reqMask |= f.reqMask
185 continue
186 }
187 if err != errInternalBadWireType {
188 if err == errInvalidUTF8 {
189 if errLater == nil {
190 fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
191 errLater = &invalidUTF8Error{fullName}
192 }
193 continue
194 }
195 return err
196 }
197 // Fragments with bad wire type are treated as unknown fields.
198 }
199
200 // Unknown tag.
201 if !u.unrecognized.IsValid() {
202 // Don't keep unrecognized data; just skip it.
203 var err error
204 b, err = skipField(b, wire)
205 if err != nil {
206 return err
207 }
208 continue
209 }
210 // Keep unrecognized data around,
211 // maybe in extensions, maybe in the unrecognized field.
212 z := m.offset(u.unrecognized).toBytes()
213 var emap map[int32]Extension
214 var e Extension
215 for _, r := range u.extensionRanges {
216 if uint64(r.Start) <= tag && tag <= uint64(r.End) {
217 if u.extensions.IsValid() {
218 mp := m.offset(u.extensions).toExtensions()
219 emap = mp.extensionsWrite()
220 e = emap[int32(tag)]
221 z = &e.enc
222 break
223 }
224 if u.oldExtensions.IsValid() {
225 p := m.offset(u.oldExtensions).toOldExtensions()
226 emap = *p
227 if emap == nil {
228 emap = map[int32]Extension{}
229 *p = emap
230 }
231 e = emap[int32(tag)]
232 z = &e.enc
233 break
234 }
235 panic("no extensions field available")
236 }
237 }
238
239 // Use wire type to skip data.
240 var err error
241 b0 := b
242 b, err = skipField(b, wire)
243 if err != nil {
244 return err
245 }
246 *z = encodeVarint(*z, tag<<3|uint64(wire))
247 *z = append(*z, b0[:len(b0)-len(b)]...)
248
249 if emap != nil {
250 emap[int32(tag)] = e
251 }
252 }
253 if reqMask != u.reqMask && errLater == nil {
254 // A required field of this message is missing.
255 for _, n := range u.reqFields {
256 if reqMask&1 == 0 {
257 errLater = &RequiredNotSetError{n}
258 }
259 reqMask >>= 1
260 }
261 }
262 return errLater
263}
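// A worked example of the key split performed in the loop above (illustrative
// only): a one-byte key 0x1a is the varint 26, which splits into field tag 3
// and wire type 2 (WireBytes, a length-delimited field).
//
//	x := uint64(0x1a)
//	tag := x >> 3      // == 3
//	wire := int(x) & 7 // == 2 (WireBytes)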
264
265// computeUnmarshalInfo fills in u with information for use
266// in unmarshaling protocol buffers of type u.typ.
267func (u *unmarshalInfo) computeUnmarshalInfo() {
268 u.lock.Lock()
269 defer u.lock.Unlock()
270 if u.initialized != 0 {
271 return
272 }
273 t := u.typ
274 n := t.NumField()
275
276 // Set up the "not found" value for the unrecognized byte buffer.
277 // This is the default for proto3.
278 u.unrecognized = invalidField
279 u.extensions = invalidField
280 u.oldExtensions = invalidField
281
282 // List of the generated type and offset for each oneof field.
283 type oneofField struct {
284 ityp reflect.Type // interface type of oneof field
285 field field // offset in containing message
286 }
287 var oneofFields []oneofField
288
289 for i := 0; i < n; i++ {
290 f := t.Field(i)
291 if f.Name == "XXX_unrecognized" {
292 // The byte slice used to hold unrecognized input is special.
293 if f.Type != reflect.TypeOf(([]byte)(nil)) {
294 panic("bad type for XXX_unrecognized field: " + f.Type.Name())
295 }
296 u.unrecognized = toField(&f)
297 continue
298 }
299 if f.Name == "XXX_InternalExtensions" {
300 // Ditto here.
301 if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
302 panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
303 }
304 u.extensions = toField(&f)
305 if f.Tag.Get("protobuf_messageset") == "1" {
306 u.isMessageSet = true
307 }
308 continue
309 }
310 if f.Name == "XXX_extensions" {
311 // An older form of the extensions field.
312 if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
313 panic("bad type for XXX_extensions field: " + f.Type.Name())
314 }
315 u.oldExtensions = toField(&f)
316 continue
317 }
318 if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
319 continue
320 }
321
322 oneof := f.Tag.Get("protobuf_oneof")
323 if oneof != "" {
324 oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
325 // The rest of oneof processing happens below.
326 continue
327 }
328
329 tags := f.Tag.Get("protobuf")
330 tagArray := strings.Split(tags, ",")
331 if len(tagArray) < 2 {
332 panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
333 }
334 tag, err := strconv.Atoi(tagArray[1])
335 if err != nil {
336 panic("protobuf tag field not an integer: " + tagArray[1])
337 }
338
339 name := ""
340 for _, tag := range tagArray[3:] {
341 if strings.HasPrefix(tag, "name=") {
342 name = tag[5:]
343 }
344 }
345
346 // Extract unmarshaling function from the field (its type and tags).
347 unmarshal := fieldUnmarshaler(&f)
348
349 // Required field?
350 var reqMask uint64
351 if tagArray[2] == "req" {
352 bit := len(u.reqFields)
353 u.reqFields = append(u.reqFields, name)
354 reqMask = uint64(1) << uint(bit)
355 // TODO: if we have more than 64 required fields, we end up
356 // not verifying that all required fields are present.
357 // Fix this, perhaps using a count of required fields?
358 }
359
360 // Store the info in the correct slot in the message.
361 u.setTag(tag, toField(&f), unmarshal, reqMask, name)
362 }
363
364 // Find any types associated with oneof fields.
365 // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
366 fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
367 if fn.IsValid() {
368 res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
369 for i := res.Len() - 1; i >= 0; i-- {
370 v := res.Index(i) // interface{}
371 tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
372 typ := tptr.Elem() // Msg_X
373
374 f := typ.Field(0) // oneof implementers have one field
375 baseUnmarshal := fieldUnmarshaler(&f)
376 tags := strings.Split(f.Tag.Get("protobuf"), ",")
377 fieldNum, err := strconv.Atoi(tags[1])
378 if err != nil {
379 panic("protobuf tag field not an integer: " + tags[1])
380 }
381 var name string
382 for _, tag := range tags {
383 if strings.HasPrefix(tag, "name=") {
384 name = strings.TrimPrefix(tag, "name=")
385 break
386 }
387 }
388
389 // Find the oneof field that this struct implements.
390 // Might take O(n^2) to process all of the oneofs, but who cares.
391 for _, of := range oneofFields {
392 if tptr.Implements(of.ityp) {
393 // We have found the corresponding interface for this struct.
394 // That lets us know where this struct should be stored
395 // when we encounter it during unmarshaling.
396 unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
397 u.setTag(fieldNum, of.field, unmarshal, 0, name)
398 }
399 }
400 }
401 }
402
403 // Get extension ranges, if any.
404 fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
405 if fn.IsValid() {
406 if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
407 panic("a message with extensions, but no extensions field in " + t.Name())
408 }
409 u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
410 }
411
412 // Explicitly disallow tag 0. This will ensure we flag an error
413 // when decoding a buffer of all zeros. Without this code, we
414 // would decode and skip an all-zero buffer of even length.
415 // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
416 u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
417 return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
418 }, 0, "")
419
420 // Set mask for required field check.
421 u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
422
423 atomic.StoreInt32(&u.initialized, 1)
424}
425
426// setTag stores the unmarshal information for the given tag.
427// tag = tag # for field
428// field/unmarshal = unmarshal info for that field.
429// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
430// name = short name of the field.
431func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
432 i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
433 n := u.typ.NumField()
434 if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
435 for len(u.dense) <= tag {
436 u.dense = append(u.dense, unmarshalFieldInfo{})
437 }
438 u.dense[tag] = i
439 return
440 }
441 if u.sparse == nil {
442 u.sparse = map[uint64]unmarshalFieldInfo{}
443 }
444 u.sparse[uint64(tag)] = i
445}
446
447// fieldUnmarshaler returns an unmarshaler for the given field.
448func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
449 if f.Type.Kind() == reflect.Map {
450 return makeUnmarshalMap(f)
451 }
452 return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
453}
454
455// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
456func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
457 tagArray := strings.Split(tags, ",")
458 encoding := tagArray[0]
459 name := "unknown"
460 proto3 := false
461 validateUTF8 := true
462 for _, tag := range tagArray[3:] {
463 if strings.HasPrefix(tag, "name=") {
464 name = tag[5:]
465 }
466 if tag == "proto3" {
467 proto3 = true
468 }
469 }
470 validateUTF8 = validateUTF8 && proto3
471
472 // Figure out packaging (pointer, slice, or both)
473 slice := false
474 pointer := false
475 if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
476 slice = true
477 t = t.Elem()
478 }
479 if t.Kind() == reflect.Ptr {
480 pointer = true
481 t = t.Elem()
482 }
483
484 // We'll never have both pointer and slice for basic types.
485 if pointer && slice && t.Kind() != reflect.Struct {
486 panic("both pointer and slice for basic type in " + t.Name())
487 }
488
489 switch t.Kind() {
490 case reflect.Bool:
491 if pointer {
492 return unmarshalBoolPtr
493 }
494 if slice {
495 return unmarshalBoolSlice
496 }
497 return unmarshalBoolValue
498 case reflect.Int32:
499 switch encoding {
500 case "fixed32":
501 if pointer {
502 return unmarshalFixedS32Ptr
503 }
504 if slice {
505 return unmarshalFixedS32Slice
506 }
507 return unmarshalFixedS32Value
508 case "varint":
509 // this could be int32 or enum
510 if pointer {
511 return unmarshalInt32Ptr
512 }
513 if slice {
514 return unmarshalInt32Slice
515 }
516 return unmarshalInt32Value
517 case "zigzag32":
518 if pointer {
519 return unmarshalSint32Ptr
520 }
521 if slice {
522 return unmarshalSint32Slice
523 }
524 return unmarshalSint32Value
525 }
526 case reflect.Int64:
527 switch encoding {
528 case "fixed64":
529 if pointer {
530 return unmarshalFixedS64Ptr
531 }
532 if slice {
533 return unmarshalFixedS64Slice
534 }
535 return unmarshalFixedS64Value
536 case "varint":
537 if pointer {
538 return unmarshalInt64Ptr
539 }
540 if slice {
541 return unmarshalInt64Slice
542 }
543 return unmarshalInt64Value
544 case "zigzag64":
545 if pointer {
546 return unmarshalSint64Ptr
547 }
548 if slice {
549 return unmarshalSint64Slice
550 }
551 return unmarshalSint64Value
552 }
553 case reflect.Uint32:
554 switch encoding {
555 case "fixed32":
556 if pointer {
557 return unmarshalFixed32Ptr
558 }
559 if slice {
560 return unmarshalFixed32Slice
561 }
562 return unmarshalFixed32Value
563 case "varint":
564 if pointer {
565 return unmarshalUint32Ptr
566 }
567 if slice {
568 return unmarshalUint32Slice
569 }
570 return unmarshalUint32Value
571 }
572 case reflect.Uint64:
573 switch encoding {
574 case "fixed64":
575 if pointer {
576 return unmarshalFixed64Ptr
577 }
578 if slice {
579 return unmarshalFixed64Slice
580 }
581 return unmarshalFixed64Value
582 case "varint":
583 if pointer {
584 return unmarshalUint64Ptr
585 }
586 if slice {
587 return unmarshalUint64Slice
588 }
589 return unmarshalUint64Value
590 }
591 case reflect.Float32:
592 if pointer {
593 return unmarshalFloat32Ptr
594 }
595 if slice {
596 return unmarshalFloat32Slice
597 }
598 return unmarshalFloat32Value
599 case reflect.Float64:
600 if pointer {
601 return unmarshalFloat64Ptr
602 }
603 if slice {
604 return unmarshalFloat64Slice
605 }
606 return unmarshalFloat64Value
607 case reflect.Map:
608 panic("map type in typeUnmarshaler in " + t.Name())
609 case reflect.Slice:
610 if pointer {
611 panic("bad pointer in slice case in " + t.Name())
612 }
613 if slice {
614 return unmarshalBytesSlice
615 }
616 return unmarshalBytesValue
617 case reflect.String:
618 if validateUTF8 {
619 if pointer {
620 return unmarshalUTF8StringPtr
621 }
622 if slice {
623 return unmarshalUTF8StringSlice
624 }
625 return unmarshalUTF8StringValue
626 }
627 if pointer {
628 return unmarshalStringPtr
629 }
630 if slice {
631 return unmarshalStringSlice
632 }
633 return unmarshalStringValue
634 case reflect.Struct:
635 // message or group field
636 if !pointer {
637 panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
638 }
639 switch encoding {
640 case "bytes":
641 if slice {
642 return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
643 }
644 return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
645 case "group":
646 if slice {
647 return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
648 }
649 return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
650 }
651 }
652 panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
653}
654
655// Below are all the unmarshalers for individual fields of various types.
656
657func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
658 if w != WireVarint {
659 return b, errInternalBadWireType
660 }
661 x, n := decodeVarint(b)
662 if n == 0 {
663 return nil, io.ErrUnexpectedEOF
664 }
665 b = b[n:]
666 v := int64(x)
667 *f.toInt64() = v
668 return b, nil
669}
670
671func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
672 if w != WireVarint {
673 return b, errInternalBadWireType
674 }
675 x, n := decodeVarint(b)
676 if n == 0 {
677 return nil, io.ErrUnexpectedEOF
678 }
679 b = b[n:]
680 v := int64(x)
681 *f.toInt64Ptr() = &v
682 return b, nil
683}
684
685func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
686 if w == WireBytes { // packed
687 x, n := decodeVarint(b)
688 if n == 0 {
689 return nil, io.ErrUnexpectedEOF
690 }
691 b = b[n:]
692 if x > uint64(len(b)) {
693 return nil, io.ErrUnexpectedEOF
694 }
695 res := b[x:]
696 b = b[:x]
697 for len(b) > 0 {
698 x, n = decodeVarint(b)
699 if n == 0 {
700 return nil, io.ErrUnexpectedEOF
701 }
702 b = b[n:]
703 v := int64(x)
704 s := f.toInt64Slice()
705 *s = append(*s, v)
706 }
707 return res, nil
708 }
709 if w != WireVarint {
710 return b, errInternalBadWireType
711 }
712 x, n := decodeVarint(b)
713 if n == 0 {
714 return nil, io.ErrUnexpectedEOF
715 }
716 b = b[n:]
717 v := int64(x)
718 s := f.toInt64Slice()
719 *s = append(*s, v)
720 return b, nil
721}
722
723func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
724 if w != WireVarint {
725 return b, errInternalBadWireType
726 }
727 x, n := decodeVarint(b)
728 if n == 0 {
729 return nil, io.ErrUnexpectedEOF
730 }
731 b = b[n:]
732 v := int64(x>>1) ^ int64(x)<<63>>63
733 *f.toInt64() = v
734 return b, nil
735}
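// The expression above undoes zigzag encoding: x>>1 recovers the magnitude and
// int64(x)<<63>>63 is an all-ones mask exactly when the low (sign) bit is set.
// A small worked check (illustrative):
//
//	x := uint64(3)                      // zigzag encoding of -2
//	v := int64(x>>1) ^ int64(x)<<63>>63 // 1 ^ -1 == -2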
736
737func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
738 if w != WireVarint {
739 return b, errInternalBadWireType
740 }
741 x, n := decodeVarint(b)
742 if n == 0 {
743 return nil, io.ErrUnexpectedEOF
744 }
745 b = b[n:]
746 v := int64(x>>1) ^ int64(x)<<63>>63
747 *f.toInt64Ptr() = &v
748 return b, nil
749}
750
751func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
752 if w == WireBytes { // packed
753 x, n := decodeVarint(b)
754 if n == 0 {
755 return nil, io.ErrUnexpectedEOF
756 }
757 b = b[n:]
758 if x > uint64(len(b)) {
759 return nil, io.ErrUnexpectedEOF
760 }
761 res := b[x:]
762 b = b[:x]
763 for len(b) > 0 {
764 x, n = decodeVarint(b)
765 if n == 0 {
766 return nil, io.ErrUnexpectedEOF
767 }
768 b = b[n:]
769 v := int64(x>>1) ^ int64(x)<<63>>63
770 s := f.toInt64Slice()
771 *s = append(*s, v)
772 }
773 return res, nil
774 }
775 if w != WireVarint {
776 return b, errInternalBadWireType
777 }
778 x, n := decodeVarint(b)
779 if n == 0 {
780 return nil, io.ErrUnexpectedEOF
781 }
782 b = b[n:]
783 v := int64(x>>1) ^ int64(x)<<63>>63
784 s := f.toInt64Slice()
785 *s = append(*s, v)
786 return b, nil
787}
788
789func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
790 if w != WireVarint {
791 return b, errInternalBadWireType
792 }
793 x, n := decodeVarint(b)
794 if n == 0 {
795 return nil, io.ErrUnexpectedEOF
796 }
797 b = b[n:]
798 v := uint64(x)
799 *f.toUint64() = v
800 return b, nil
801}
802
803func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
804 if w != WireVarint {
805 return b, errInternalBadWireType
806 }
807 x, n := decodeVarint(b)
808 if n == 0 {
809 return nil, io.ErrUnexpectedEOF
810 }
811 b = b[n:]
812 v := uint64(x)
813 *f.toUint64Ptr() = &v
814 return b, nil
815}
816
817func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
818 if w == WireBytes { // packed
819 x, n := decodeVarint(b)
820 if n == 0 {
821 return nil, io.ErrUnexpectedEOF
822 }
823 b = b[n:]
824 if x > uint64(len(b)) {
825 return nil, io.ErrUnexpectedEOF
826 }
827 res := b[x:]
828 b = b[:x]
829 for len(b) > 0 {
830 x, n = decodeVarint(b)
831 if n == 0 {
832 return nil, io.ErrUnexpectedEOF
833 }
834 b = b[n:]
835 v := uint64(x)
836 s := f.toUint64Slice()
837 *s = append(*s, v)
838 }
839 return res, nil
840 }
841 if w != WireVarint {
842 return b, errInternalBadWireType
843 }
844 x, n := decodeVarint(b)
845 if n == 0 {
846 return nil, io.ErrUnexpectedEOF
847 }
848 b = b[n:]
849 v := uint64(x)
850 s := f.toUint64Slice()
851 *s = append(*s, v)
852 return b, nil
853}
854
855func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
856 if w != WireVarint {
857 return b, errInternalBadWireType
858 }
859 x, n := decodeVarint(b)
860 if n == 0 {
861 return nil, io.ErrUnexpectedEOF
862 }
863 b = b[n:]
864 v := int32(x)
865 *f.toInt32() = v
866 return b, nil
867}
868
869func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
870 if w != WireVarint {
871 return b, errInternalBadWireType
872 }
873 x, n := decodeVarint(b)
874 if n == 0 {
875 return nil, io.ErrUnexpectedEOF
876 }
877 b = b[n:]
878 v := int32(x)
879 f.setInt32Ptr(v)
880 return b, nil
881}
882
883func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
884 if w == WireBytes { // packed
885 x, n := decodeVarint(b)
886 if n == 0 {
887 return nil, io.ErrUnexpectedEOF
888 }
889 b = b[n:]
890 if x > uint64(len(b)) {
891 return nil, io.ErrUnexpectedEOF
892 }
893 res := b[x:]
894 b = b[:x]
895 for len(b) > 0 {
896 x, n = decodeVarint(b)
897 if n == 0 {
898 return nil, io.ErrUnexpectedEOF
899 }
900 b = b[n:]
901 v := int32(x)
902 f.appendInt32Slice(v)
903 }
904 return res, nil
905 }
906 if w != WireVarint {
907 return b, errInternalBadWireType
908 }
909 x, n := decodeVarint(b)
910 if n == 0 {
911 return nil, io.ErrUnexpectedEOF
912 }
913 b = b[n:]
914 v := int32(x)
915 f.appendInt32Slice(v)
916 return b, nil
917}
918
919func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
920 if w != WireVarint {
921 return b, errInternalBadWireType
922 }
923 x, n := decodeVarint(b)
924 if n == 0 {
925 return nil, io.ErrUnexpectedEOF
926 }
927 b = b[n:]
928 v := int32(x>>1) ^ int32(x)<<31>>31
929 *f.toInt32() = v
930 return b, nil
931}
932
933func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
934 if w != WireVarint {
935 return b, errInternalBadWireType
936 }
937 x, n := decodeVarint(b)
938 if n == 0 {
939 return nil, io.ErrUnexpectedEOF
940 }
941 b = b[n:]
942 v := int32(x>>1) ^ int32(x)<<31>>31
943 f.setInt32Ptr(v)
944 return b, nil
945}
946
947func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
948 if w == WireBytes { // packed
949 x, n := decodeVarint(b)
950 if n == 0 {
951 return nil, io.ErrUnexpectedEOF
952 }
953 b = b[n:]
954 if x > uint64(len(b)) {
955 return nil, io.ErrUnexpectedEOF
956 }
957 res := b[x:]
958 b = b[:x]
959 for len(b) > 0 {
960 x, n = decodeVarint(b)
961 if n == 0 {
962 return nil, io.ErrUnexpectedEOF
963 }
964 b = b[n:]
965 v := int32(x>>1) ^ int32(x)<<31>>31
966 f.appendInt32Slice(v)
967 }
968 return res, nil
969 }
970 if w != WireVarint {
971 return b, errInternalBadWireType
972 }
973 x, n := decodeVarint(b)
974 if n == 0 {
975 return nil, io.ErrUnexpectedEOF
976 }
977 b = b[n:]
978 v := int32(x>>1) ^ int32(x)<<31>>31
979 f.appendInt32Slice(v)
980 return b, nil
981}
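
The expression int32(x>>1) ^ int32(x)<<31>>31 used above is zigzag decoding, which is what makes sint32 cheap for negative values. A small self-contained sketch of the round trip, assuming the standard zigzag mapping (the helper names below are illustrative, not from this package):

    package main

    import "fmt"

    // zigzag32 maps signed values so that numbers near zero get small varints:
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
    func zigzag32(v int32) uint64 {
        return uint64(uint32(v<<1) ^ uint32(v>>31))
    }

    // unzigzag32 mirrors the decode expression used in unmarshalSint32*.
    func unzigzag32(x uint64) int32 {
        return int32(x>>1) ^ int32(x)<<31>>31
    }

    func main() {
        for _, v := range []int32{0, -1, 1, -2, 64, -64} {
            x := zigzag32(v)
            fmt.Printf("%d -> %d -> %d\n", v, x, unzigzag32(x))
        }
    }
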
982
983func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
984 if w != WireVarint {
985 return b, errInternalBadWireType
986 }
987 x, n := decodeVarint(b)
988 if n == 0 {
989 return nil, io.ErrUnexpectedEOF
990 }
991 b = b[n:]
992 v := uint32(x)
993 *f.toUint32() = v
994 return b, nil
995}
996
997func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
998 if w != WireVarint {
999 return b, errInternalBadWireType
1000 }
1001 x, n := decodeVarint(b)
1002 if n == 0 {
1003 return nil, io.ErrUnexpectedEOF
1004 }
1005 b = b[n:]
1006 v := uint32(x)
1007 *f.toUint32Ptr() = &v
1008 return b, nil
1009}
1010
1011func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
1012 if w == WireBytes { // packed
1013 x, n := decodeVarint(b)
1014 if n == 0 {
1015 return nil, io.ErrUnexpectedEOF
1016 }
1017 b = b[n:]
1018 if x > uint64(len(b)) {
1019 return nil, io.ErrUnexpectedEOF
1020 }
1021 res := b[x:]
1022 b = b[:x]
1023 for len(b) > 0 {
1024 x, n = decodeVarint(b)
1025 if n == 0 {
1026 return nil, io.ErrUnexpectedEOF
1027 }
1028 b = b[n:]
1029 v := uint32(x)
1030 s := f.toUint32Slice()
1031 *s = append(*s, v)
1032 }
1033 return res, nil
1034 }
1035 if w != WireVarint {
1036 return b, errInternalBadWireType
1037 }
1038 x, n := decodeVarint(b)
1039 if n == 0 {
1040 return nil, io.ErrUnexpectedEOF
1041 }
1042 b = b[n:]
1043 v := uint32(x)
1044 s := f.toUint32Slice()
1045 *s = append(*s, v)
1046 return b, nil
1047}
1048
1049func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
1050 if w != WireFixed64 {
1051 return b, errInternalBadWireType
1052 }
1053 if len(b) < 8 {
1054 return nil, io.ErrUnexpectedEOF
1055 }
1056 v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
1057 *f.toUint64() = v
1058 return b[8:], nil
1059}
1060
1061func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
1062 if w != WireFixed64 {
1063 return b, errInternalBadWireType
1064 }
1065 if len(b) < 8 {
1066 return nil, io.ErrUnexpectedEOF
1067 }
1068 v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
1069 *f.toUint64Ptr() = &v
1070 return b[8:], nil
1071}
1072
1073func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
1074 if w == WireBytes { // packed
1075 x, n := decodeVarint(b)
1076 if n == 0 {
1077 return nil, io.ErrUnexpectedEOF
1078 }
1079 b = b[n:]
1080 if x > uint64(len(b)) {
1081 return nil, io.ErrUnexpectedEOF
1082 }
1083 res := b[x:]
1084 b = b[:x]
1085 for len(b) > 0 {
1086 if len(b) < 8 {
1087 return nil, io.ErrUnexpectedEOF
1088 }
1089 v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
1090 s := f.toUint64Slice()
1091 *s = append(*s, v)
1092 b = b[8:]
1093 }
1094 return res, nil
1095 }
1096 if w != WireFixed64 {
1097 return b, errInternalBadWireType
1098 }
1099 if len(b) < 8 {
1100 return nil, io.ErrUnexpectedEOF
1101 }
1102 v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
1103 s := f.toUint64Slice()
1104 *s = append(*s, v)
1105 return b[8:], nil
1106}
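
The shift chain above assembles a little-endian fixed64 by hand. For reference, it computes the same value as encoding/binary; a short standalone check (not code from this package):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

        // Manual little-endian assembly, as in unmarshalFixed64*.
        v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
            uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56

        // encoding/binary produces the same value.
        fmt.Println(v == binary.LittleEndian.Uint64(b)) // true
    }
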
1107
1108func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
1109 if w != WireFixed64 {
1110 return b, errInternalBadWireType
1111 }
1112 if len(b) < 8 {
1113 return nil, io.ErrUnexpectedEOF
1114 }
1115 v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
1116 *f.toInt64() = v
1117 return b[8:], nil
1118}
1119
1120func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
1121 if w != WireFixed64 {
1122 return b, errInternalBadWireType
1123 }
1124 if len(b) < 8 {
1125 return nil, io.ErrUnexpectedEOF
1126 }
1127 v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
1128 *f.toInt64Ptr() = &v
1129 return b[8:], nil
1130}
1131
1132func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
1133 if w == WireBytes { // packed
1134 x, n := decodeVarint(b)
1135 if n == 0 {
1136 return nil, io.ErrUnexpectedEOF
1137 }
1138 b = b[n:]
1139 if x > uint64(len(b)) {
1140 return nil, io.ErrUnexpectedEOF
1141 }
1142 res := b[x:]
1143 b = b[:x]
1144 for len(b) > 0 {
1145 if len(b) < 8 {
1146 return nil, io.ErrUnexpectedEOF
1147 }
1148 v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
1149 s := f.toInt64Slice()
1150 *s = append(*s, v)
1151 b = b[8:]
1152 }
1153 return res, nil
1154 }
1155 if w != WireFixed64 {
1156 return b, errInternalBadWireType
1157 }
1158 if len(b) < 8 {
1159 return nil, io.ErrUnexpectedEOF
1160 }
1161 v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
1162 s := f.toInt64Slice()
1163 *s = append(*s, v)
1164 return b[8:], nil
1165}
1166
1167func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
1168 if w != WireFixed32 {
1169 return b, errInternalBadWireType
1170 }
1171 if len(b) < 4 {
1172 return nil, io.ErrUnexpectedEOF
1173 }
1174 v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
1175 *f.toUint32() = v
1176 return b[4:], nil
1177}
1178
1179func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
1180 if w != WireFixed32 {
1181 return b, errInternalBadWireType
1182 }
1183 if len(b) < 4 {
1184 return nil, io.ErrUnexpectedEOF
1185 }
1186 v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
1187 *f.toUint32Ptr() = &v
1188 return b[4:], nil
1189}
1190
1191func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
1192 if w == WireBytes { // packed
1193 x, n := decodeVarint(b)
1194 if n == 0 {
1195 return nil, io.ErrUnexpectedEOF
1196 }
1197 b = b[n:]
1198 if x > uint64(len(b)) {
1199 return nil, io.ErrUnexpectedEOF
1200 }
1201 res := b[x:]
1202 b = b[:x]
1203 for len(b) > 0 {
1204 if len(b) < 4 {
1205 return nil, io.ErrUnexpectedEOF
1206 }
1207 v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
1208 s := f.toUint32Slice()
1209 *s = append(*s, v)
1210 b = b[4:]
1211 }
1212 return res, nil
1213 }
1214 if w != WireFixed32 {
1215 return b, errInternalBadWireType
1216 }
1217 if len(b) < 4 {
1218 return nil, io.ErrUnexpectedEOF
1219 }
1220 v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
1221 s := f.toUint32Slice()
1222 *s = append(*s, v)
1223 return b[4:], nil
1224}
1225
1226func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
1227 if w != WireFixed32 {
1228 return b, errInternalBadWireType
1229 }
1230 if len(b) < 4 {
1231 return nil, io.ErrUnexpectedEOF
1232 }
1233 v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
1234 *f.toInt32() = v
1235 return b[4:], nil
1236}
1237
1238func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
1239 if w != WireFixed32 {
1240 return b, errInternalBadWireType
1241 }
1242 if len(b) < 4 {
1243 return nil, io.ErrUnexpectedEOF
1244 }
1245 v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
1246 f.setInt32Ptr(v)
1247 return b[4:], nil
1248}
1249
1250func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
1251 if w == WireBytes { // packed
1252 x, n := decodeVarint(b)
1253 if n == 0 {
1254 return nil, io.ErrUnexpectedEOF
1255 }
1256 b = b[n:]
1257 if x > uint64(len(b)) {
1258 return nil, io.ErrUnexpectedEOF
1259 }
1260 res := b[x:]
1261 b = b[:x]
1262 for len(b) > 0 {
1263 if len(b) < 4 {
1264 return nil, io.ErrUnexpectedEOF
1265 }
1266 v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
1267 f.appendInt32Slice(v)
1268 b = b[4:]
1269 }
1270 return res, nil
1271 }
1272 if w != WireFixed32 {
1273 return b, errInternalBadWireType
1274 }
1275 if len(b) < 4 {
1276 return nil, io.ErrUnexpectedEOF
1277 }
1278 v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
1279 f.appendInt32Slice(v)
1280 return b[4:], nil
1281}
1282
1283func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
1284 if w != WireVarint {
1285 return b, errInternalBadWireType
1286 }
1287 // Note: any length varint is allowed, even though any sane
1288 // encoder will use one byte.
1289 // See https://github.com/golang/protobuf/issues/76
1290 x, n := decodeVarint(b)
1291 if n == 0 {
1292 return nil, io.ErrUnexpectedEOF
1293 }
1294 // TODO: check if x>1? Tests seem to indicate no.
1295 v := x != 0
1296 *f.toBool() = v
1297 return b[n:], nil
1298}
1299
1300func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
1301 if w != WireVarint {
1302 return b, errInternalBadWireType
1303 }
1304 x, n := decodeVarint(b)
1305 if n == 0 {
1306 return nil, io.ErrUnexpectedEOF
1307 }
1308 v := x != 0
1309 *f.toBoolPtr() = &v
1310 return b[n:], nil
1311}
1312
1313func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
1314 if w == WireBytes { // packed
1315 x, n := decodeVarint(b)
1316 if n == 0 {
1317 return nil, io.ErrUnexpectedEOF
1318 }
1319 b = b[n:]
1320 if x > uint64(len(b)) {
1321 return nil, io.ErrUnexpectedEOF
1322 }
1323 res := b[x:]
1324 b = b[:x]
1325 for len(b) > 0 {
1326 x, n = decodeVarint(b)
1327 if n == 0 {
1328 return nil, io.ErrUnexpectedEOF
1329 }
1330 v := x != 0
1331 s := f.toBoolSlice()
1332 *s = append(*s, v)
1333 b = b[n:]
1334 }
1335 return res, nil
1336 }
1337 if w != WireVarint {
1338 return b, errInternalBadWireType
1339 }
1340 x, n := decodeVarint(b)
1341 if n == 0 {
1342 return nil, io.ErrUnexpectedEOF
1343 }
1344 v := x != 0
1345 s := f.toBoolSlice()
1346 *s = append(*s, v)
1347 return b[n:], nil
1348}
1349
1350func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
1351 if w != WireFixed64 {
1352 return b, errInternalBadWireType
1353 }
1354 if len(b) < 8 {
1355 return nil, io.ErrUnexpectedEOF
1356 }
1357 v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
1358 *f.toFloat64() = v
1359 return b[8:], nil
1360}
1361
1362func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
1363 if w != WireFixed64 {
1364 return b, errInternalBadWireType
1365 }
1366 if len(b) < 8 {
1367 return nil, io.ErrUnexpectedEOF
1368 }
1369 v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
1370 *f.toFloat64Ptr() = &v
1371 return b[8:], nil
1372}
1373
1374func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
1375 if w == WireBytes { // packed
1376 x, n := decodeVarint(b)
1377 if n == 0 {
1378 return nil, io.ErrUnexpectedEOF
1379 }
1380 b = b[n:]
1381 if x > uint64(len(b)) {
1382 return nil, io.ErrUnexpectedEOF
1383 }
1384 res := b[x:]
1385 b = b[:x]
1386 for len(b) > 0 {
1387 if len(b) < 8 {
1388 return nil, io.ErrUnexpectedEOF
1389 }
1390 v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
1391 s := f.toFloat64Slice()
1392 *s = append(*s, v)
1393 b = b[8:]
1394 }
1395 return res, nil
1396 }
1397 if w != WireFixed64 {
1398 return b, errInternalBadWireType
1399 }
1400 if len(b) < 8 {
1401 return nil, io.ErrUnexpectedEOF
1402 }
1403 v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
1404 s := f.toFloat64Slice()
1405 *s = append(*s, v)
1406 return b[8:], nil
1407}
1408
1409func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
1410 if w != WireFixed32 {
1411 return b, errInternalBadWireType
1412 }
1413 if len(b) < 4 {
1414 return nil, io.ErrUnexpectedEOF
1415 }
1416 v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
1417 *f.toFloat32() = v
1418 return b[4:], nil
1419}
1420
1421func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
1422 if w != WireFixed32 {
1423 return b, errInternalBadWireType
1424 }
1425 if len(b) < 4 {
1426 return nil, io.ErrUnexpectedEOF
1427 }
1428 v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
1429 *f.toFloat32Ptr() = &v
1430 return b[4:], nil
1431}
1432
1433func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
1434 if w == WireBytes { // packed
1435 x, n := decodeVarint(b)
1436 if n == 0 {
1437 return nil, io.ErrUnexpectedEOF
1438 }
1439 b = b[n:]
1440 if x > uint64(len(b)) {
1441 return nil, io.ErrUnexpectedEOF
1442 }
1443 res := b[x:]
1444 b = b[:x]
1445 for len(b) > 0 {
1446 if len(b) < 4 {
1447 return nil, io.ErrUnexpectedEOF
1448 }
1449 v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
1450 s := f.toFloat32Slice()
1451 *s = append(*s, v)
1452 b = b[4:]
1453 }
1454 return res, nil
1455 }
1456 if w != WireFixed32 {
1457 return b, errInternalBadWireType
1458 }
1459 if len(b) < 4 {
1460 return nil, io.ErrUnexpectedEOF
1461 }
1462 v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
1463 s := f.toFloat32Slice()
1464 *s = append(*s, v)
1465 return b[4:], nil
1466}
1467
1468func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
1469 if w != WireBytes {
1470 return b, errInternalBadWireType
1471 }
1472 x, n := decodeVarint(b)
1473 if n == 0 {
1474 return nil, io.ErrUnexpectedEOF
1475 }
1476 b = b[n:]
1477 if x > uint64(len(b)) {
1478 return nil, io.ErrUnexpectedEOF
1479 }
1480 v := string(b[:x])
1481 *f.toString() = v
1482 return b[x:], nil
1483}
1484
1485func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
1486 if w != WireBytes {
1487 return b, errInternalBadWireType
1488 }
1489 x, n := decodeVarint(b)
1490 if n == 0 {
1491 return nil, io.ErrUnexpectedEOF
1492 }
1493 b = b[n:]
1494 if x > uint64(len(b)) {
1495 return nil, io.ErrUnexpectedEOF
1496 }
1497 v := string(b[:x])
1498 *f.toStringPtr() = &v
1499 return b[x:], nil
1500}
1501
1502func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
1503 if w != WireBytes {
1504 return b, errInternalBadWireType
1505 }
1506 x, n := decodeVarint(b)
1507 if n == 0 {
1508 return nil, io.ErrUnexpectedEOF
1509 }
1510 b = b[n:]
1511 if x > uint64(len(b)) {
1512 return nil, io.ErrUnexpectedEOF
1513 }
1514 v := string(b[:x])
1515 s := f.toStringSlice()
1516 *s = append(*s, v)
1517 return b[x:], nil
1518}
1519
1520func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
1521 if w != WireBytes {
1522 return b, errInternalBadWireType
1523 }
1524 x, n := decodeVarint(b)
1525 if n == 0 {
1526 return nil, io.ErrUnexpectedEOF
1527 }
1528 b = b[n:]
1529 if x > uint64(len(b)) {
1530 return nil, io.ErrUnexpectedEOF
1531 }
1532 v := string(b[:x])
1533 *f.toString() = v
1534 if !utf8.ValidString(v) {
1535 return b[x:], errInvalidUTF8
1536 }
1537 return b[x:], nil
1538}
1539
1540func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
1541 if w != WireBytes {
1542 return b, errInternalBadWireType
1543 }
1544 x, n := decodeVarint(b)
1545 if n == 0 {
1546 return nil, io.ErrUnexpectedEOF
1547 }
1548 b = b[n:]
1549 if x > uint64(len(b)) {
1550 return nil, io.ErrUnexpectedEOF
1551 }
1552 v := string(b[:x])
1553 *f.toStringPtr() = &v
1554 if !utf8.ValidString(v) {
1555 return b[x:], errInvalidUTF8
1556 }
1557 return b[x:], nil
1558}
1559
1560func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
1561 if w != WireBytes {
1562 return b, errInternalBadWireType
1563 }
1564 x, n := decodeVarint(b)
1565 if n == 0 {
1566 return nil, io.ErrUnexpectedEOF
1567 }
1568 b = b[n:]
1569 if x > uint64(len(b)) {
1570 return nil, io.ErrUnexpectedEOF
1571 }
1572 v := string(b[:x])
1573 s := f.toStringSlice()
1574 *s = append(*s, v)
1575 if !utf8.ValidString(v) {
1576 return b[x:], errInvalidUTF8
1577 }
1578 return b[x:], nil
1579}
1580
1581var emptyBuf [0]byte
1582
1583func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
1584 if w != WireBytes {
1585 return b, errInternalBadWireType
1586 }
1587 x, n := decodeVarint(b)
1588 if n == 0 {
1589 return nil, io.ErrUnexpectedEOF
1590 }
1591 b = b[n:]
1592 if x > uint64(len(b)) {
1593 return nil, io.ErrUnexpectedEOF
1594 }
1595 // The use of append here is a trick which avoids the zeroing
1596 // that would be required if we used a make/copy pair.
1597 // We append to emptyBuf instead of nil because we want
1598 // a non-nil result even when the length is 0.
1599 v := append(emptyBuf[:], b[:x]...)
1600 *f.toBytes() = v
1601 return b[x:], nil
1602}
1603
1604func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
1605 if w != WireBytes {
1606 return b, errInternalBadWireType
1607 }
1608 x, n := decodeVarint(b)
1609 if n == 0 {
1610 return nil, io.ErrUnexpectedEOF
1611 }
1612 b = b[n:]
1613 if x > uint64(len(b)) {
1614 return nil, io.ErrUnexpectedEOF
1615 }
1616 v := append(emptyBuf[:], b[:x]...)
1617 s := f.toBytesSlice()
1618 *s = append(*s, v)
1619 return b[x:], nil
1620}
1621
1622func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
1623 return func(b []byte, f pointer, w int) ([]byte, error) {
1624 if w != WireBytes {
1625 return b, errInternalBadWireType
1626 }
1627 x, n := decodeVarint(b)
1628 if n == 0 {
1629 return nil, io.ErrUnexpectedEOF
1630 }
1631 b = b[n:]
1632 if x > uint64(len(b)) {
1633 return nil, io.ErrUnexpectedEOF
1634 }
1635 // First read the message field to see if something is there.
1636 // The semantics of multiple submessages are weird. Instead of
1637 // the last one winning (as it is for all other fields), multiple
1638 // submessages are merged.
1639 v := f.getPointer()
1640 if v.isNil() {
1641 v = valToPointer(reflect.New(sub.typ))
1642 f.setPointer(v)
1643 }
1644 err := sub.unmarshal(v, b[:x])
1645 if err != nil {
1646 if r, ok := err.(*RequiredNotSetError); ok {
1647 r.field = name + "." + r.field
1648 } else {
1649 return nil, err
1650 }
1651 }
1652 return b[x:], err
1653 }
1654}
1655
1656func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
1657 return func(b []byte, f pointer, w int) ([]byte, error) {
1658 if w != WireBytes {
1659 return b, errInternalBadWireType
1660 }
1661 x, n := decodeVarint(b)
1662 if n == 0 {
1663 return nil, io.ErrUnexpectedEOF
1664 }
1665 b = b[n:]
1666 if x > uint64(len(b)) {
1667 return nil, io.ErrUnexpectedEOF
1668 }
1669 v := valToPointer(reflect.New(sub.typ))
1670 err := sub.unmarshal(v, b[:x])
1671 if err != nil {
1672 if r, ok := err.(*RequiredNotSetError); ok {
1673 r.field = name + "." + r.field
1674 } else {
1675 return nil, err
1676 }
1677 }
1678 f.appendPointer(v)
1679 return b[x:], err
1680 }
1681}
1682
1683func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
1684 return func(b []byte, f pointer, w int) ([]byte, error) {
1685 if w != WireStartGroup {
1686 return b, errInternalBadWireType
1687 }
1688 x, y := findEndGroup(b)
1689 if x < 0 {
1690 return nil, io.ErrUnexpectedEOF
1691 }
1692 v := f.getPointer()
1693 if v.isNil() {
1694 v = valToPointer(reflect.New(sub.typ))
1695 f.setPointer(v)
1696 }
1697 err := sub.unmarshal(v, b[:x])
1698 if err != nil {
1699 if r, ok := err.(*RequiredNotSetError); ok {
1700 r.field = name + "." + r.field
1701 } else {
1702 return nil, err
1703 }
1704 }
1705 return b[y:], err
1706 }
1707}
1708
1709func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
1710 return func(b []byte, f pointer, w int) ([]byte, error) {
1711 if w != WireStartGroup {
1712 return b, errInternalBadWireType
1713 }
1714 x, y := findEndGroup(b)
1715 if x < 0 {
1716 return nil, io.ErrUnexpectedEOF
1717 }
1718 v := valToPointer(reflect.New(sub.typ))
1719 err := sub.unmarshal(v, b[:x])
1720 if err != nil {
1721 if r, ok := err.(*RequiredNotSetError); ok {
1722 r.field = name + "." + r.field
1723 } else {
1724 return nil, err
1725 }
1726 }
1727 f.appendPointer(v)
1728 return b[y:], err
1729 }
1730}
1731
1732func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
1733 t := f.Type
1734 kt := t.Key()
1735 vt := t.Elem()
1736 unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
1737 unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
1738 return func(b []byte, f pointer, w int) ([]byte, error) {
1739 // The map entry is a submessage. Figure out how big it is.
1740 if w != WireBytes {
1741 return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
1742 }
1743 x, n := decodeVarint(b)
1744 if n == 0 {
1745 return nil, io.ErrUnexpectedEOF
1746 }
1747 b = b[n:]
1748 if x > uint64(len(b)) {
1749 return nil, io.ErrUnexpectedEOF
1750 }
1751 r := b[x:] // unused data to return
1752 b = b[:x] // data for map entry
1753
1754 // Note: we could use #keys * #values ~= 200 functions
1755 // to do map decoding without reflection. Probably not worth it.
1756 // Maps will be somewhat slow. Oh well.
1757
1758 // Read key and value from data.
1759 var nerr nonFatal
1760 k := reflect.New(kt)
1761 v := reflect.New(vt)
1762 for len(b) > 0 {
1763 x, n := decodeVarint(b)
1764 if n == 0 {
1765 return nil, io.ErrUnexpectedEOF
1766 }
1767 wire := int(x) & 7
1768 b = b[n:]
1769
1770 var err error
1771 switch x >> 3 {
1772 case 1:
1773 b, err = unmarshalKey(b, valToPointer(k), wire)
1774 case 2:
1775 b, err = unmarshalVal(b, valToPointer(v), wire)
1776 default:
1777 err = errInternalBadWireType // skip unknown tag
1778 }
1779
1780 if nerr.Merge(err) {
1781 continue
1782 }
1783 if err != errInternalBadWireType {
1784 return nil, err
1785 }
1786
1787 // Skip past unknown fields.
1788 b, err = skipField(b, wire)
1789 if err != nil {
1790 return nil, err
1791 }
1792 }
1793
1794 // Get map, allocate if needed.
1795 m := f.asPointerTo(t).Elem() // an addressable map[K]T
1796 if m.IsNil() {
1797 m.Set(reflect.MakeMap(t))
1798 }
1799
1800 // Insert into map.
1801 m.SetMapIndex(k.Elem(), v.Elem())
1802
1803 return r, nerr.E
1804 }
1805}
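
As the loop above shows, each map entry is decoded as a small submessage whose key is field 1 and whose value is field 2. A hand-built illustration of what one such entry looks like on the wire, assuming a hypothetical map<string, int32> field numbered 3:

    package main

    import "fmt"

    func main() {
        // Hypothetical entry {"a": 1} of a map<string, int32> field numbered 3.
        entry := []byte{
            0x0a, 0x01, 'a', // field 1 (key), wire type 2, length 1, "a"
            0x10, 0x01, //      field 2 (value), wire type 0, varint 1
        }
        // The enclosing map field is an ordinary length-delimited field:
        // tag = 3<<3 | 2 = 0x1a, then the entry length, then the entry.
        field := append([]byte{0x1a, byte(len(entry))}, entry...)

        fmt.Printf("% x\n", field) // 1a 05 0a 01 61 10 01
    }
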
1806
1807// makeUnmarshalOneof makes an unmarshaler for oneof fields.
1808// for:
1809// message Msg {
1810// oneof F {
1811// int64 X = 1;
 1812// double Y = 2;
1813// }
1814// }
1815// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
1816// ityp is the interface type of the oneof field (e.g. isMsg_F).
1817// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
1818// Note that this function will be called once for each case in the oneof.
1819func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
1820 sf := typ.Field(0)
1821 field0 := toField(&sf)
1822 return func(b []byte, f pointer, w int) ([]byte, error) {
1823 // Allocate holder for value.
1824 v := reflect.New(typ)
1825
1826 // Unmarshal data into holder.
1827 // We unmarshal into the first field of the holder object.
1828 var err error
1829 var nerr nonFatal
1830 b, err = unmarshal(b, valToPointer(v).offset(field0), w)
1831 if !nerr.Merge(err) {
1832 return nil, err
1833 }
1834
1835 // Write pointer to holder into target field.
1836 f.asPointerTo(ityp).Elem().Set(v)
1837
1838 return b, nerr.E
1839 }
1840}
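
The typ/ityp naming in the comment above refers to the wrapper struct and interface the generator emits for each oneof case. A rough, hypothetical sketch of those shapes for the Msg example (names mirror the comment; this is not real generated output):

    package main

    import "fmt"

    // Hypothetical shapes of the generated code for the Msg example above.
    type isMsg_F interface{ isMsg_F() }

    type Msg_X struct{ X int64 }
    type Msg_Y struct{ Y float64 }

    func (*Msg_X) isMsg_F() {}
    func (*Msg_Y) isMsg_F() {}

    type Msg struct{ F isMsg_F }

    func main() {
        // The unmarshaler allocates a holder such as &Msg_X{}, fills its single
        // field, and stores the pointer in the interface-typed oneof field.
        m := Msg{F: &Msg_X{X: 7}}
        if x, ok := m.F.(*Msg_X); ok {
            fmt.Println(x.X) // 7
        }
    }
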
1841
1842// Error used by decode internally.
1843var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
1844
1845// skipField skips past a field of type wire and returns the remaining bytes.
1846func skipField(b []byte, wire int) ([]byte, error) {
1847 switch wire {
1848 case WireVarint:
1849 _, k := decodeVarint(b)
1850 if k == 0 {
1851 return b, io.ErrUnexpectedEOF
1852 }
1853 b = b[k:]
1854 case WireFixed32:
1855 if len(b) < 4 {
1856 return b, io.ErrUnexpectedEOF
1857 }
1858 b = b[4:]
1859 case WireFixed64:
1860 if len(b) < 8 {
1861 return b, io.ErrUnexpectedEOF
1862 }
1863 b = b[8:]
1864 case WireBytes:
1865 m, k := decodeVarint(b)
1866 if k == 0 || uint64(len(b)-k) < m {
1867 return b, io.ErrUnexpectedEOF
1868 }
1869 b = b[uint64(k)+m:]
1870 case WireStartGroup:
1871 _, i := findEndGroup(b)
1872 if i == -1 {
1873 return b, io.ErrUnexpectedEOF
1874 }
1875 b = b[i:]
1876 default:
1877 return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
1878 }
1879 return b, nil
1880}
1881
1882// findEndGroup finds the index of the next EndGroup tag.
1883// Groups may be nested, so the "next" EndGroup tag is the first
1884// unpaired EndGroup.
1885// findEndGroup returns the indexes of the start and end of the EndGroup tag.
1886// Returns (-1,-1) if it can't find one.
1887func findEndGroup(b []byte) (int, int) {
1888 depth := 1
1889 i := 0
1890 for {
1891 x, n := decodeVarint(b[i:])
1892 if n == 0 {
1893 return -1, -1
1894 }
1895 j := i
1896 i += n
1897 switch x & 7 {
1898 case WireVarint:
1899 _, k := decodeVarint(b[i:])
1900 if k == 0 {
1901 return -1, -1
1902 }
1903 i += k
1904 case WireFixed32:
1905 if len(b)-4 < i {
1906 return -1, -1
1907 }
1908 i += 4
1909 case WireFixed64:
1910 if len(b)-8 < i {
1911 return -1, -1
1912 }
1913 i += 8
1914 case WireBytes:
1915 m, k := decodeVarint(b[i:])
1916 if k == 0 {
1917 return -1, -1
1918 }
1919 i += k
1920 if uint64(len(b)-i) < m {
1921 return -1, -1
1922 }
1923 i += int(m)
1924 case WireStartGroup:
1925 depth++
1926 case WireEndGroup:
1927 depth--
1928 if depth == 0 {
1929 return j, i
1930 }
1931 default:
1932 return -1, -1
1933 }
1934 }
1935}
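
Both skipField and findEndGroup rely on the tag layout used throughout this file: a tag varint carries the field number in the high bits and the wire type in the low three bits (the x & 7 switch above, and the x >> 3 split in makeUnmarshalMap). A one-line illustration:

    package main

    import "fmt"

    func main() {
        var tag uint64 = 0x1a // field 3, length-delimited
        fmt.Println("field:", tag>>3, "wire:", tag&7) // field: 3 wire: 2
    }
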
1936
1937// encodeVarint appends a varint-encoded integer to b and returns the result.
1938func encodeVarint(b []byte, x uint64) []byte {
1939 for x >= 1<<7 {
1940 b = append(b, byte(x&0x7f|0x80))
1941 x >>= 7
1942 }
1943 return append(b, byte(x))
1944}
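
encodeVarint emits seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. A tiny standalone sketch of the same loop (putVarint is an illustrative copy, not a new function in this file):

    package main

    import "fmt"

    // putVarint mirrors encodeVarint above: 7 bits per byte, low group first,
    // continuation bit set on all but the final byte.
    func putVarint(b []byte, x uint64) []byte {
        for x >= 1<<7 {
            b = append(b, byte(x&0x7f|0x80))
            x >>= 7
        }
        return append(b, byte(x))
    }

    func main() {
        fmt.Printf("% x\n", putVarint(nil, 300)) // ac 02
        fmt.Printf("% x\n", putVarint(nil, 1))   // 01
    }
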
1945
1946// decodeVarint reads a varint-encoded integer from b.
1947// Returns the decoded integer and the number of bytes read.
1948// If there is an error, it returns 0,0.
1949func decodeVarint(b []byte) (uint64, int) {
1950 var x, y uint64
1951 if len(b) <= 0 {
1952 goto bad
1953 }
1954 x = uint64(b[0])
1955 if x < 0x80 {
1956 return x, 1
1957 }
1958 x -= 0x80
1959
1960 if len(b) <= 1 {
1961 goto bad
1962 }
1963 y = uint64(b[1])
1964 x += y << 7
1965 if y < 0x80 {
1966 return x, 2
1967 }
1968 x -= 0x80 << 7
1969
1970 if len(b) <= 2 {
1971 goto bad
1972 }
1973 y = uint64(b[2])
1974 x += y << 14
1975 if y < 0x80 {
1976 return x, 3
1977 }
1978 x -= 0x80 << 14
1979
1980 if len(b) <= 3 {
1981 goto bad
1982 }
1983 y = uint64(b[3])
1984 x += y << 21
1985 if y < 0x80 {
1986 return x, 4
1987 }
1988 x -= 0x80 << 21
1989
1990 if len(b) <= 4 {
1991 goto bad
1992 }
1993 y = uint64(b[4])
1994 x += y << 28
1995 if y < 0x80 {
1996 return x, 5
1997 }
1998 x -= 0x80 << 28
1999
2000 if len(b) <= 5 {
2001 goto bad
2002 }
2003 y = uint64(b[5])
2004 x += y << 35
2005 if y < 0x80 {
2006 return x, 6
2007 }
2008 x -= 0x80 << 35
2009
2010 if len(b) <= 6 {
2011 goto bad
2012 }
2013 y = uint64(b[6])
2014 x += y << 42
2015 if y < 0x80 {
2016 return x, 7
2017 }
2018 x -= 0x80 << 42
2019
2020 if len(b) <= 7 {
2021 goto bad
2022 }
2023 y = uint64(b[7])
2024 x += y << 49
2025 if y < 0x80 {
2026 return x, 8
2027 }
2028 x -= 0x80 << 49
2029
2030 if len(b) <= 8 {
2031 goto bad
2032 }
2033 y = uint64(b[8])
2034 x += y << 56
2035 if y < 0x80 {
2036 return x, 9
2037 }
2038 x -= 0x80 << 56
2039
2040 if len(b) <= 9 {
2041 goto bad
2042 }
2043 y = uint64(b[9])
2044 x += y << 63
2045 if y < 2 {
2046 return x, 10
2047 }
2048
2049bad:
2050 return 0, 0
2051}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34// Functions for writing the text protocol buffer format.
35
36import (
37 "bufio"
38 "bytes"
39 "encoding"
40 "errors"
41 "fmt"
42 "io"
43 "log"
44 "math"
45 "reflect"
46 "sort"
47 "strings"
48)
49
50var (
51 newline = []byte("\n")
52 spaces = []byte(" ")
53 endBraceNewline = []byte("}\n")
54 backslashN = []byte{'\\', 'n'}
55 backslashR = []byte{'\\', 'r'}
56 backslashT = []byte{'\\', 't'}
57 backslashDQ = []byte{'\\', '"'}
58 backslashBS = []byte{'\\', '\\'}
59 posInf = []byte("inf")
60 negInf = []byte("-inf")
61 nan = []byte("nan")
62)
63
64type writer interface {
65 io.Writer
66 WriteByte(byte) error
67}
68
69// textWriter is an io.Writer that tracks its indentation level.
70type textWriter struct {
71 ind int
72 complete bool // if the current position is a complete line
73 compact bool // whether to write out as a one-liner
74 w writer
75}
76
77func (w *textWriter) WriteString(s string) (n int, err error) {
78 if !strings.Contains(s, "\n") {
79 if !w.compact && w.complete {
80 w.writeIndent()
81 }
82 w.complete = false
83 return io.WriteString(w.w, s)
84 }
85 // WriteString is typically called without newlines, so this
86 // codepath and its copy are rare. We copy to avoid
87 // duplicating all of Write's logic here.
88 return w.Write([]byte(s))
89}
90
91func (w *textWriter) Write(p []byte) (n int, err error) {
92 newlines := bytes.Count(p, newline)
93 if newlines == 0 {
94 if !w.compact && w.complete {
95 w.writeIndent()
96 }
97 n, err = w.w.Write(p)
98 w.complete = false
99 return n, err
100 }
101
102 frags := bytes.SplitN(p, newline, newlines+1)
103 if w.compact {
104 for i, frag := range frags {
105 if i > 0 {
106 if err := w.w.WriteByte(' '); err != nil {
107 return n, err
108 }
109 n++
110 }
111 nn, err := w.w.Write(frag)
112 n += nn
113 if err != nil {
114 return n, err
115 }
116 }
117 return n, nil
118 }
119
120 for i, frag := range frags {
121 if w.complete {
122 w.writeIndent()
123 }
124 nn, err := w.w.Write(frag)
125 n += nn
126 if err != nil {
127 return n, err
128 }
129 if i+1 < len(frags) {
130 if err := w.w.WriteByte('\n'); err != nil {
131 return n, err
132 }
133 n++
134 }
135 }
136 w.complete = len(frags[len(frags)-1]) == 0
137 return n, nil
138}
139
140func (w *textWriter) WriteByte(c byte) error {
141 if w.compact && c == '\n' {
142 c = ' '
143 }
144 if !w.compact && w.complete {
145 w.writeIndent()
146 }
147 err := w.w.WriteByte(c)
148 w.complete = c == '\n'
149 return err
150}
151
152func (w *textWriter) indent() { w.ind++ }
153
154func (w *textWriter) unindent() {
155 if w.ind == 0 {
156 log.Print("proto: textWriter unindented too far")
157 return
158 }
159 w.ind--
160}
161
162func writeName(w *textWriter, props *Properties) error {
163 if _, err := w.WriteString(props.OrigName); err != nil {
164 return err
165 }
166 if props.Wire != "group" {
167 return w.WriteByte(':')
168 }
169 return nil
170}
171
172func requiresQuotes(u string) bool {
173 // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
174 for _, ch := range u {
175 switch {
176 case ch == '.' || ch == '/' || ch == '_':
177 continue
178 case '0' <= ch && ch <= '9':
179 continue
180 case 'A' <= ch && ch <= 'Z':
181 continue
182 case 'a' <= ch && ch <= 'z':
183 continue
184 default:
185 return true
186 }
187 }
188 return false
189}
190
191// isAny reports whether sv is a google.protobuf.Any message
192func isAny(sv reflect.Value) bool {
193 type wkt interface {
194 XXX_WellKnownType() string
195 }
196 t, ok := sv.Addr().Interface().(wkt)
197 return ok && t.XXX_WellKnownType() == "Any"
198}
199
200// writeProto3Any writes an expanded google.protobuf.Any message.
201//
202// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
203// required messages are not linked in).
204//
205// It returns (true, error) when sv was written in expanded format or an error
206// was encountered.
207func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
208 turl := sv.FieldByName("TypeUrl")
209 val := sv.FieldByName("Value")
210 if !turl.IsValid() || !val.IsValid() {
211 return true, errors.New("proto: invalid google.protobuf.Any message")
212 }
213
214 b, ok := val.Interface().([]byte)
215 if !ok {
216 return true, errors.New("proto: invalid google.protobuf.Any message")
217 }
218
219 parts := strings.Split(turl.String(), "/")
220 mt := MessageType(parts[len(parts)-1])
221 if mt == nil {
222 return false, nil
223 }
224 m := reflect.New(mt.Elem())
225 if err := Unmarshal(b, m.Interface().(Message)); err != nil {
226 return false, nil
227 }
228 w.Write([]byte("["))
229 u := turl.String()
230 if requiresQuotes(u) {
231 writeString(w, u)
232 } else {
233 w.Write([]byte(u))
234 }
235 if w.compact {
236 w.Write([]byte("]:<"))
237 } else {
238 w.Write([]byte("]: <\n"))
239 w.ind++
240 }
241 if err := tm.writeStruct(w, m.Elem()); err != nil {
242 return true, err
243 }
244 if w.compact {
245 w.Write([]byte("> "))
246 } else {
247 w.ind--
248 w.Write([]byte(">\n"))
249 }
250 return true, nil
251}
252
253func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
254 if tm.ExpandAny && isAny(sv) {
255 if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
256 return err
257 }
258 }
259 st := sv.Type()
260 sprops := GetProperties(st)
261 for i := 0; i < sv.NumField(); i++ {
262 fv := sv.Field(i)
263 props := sprops.Prop[i]
264 name := st.Field(i).Name
265
266 if name == "XXX_NoUnkeyedLiteral" {
267 continue
268 }
269
270 if strings.HasPrefix(name, "XXX_") {
271 // There are two XXX_ fields:
272 // XXX_unrecognized []byte
273 // XXX_extensions map[int32]proto.Extension
274 // The first is handled here;
275 // the second is handled at the bottom of this function.
276 if name == "XXX_unrecognized" && !fv.IsNil() {
277 if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
278 return err
279 }
280 }
281 continue
282 }
283 if fv.Kind() == reflect.Ptr && fv.IsNil() {
284 // Field not filled in. This could be an optional field or
285 // a required field that wasn't filled in. Either way, there
286 // isn't anything we can show for it.
287 continue
288 }
289 if fv.Kind() == reflect.Slice && fv.IsNil() {
290 // Repeated field that is empty, or a bytes field that is unused.
291 continue
292 }
293
294 if props.Repeated && fv.Kind() == reflect.Slice {
295 // Repeated field.
296 for j := 0; j < fv.Len(); j++ {
297 if err := writeName(w, props); err != nil {
298 return err
299 }
300 if !w.compact {
301 if err := w.WriteByte(' '); err != nil {
302 return err
303 }
304 }
305 v := fv.Index(j)
306 if v.Kind() == reflect.Ptr && v.IsNil() {
307 // A nil message in a repeated field is not valid,
308 // but we can handle that more gracefully than panicking.
309 if _, err := w.Write([]byte("<nil>\n")); err != nil {
310 return err
311 }
312 continue
313 }
314 if err := tm.writeAny(w, v, props); err != nil {
315 return err
316 }
317 if err := w.WriteByte('\n'); err != nil {
318 return err
319 }
320 }
321 continue
322 }
323 if fv.Kind() == reflect.Map {
324 // Map fields are rendered as a repeated struct with key/value fields.
325 keys := fv.MapKeys()
326 sort.Sort(mapKeys(keys))
327 for _, key := range keys {
328 val := fv.MapIndex(key)
329 if err := writeName(w, props); err != nil {
330 return err
331 }
332 if !w.compact {
333 if err := w.WriteByte(' '); err != nil {
334 return err
335 }
336 }
337 // open struct
338 if err := w.WriteByte('<'); err != nil {
339 return err
340 }
341 if !w.compact {
342 if err := w.WriteByte('\n'); err != nil {
343 return err
344 }
345 }
346 w.indent()
347 // key
348 if _, err := w.WriteString("key:"); err != nil {
349 return err
350 }
351 if !w.compact {
352 if err := w.WriteByte(' '); err != nil {
353 return err
354 }
355 }
356 if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
357 return err
358 }
359 if err := w.WriteByte('\n'); err != nil {
360 return err
361 }
362 // nil values aren't legal, but we can avoid panicking because of them.
363 if val.Kind() != reflect.Ptr || !val.IsNil() {
364 // value
365 if _, err := w.WriteString("value:"); err != nil {
366 return err
367 }
368 if !w.compact {
369 if err := w.WriteByte(' '); err != nil {
370 return err
371 }
372 }
373 if err := tm.writeAny(w, val, props.MapValProp); err != nil {
374 return err
375 }
376 if err := w.WriteByte('\n'); err != nil {
377 return err
378 }
379 }
380 // close struct
381 w.unindent()
382 if err := w.WriteByte('>'); err != nil {
383 return err
384 }
385 if err := w.WriteByte('\n'); err != nil {
386 return err
387 }
388 }
389 continue
390 }
391 if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
392 // empty bytes field
393 continue
394 }
395 if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
396 // proto3 non-repeated scalar field; skip if zero value
397 if isProto3Zero(fv) {
398 continue
399 }
400 }
401
402 if fv.Kind() == reflect.Interface {
403 // Check if it is a oneof.
404 if st.Field(i).Tag.Get("protobuf_oneof") != "" {
405 // fv is nil, or holds a pointer to generated struct.
406 // That generated struct has exactly one field,
407 // which has a protobuf struct tag.
408 if fv.IsNil() {
409 continue
410 }
411 inner := fv.Elem().Elem() // interface -> *T -> T
412 tag := inner.Type().Field(0).Tag.Get("protobuf")
413 props = new(Properties) // Overwrite the outer props var, but not its pointee.
414 props.Parse(tag)
415 // Write the value in the oneof, not the oneof itself.
416 fv = inner.Field(0)
417
418 // Special case to cope with malformed messages gracefully:
419 // If the value in the oneof is a nil pointer, don't panic
420 // in writeAny.
421 if fv.Kind() == reflect.Ptr && fv.IsNil() {
422 // Use errors.New so writeAny won't render quotes.
423 msg := errors.New("/* nil */")
424 fv = reflect.ValueOf(&msg).Elem()
425 }
426 }
427 }
428
429 if err := writeName(w, props); err != nil {
430 return err
431 }
432 if !w.compact {
433 if err := w.WriteByte(' '); err != nil {
434 return err
435 }
436 }
437
438 // Enums have a String method, so writeAny will work fine.
439 if err := tm.writeAny(w, fv, props); err != nil {
440 return err
441 }
442
443 if err := w.WriteByte('\n'); err != nil {
444 return err
445 }
446 }
447
448 // Extensions (the XXX_extensions field).
449 pv := sv.Addr()
450 if _, err := extendable(pv.Interface()); err == nil {
451 if err := tm.writeExtensions(w, pv); err != nil {
452 return err
453 }
454 }
455
456 return nil
457}
458
459// writeAny writes an arbitrary field.
460func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
461 v = reflect.Indirect(v)
462
463 // Floats have special cases.
464 if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
465 x := v.Float()
466 var b []byte
467 switch {
468 case math.IsInf(x, 1):
469 b = posInf
470 case math.IsInf(x, -1):
471 b = negInf
472 case math.IsNaN(x):
473 b = nan
474 }
475 if b != nil {
476 _, err := w.Write(b)
477 return err
478 }
479 // Other values are handled below.
480 }
481
482 // We don't attempt to serialise every possible value type; only those
483 // that can occur in protocol buffers.
484 switch v.Kind() {
485 case reflect.Slice:
486 // Should only be a []byte; repeated fields are handled in writeStruct.
487 if err := writeString(w, string(v.Bytes())); err != nil {
488 return err
489 }
490 case reflect.String:
491 if err := writeString(w, v.String()); err != nil {
492 return err
493 }
494 case reflect.Struct:
495 // Required/optional group/message.
496 var bra, ket byte = '<', '>'
497 if props != nil && props.Wire == "group" {
498 bra, ket = '{', '}'
499 }
500 if err := w.WriteByte(bra); err != nil {
501 return err
502 }
503 if !w.compact {
504 if err := w.WriteByte('\n'); err != nil {
505 return err
506 }
507 }
508 w.indent()
509 if v.CanAddr() {
510 // Calling v.Interface on a struct causes the reflect package to
511 // copy the entire struct. This is racy with the new Marshaler
512 // since we atomically update the XXX_sizecache.
513 //
514 // Thus, we retrieve a pointer to the struct if possible to avoid
515 // a race since v.Interface on the pointer doesn't copy the struct.
516 //
517 // If v is not addressable, then we are not worried about a race
518 // since it implies that the binary Marshaler cannot possibly be
519 // mutating this value.
520 v = v.Addr()
521 }
522 if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
523 text, err := etm.MarshalText()
524 if err != nil {
525 return err
526 }
527 if _, err = w.Write(text); err != nil {
528 return err
529 }
530 } else {
531 if v.Kind() == reflect.Ptr {
532 v = v.Elem()
533 }
534 if err := tm.writeStruct(w, v); err != nil {
535 return err
536 }
537 }
538 w.unindent()
539 if err := w.WriteByte(ket); err != nil {
540 return err
541 }
542 default:
543 _, err := fmt.Fprint(w, v.Interface())
544 return err
545 }
546 return nil
547}
548
549// equivalent to C's isprint.
550func isprint(c byte) bool {
551 return c >= 0x20 && c < 0x7f
552}
553
554// writeString writes a string in the protocol buffer text format.
555// It is similar to strconv.Quote except we don't use Go escape sequences,
556// we treat the string as a byte sequence, and we use octal escapes.
557// These differences are to maintain interoperability with the other
558// languages' implementations of the text format.
559func writeString(w *textWriter, s string) error {
560 // use WriteByte here to get any needed indent
561 if err := w.WriteByte('"'); err != nil {
562 return err
563 }
564 // Loop over the bytes, not the runes.
565 for i := 0; i < len(s); i++ {
566 var err error
567 // Divergence from C++: we don't escape apostrophes.
568 // There's no need to escape them, and the C++ parser
569 // copes with a naked apostrophe.
570 switch c := s[i]; c {
571 case '\n':
572 _, err = w.w.Write(backslashN)
573 case '\r':
574 _, err = w.w.Write(backslashR)
575 case '\t':
576 _, err = w.w.Write(backslashT)
577 case '"':
578 _, err = w.w.Write(backslashDQ)
579 case '\\':
580 _, err = w.w.Write(backslashBS)
581 default:
582 if isprint(c) {
583 err = w.w.WriteByte(c)
584 } else {
585 _, err = fmt.Fprintf(w.w, "\\%03o", c)
586 }
587 }
588 if err != nil {
589 return err
590 }
591 }
592 return w.WriteByte('"')
593}
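
writeString walks bytes rather than runes and falls back to three-digit octal escapes for anything outside printable ASCII, which keeps the output compatible with other text-format implementations. A self-contained sketch of the same escaping rule that avoids the unexported textWriter (escapeTextString is an illustrative helper, not part of this package):

    package main

    import (
        "fmt"
        "strings"
    )

    // escapeTextString applies the same byte-oriented escaping as writeString,
    // returning the quoted form.
    func escapeTextString(s string) string {
        var b strings.Builder
        b.WriteByte('"')
        for i := 0; i < len(s); i++ {
            switch c := s[i]; c {
            case '\n':
                b.WriteString(`\n`)
            case '\r':
                b.WriteString(`\r`)
            case '\t':
                b.WriteString(`\t`)
            case '"':
                b.WriteString(`\"`)
            case '\\':
                b.WriteString(`\\`)
            default:
                if c >= 0x20 && c < 0x7f { // printable ASCII
                    b.WriteByte(c)
                } else {
                    fmt.Fprintf(&b, "\\%03o", c)
                }
            }
        }
        b.WriteByte('"')
        return b.String()
    }

    func main() {
        fmt.Println(escapeTextString("a\tb\n\x00é"))
        // "a\tb\n\000\303\251"  (é is two UTF-8 bytes, each escaped in octal)
    }
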
594
595func writeUnknownStruct(w *textWriter, data []byte) (err error) {
596 if !w.compact {
597 if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
598 return err
599 }
600 }
601 b := NewBuffer(data)
602 for b.index < len(b.buf) {
603 x, err := b.DecodeVarint()
604 if err != nil {
605 _, err := fmt.Fprintf(w, "/* %v */\n", err)
606 return err
607 }
608 wire, tag := x&7, x>>3
609 if wire == WireEndGroup {
610 w.unindent()
611 if _, err := w.Write(endBraceNewline); err != nil {
612 return err
613 }
614 continue
615 }
616 if _, err := fmt.Fprint(w, tag); err != nil {
617 return err
618 }
619 if wire != WireStartGroup {
620 if err := w.WriteByte(':'); err != nil {
621 return err
622 }
623 }
624 if !w.compact || wire == WireStartGroup {
625 if err := w.WriteByte(' '); err != nil {
626 return err
627 }
628 }
629 switch wire {
630 case WireBytes:
631 buf, e := b.DecodeRawBytes(false)
632 if e == nil {
633 _, err = fmt.Fprintf(w, "%q", buf)
634 } else {
635 _, err = fmt.Fprintf(w, "/* %v */", e)
636 }
637 case WireFixed32:
638 x, err = b.DecodeFixed32()
639 err = writeUnknownInt(w, x, err)
640 case WireFixed64:
641 x, err = b.DecodeFixed64()
642 err = writeUnknownInt(w, x, err)
643 case WireStartGroup:
644 err = w.WriteByte('{')
645 w.indent()
646 case WireVarint:
647 x, err = b.DecodeVarint()
648 err = writeUnknownInt(w, x, err)
649 default:
650 _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
651 }
652 if err != nil {
653 return err
654 }
655 if err = w.WriteByte('\n'); err != nil {
656 return err
657 }
658 }
659 return nil
660}
661
662func writeUnknownInt(w *textWriter, x uint64, err error) error {
663 if err == nil {
664 _, err = fmt.Fprint(w, x)
665 } else {
666 _, err = fmt.Fprintf(w, "/* %v */", err)
667 }
668 return err
669}
670
671type int32Slice []int32
672
673func (s int32Slice) Len() int { return len(s) }
674func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
675func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
676
677// writeExtensions writes all the extensions in pv.
678// pv is assumed to be a pointer to a protocol message struct that is extendable.
679func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
680 emap := extensionMaps[pv.Type().Elem()]
681 ep, _ := extendable(pv.Interface())
682
683 // Order the extensions by ID.
684 // This isn't strictly necessary, but it will give us
685 // canonical output, which will also make testing easier.
686 m, mu := ep.extensionsRead()
687 if m == nil {
688 return nil
689 }
690 mu.Lock()
691 ids := make([]int32, 0, len(m))
692 for id := range m {
693 ids = append(ids, id)
694 }
695 sort.Sort(int32Slice(ids))
696 mu.Unlock()
697
698 for _, extNum := range ids {
699 ext := m[extNum]
700 var desc *ExtensionDesc
701 if emap != nil {
702 desc = emap[extNum]
703 }
704 if desc == nil {
705 // Unknown extension.
706 if err := writeUnknownStruct(w, ext.enc); err != nil {
707 return err
708 }
709 continue
710 }
711
712 pb, err := GetExtension(ep, desc)
713 if err != nil {
714 return fmt.Errorf("failed getting extension: %v", err)
715 }
716
717 // Repeated extensions will appear as a slice.
718 if !desc.repeated() {
719 if err := tm.writeExtension(w, desc.Name, pb); err != nil {
720 return err
721 }
722 } else {
723 v := reflect.ValueOf(pb)
724 for i := 0; i < v.Len(); i++ {
725 if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
726 return err
727 }
728 }
729 }
730 }
731 return nil
732}
733
734func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
735 if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
736 return err
737 }
738 if !w.compact {
739 if err := w.WriteByte(' '); err != nil {
740 return err
741 }
742 }
743 if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
744 return err
745 }
746 if err := w.WriteByte('\n'); err != nil {
747 return err
748 }
749 return nil
750}
751
752func (w *textWriter) writeIndent() {
753 if !w.complete {
754 return
755 }
756 remain := w.ind * 2
757 for remain > 0 {
758 n := remain
759 if n > len(spaces) {
760 n = len(spaces)
761 }
762 w.w.Write(spaces[:n])
763 remain -= n
764 }
765 w.complete = false
766}
767
768// TextMarshaler is a configurable text format marshaler.
769type TextMarshaler struct {
770 Compact bool // use compact text format (one line).
771 ExpandAny bool // expand google.protobuf.Any messages of known types
772}
773
774// Marshal writes a given protocol buffer in text format.
775// The only errors returned are from w.
776func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
777 val := reflect.ValueOf(pb)
778 if pb == nil || val.IsNil() {
779 w.Write([]byte("<nil>"))
780 return nil
781 }
782 var bw *bufio.Writer
783 ww, ok := w.(writer)
784 if !ok {
785 bw = bufio.NewWriter(w)
786 ww = bw
787 }
788 aw := &textWriter{
789 w: ww,
790 complete: true,
791 compact: tm.Compact,
792 }
793
794 if etm, ok := pb.(encoding.TextMarshaler); ok {
795 text, err := etm.MarshalText()
796 if err != nil {
797 return err
798 }
799 if _, err = aw.Write(text); err != nil {
800 return err
801 }
802 if bw != nil {
803 return bw.Flush()
804 }
805 return nil
806 }
807 // Dereference the received pointer so we don't have outer < and >.
808 v := reflect.Indirect(val)
809 if err := tm.writeStruct(aw, v); err != nil {
810 return err
811 }
812 if bw != nil {
813 return bw.Flush()
814 }
815 return nil
816}
817
818// Text is the same as Marshal, but returns the string directly.
819func (tm *TextMarshaler) Text(pb Message) string {
820 var buf bytes.Buffer
821 tm.Marshal(&buf, pb)
822 return buf.String()
823}
824
825var (
826 defaultTextMarshaler = TextMarshaler{}
827 compactTextMarshaler = TextMarshaler{Compact: true}
828)
829
830// TODO: consider removing some of the Marshal functions below.
831
832// MarshalText writes a given protocol buffer in text format.
833// The only errors returned are from w.
834func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
835
836// MarshalTextString is the same as MarshalText, but returns the string directly.
837func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
838
839// CompactText writes a given protocol buffer in compact text format (one line).
840func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
841
842// CompactTextString is the same as CompactText, but returns the string directly.
843func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2010 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package proto
33
34// Functions for parsing the Text protocol buffer format.
35// TODO: message sets.
36
37import (
38 "encoding"
39 "errors"
40 "fmt"
41 "reflect"
42 "strconv"
43 "strings"
44 "unicode/utf8"
45)
46
47// Error string emitted when deserializing Any and fields are already set
48const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
49
50type ParseError struct {
51 Message string
52 Line int // 1-based line number
53 Offset int // 0-based byte offset from start of input
54}
55
56func (p *ParseError) Error() string {
57 if p.Line == 1 {
58 // show offset only for first line
59 return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
60 }
61 return fmt.Sprintf("line %d: %v", p.Line, p.Message)
62}
63
64type token struct {
65 value string
66 err *ParseError
67 line int // line number
68 offset int // byte number from start of input, not start of line
69 unquoted string // the unquoted version of value, if it was a quoted string
70}
71
72func (t *token) String() string {
73 if t.err == nil {
74 return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
75 }
76 return fmt.Sprintf("parse error: %v", t.err)
77}
78
79type textParser struct {
80 s string // remaining input
81 done bool // whether the parsing is finished (success or error)
82 backed bool // whether back() was called
83 offset, line int
84 cur token
85}
86
87func newTextParser(s string) *textParser {
88 p := new(textParser)
89 p.s = s
90 p.line = 1
91 p.cur.line = 1
92 return p
93}
94
95func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
96 pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
97 p.cur.err = pe
98 p.done = true
99 return pe
100}
101
102// Numbers and identifiers are matched by [-+._A-Za-z0-9]
103func isIdentOrNumberChar(c byte) bool {
104 switch {
105 case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
106 return true
107 case '0' <= c && c <= '9':
108 return true
109 }
110 switch c {
111 case '-', '+', '.', '_':
112 return true
113 }
114 return false
115}
116
117func isWhitespace(c byte) bool {
118 switch c {
119 case ' ', '\t', '\n', '\r':
120 return true
121 }
122 return false
123}
124
125func isQuote(c byte) bool {
126 switch c {
127 case '"', '\'':
128 return true
129 }
130 return false
131}
132
133func (p *textParser) skipWhitespace() {
134 i := 0
135 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
136 if p.s[i] == '#' {
137 // comment; skip to end of line or input
138 for i < len(p.s) && p.s[i] != '\n' {
139 i++
140 }
141 if i == len(p.s) {
142 break
143 }
144 }
145 if p.s[i] == '\n' {
146 p.line++
147 }
148 i++
149 }
150 p.offset += i
151 p.s = p.s[i:len(p.s)]
152 if len(p.s) == 0 {
153 p.done = true
154 }
155}
156
157func (p *textParser) advance() {
158 // Skip whitespace
159 p.skipWhitespace()
160 if p.done {
161 return
162 }
163
164 // Start of non-whitespace
165 p.cur.err = nil
166 p.cur.offset, p.cur.line = p.offset, p.line
167 p.cur.unquoted = ""
168 switch p.s[0] {
169 case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
170 // Single symbol
171 p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
172 case '"', '\'':
173 // Quoted string
174 i := 1
175 for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
176 if p.s[i] == '\\' && i+1 < len(p.s) {
177 // skip escaped char
178 i++
179 }
180 i++
181 }
182 if i >= len(p.s) || p.s[i] != p.s[0] {
183 p.errorf("unmatched quote")
184 return
185 }
186 unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
187 if err != nil {
188 p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
189 return
190 }
191 p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
192 p.cur.unquoted = unq
193 default:
194 i := 0
195 for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
196 i++
197 }
198 if i == 0 {
199 p.errorf("unexpected byte %#x", p.s[0])
200 return
201 }
202 p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
203 }
204 p.offset += len(p.cur.value)
205}
206
207var (
208 errBadUTF8 = errors.New("proto: bad UTF-8")
209)
210
211func unquoteC(s string, quote rune) (string, error) {
212 // This is based on C++'s tokenizer.cc.
213 // Despite its name, this is *not* parsing C syntax.
214 // For instance, "\0" is an invalid quoted string.
215
216 // Avoid allocation in trivial cases.
217 simple := true
218 for _, r := range s {
219 if r == '\\' || r == quote {
220 simple = false
221 break
222 }
223 }
224 if simple {
225 return s, nil
226 }
227
228 buf := make([]byte, 0, 3*len(s)/2)
229 for len(s) > 0 {
230 r, n := utf8.DecodeRuneInString(s)
231 if r == utf8.RuneError && n == 1 {
232 return "", errBadUTF8
233 }
234 s = s[n:]
235 if r != '\\' {
236 if r < utf8.RuneSelf {
237 buf = append(buf, byte(r))
238 } else {
239 buf = append(buf, string(r)...)
240 }
241 continue
242 }
243
244 ch, tail, err := unescape(s)
245 if err != nil {
246 return "", err
247 }
248 buf = append(buf, ch...)
249 s = tail
250 }
251 return string(buf), nil
252}
253
254func unescape(s string) (ch string, tail string, err error) {
255 r, n := utf8.DecodeRuneInString(s)
256 if r == utf8.RuneError && n == 1 {
257 return "", "", errBadUTF8
258 }
259 s = s[n:]
260 switch r {
261 case 'a':
262 return "\a", s, nil
263 case 'b':
264 return "\b", s, nil
265 case 'f':
266 return "\f", s, nil
267 case 'n':
268 return "\n", s, nil
269 case 'r':
270 return "\r", s, nil
271 case 't':
272 return "\t", s, nil
273 case 'v':
274 return "\v", s, nil
275 case '?':
276 return "?", s, nil // trigraph workaround
277 case '\'', '"', '\\':
278 return string(r), s, nil
279 case '0', '1', '2', '3', '4', '5', '6', '7':
280 if len(s) < 2 {
281 return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
282 }
283 ss := string(r) + s[:2]
284 s = s[2:]
285 i, err := strconv.ParseUint(ss, 8, 8)
286 if err != nil {
287 return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
288 }
289 return string([]byte{byte(i)}), s, nil
290 case 'x', 'X', 'u', 'U':
291 var n int
292 switch r {
293 case 'x', 'X':
294 n = 2
295 case 'u':
296 n = 4
297 case 'U':
298 n = 8
299 }
300 if len(s) < n {
301 return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
302 }
303 ss := s[:n]
304 s = s[n:]
305 i, err := strconv.ParseUint(ss, 16, 64)
306 if err != nil {
307 return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
308 }
309 if r == 'x' || r == 'X' {
310 return string([]byte{byte(i)}), s, nil
311 }
312 if i > utf8.MaxRune {
313 return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
314 }
315		return string(rune(i)), s, nil
316 }
317 return "", "", fmt.Errorf(`unknown escape \%c`, r)
318}
319
320// Back off the parser by one token. Can only be done between calls to next().
321// It makes the next advance() a no-op.
322func (p *textParser) back() { p.backed = true }
323
324// Advances the parser and returns the new current token.
325func (p *textParser) next() *token {
326 if p.backed || p.done {
327 p.backed = false
328 return &p.cur
329 }
330 p.advance()
331 if p.done {
332 p.cur.value = ""
333 } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
334 // Look for multiple quoted strings separated by whitespace,
335 // and concatenate them.
336 cat := p.cur
337 for {
338 p.skipWhitespace()
339 if p.done || !isQuote(p.s[0]) {
340 break
341 }
342 p.advance()
343 if p.cur.err != nil {
344 return &p.cur
345 }
346 cat.value += " " + p.cur.value
347 cat.unquoted += p.cur.unquoted
348 }
349 p.done = false // parser may have seen EOF, but we want to return cat
350 p.cur = cat
351 }
352 return &p.cur
353}
354
355func (p *textParser) consumeToken(s string) error {
356 tok := p.next()
357 if tok.err != nil {
358 return tok.err
359 }
360 if tok.value != s {
361 p.back()
362 return p.errorf("expected %q, found %q", s, tok.value)
363 }
364 return nil
365}
366
367// Return a RequiredNotSetError indicating which required field was not set.
368func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
369 st := sv.Type()
370 sprops := GetProperties(st)
371 for i := 0; i < st.NumField(); i++ {
372 if !isNil(sv.Field(i)) {
373 continue
374 }
375
376 props := sprops.Prop[i]
377 if props.Required {
378 return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
379 }
380 }
381 return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
382}
383
384// Returns the index in the struct for the named field, as well as the parsed tag properties.
385func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
386 i, ok := sprops.decoderOrigNames[name]
387 if ok {
388 return i, sprops.Prop[i], true
389 }
390 return -1, nil, false
391}
392
393// Consume a ':' from the input stream (if the next token is a colon),
394// returning an error if a colon is needed but not present.
395func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
396 tok := p.next()
397 if tok.err != nil {
398 return tok.err
399 }
400 if tok.value != ":" {
401 // Colon is optional when the field is a group or message.
402 needColon := true
403 switch props.Wire {
404 case "group":
405 needColon = false
406 case "bytes":
407 // A "bytes" field is either a message, a string, or a repeated field;
408 // those three become *T, *string and []T respectively, so we can check for
409 // this field being a pointer to a non-string.
410 if typ.Kind() == reflect.Ptr {
411 // *T or *string
412 if typ.Elem().Kind() == reflect.String {
413 break
414 }
415 } else if typ.Kind() == reflect.Slice {
416 // []T or []*T
417 if typ.Elem().Kind() != reflect.Ptr {
418 break
419 }
420 } else if typ.Kind() == reflect.String {
421 // The proto3 exception is for a string field,
422 // which requires a colon.
423 break
424 }
425 needColon = false
426 }
427 if needColon {
428 return p.errorf("expected ':', found %q", tok.value)
429 }
430 p.back()
431 }
432 return nil
433}
434
435func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
436 st := sv.Type()
437 sprops := GetProperties(st)
438 reqCount := sprops.reqCount
439 var reqFieldErr error
440 fieldSet := make(map[string]bool)
441 // A struct is a sequence of "name: value", terminated by one of
442 // '>' or '}', or the end of the input. A name may also be
443 // "[extension]" or "[type/url]".
444 //
445 // The whole struct can also be an expanded Any message, like:
446 // [type/url] < ... struct contents ... >
447 for {
448 tok := p.next()
449 if tok.err != nil {
450 return tok.err
451 }
452 if tok.value == terminator {
453 break
454 }
455 if tok.value == "[" {
456 // Looks like an extension or an Any.
457 //
458 // TODO: Check whether we need to handle
459 // namespace rooted names (e.g. ".something.Foo").
460 extName, err := p.consumeExtName()
461 if err != nil {
462 return err
463 }
464
465 if s := strings.LastIndex(extName, "/"); s >= 0 {
466 // If it contains a slash, it's an Any type URL.
467 messageName := extName[s+1:]
468 mt := MessageType(messageName)
469 if mt == nil {
470 return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
471 }
472 tok = p.next()
473 if tok.err != nil {
474 return tok.err
475 }
476 // consume an optional colon
477 if tok.value == ":" {
478 tok = p.next()
479 if tok.err != nil {
480 return tok.err
481 }
482 }
483 var terminator string
484 switch tok.value {
485 case "<":
486 terminator = ">"
487 case "{":
488 terminator = "}"
489 default:
490 return p.errorf("expected '{' or '<', found %q", tok.value)
491 }
492 v := reflect.New(mt.Elem())
493 if pe := p.readStruct(v.Elem(), terminator); pe != nil {
494 return pe
495 }
496 b, err := Marshal(v.Interface().(Message))
497 if err != nil {
498 return p.errorf("failed to marshal message of type %q: %v", messageName, err)
499 }
500 if fieldSet["type_url"] {
501 return p.errorf(anyRepeatedlyUnpacked, "type_url")
502 }
503 if fieldSet["value"] {
504 return p.errorf(anyRepeatedlyUnpacked, "value")
505 }
506 sv.FieldByName("TypeUrl").SetString(extName)
507 sv.FieldByName("Value").SetBytes(b)
508 fieldSet["type_url"] = true
509 fieldSet["value"] = true
510 continue
511 }
512
513 var desc *ExtensionDesc
514 // This could be faster, but it's functional.
515 // TODO: Do something smarter than a linear scan.
516 for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
517 if d.Name == extName {
518 desc = d
519 break
520 }
521 }
522 if desc == nil {
523 return p.errorf("unrecognized extension %q", extName)
524 }
525
526 props := &Properties{}
527 props.Parse(desc.Tag)
528
529 typ := reflect.TypeOf(desc.ExtensionType)
530 if err := p.checkForColon(props, typ); err != nil {
531 return err
532 }
533
534 rep := desc.repeated()
535
536 // Read the extension structure, and set it in
537 // the value we're constructing.
538 var ext reflect.Value
539 if !rep {
540 ext = reflect.New(typ).Elem()
541 } else {
542 ext = reflect.New(typ.Elem()).Elem()
543 }
544 if err := p.readAny(ext, props); err != nil {
545 if _, ok := err.(*RequiredNotSetError); !ok {
546 return err
547 }
548 reqFieldErr = err
549 }
550 ep := sv.Addr().Interface().(Message)
551 if !rep {
552 SetExtension(ep, desc, ext.Interface())
553 } else {
554 old, err := GetExtension(ep, desc)
555 var sl reflect.Value
556 if err == nil {
557 sl = reflect.ValueOf(old) // existing slice
558 } else {
559 sl = reflect.MakeSlice(typ, 0, 1)
560 }
561 sl = reflect.Append(sl, ext)
562 SetExtension(ep, desc, sl.Interface())
563 }
564 if err := p.consumeOptionalSeparator(); err != nil {
565 return err
566 }
567 continue
568 }
569
570 // This is a normal, non-extension field.
571 name := tok.value
572 var dst reflect.Value
573 fi, props, ok := structFieldByName(sprops, name)
574 if ok {
575 dst = sv.Field(fi)
576 } else if oop, ok := sprops.OneofTypes[name]; ok {
577 // It is a oneof.
578 props = oop.Prop
579 nv := reflect.New(oop.Type.Elem())
580 dst = nv.Elem().Field(0)
581 field := sv.Field(oop.Field)
582 if !field.IsNil() {
583 return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
584 }
585 field.Set(nv)
586 }
587 if !dst.IsValid() {
588 return p.errorf("unknown field name %q in %v", name, st)
589 }
590
591 if dst.Kind() == reflect.Map {
592 // Consume any colon.
593 if err := p.checkForColon(props, dst.Type()); err != nil {
594 return err
595 }
596
597 // Construct the map if it doesn't already exist.
598 if dst.IsNil() {
599 dst.Set(reflect.MakeMap(dst.Type()))
600 }
601 key := reflect.New(dst.Type().Key()).Elem()
602 val := reflect.New(dst.Type().Elem()).Elem()
603
604 // The map entry should be this sequence of tokens:
605 // < key : KEY value : VALUE >
606 // However, implementations may omit key or value, and technically
607 // we should support them in any order. See b/28924776 for a time
608 // this went wrong.
609
610 tok := p.next()
611 var terminator string
612 switch tok.value {
613 case "<":
614 terminator = ">"
615 case "{":
616 terminator = "}"
617 default:
618 return p.errorf("expected '{' or '<', found %q", tok.value)
619 }
620 for {
621 tok := p.next()
622 if tok.err != nil {
623 return tok.err
624 }
625 if tok.value == terminator {
626 break
627 }
628 switch tok.value {
629 case "key":
630 if err := p.consumeToken(":"); err != nil {
631 return err
632 }
633 if err := p.readAny(key, props.MapKeyProp); err != nil {
634 return err
635 }
636 if err := p.consumeOptionalSeparator(); err != nil {
637 return err
638 }
639 case "value":
640 if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
641 return err
642 }
643 if err := p.readAny(val, props.MapValProp); err != nil {
644 return err
645 }
646 if err := p.consumeOptionalSeparator(); err != nil {
647 return err
648 }
649 default:
650 p.back()
651 return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
652 }
653 }
654
655 dst.SetMapIndex(key, val)
656 continue
657 }
658
659 // Check that it's not already set if it's not a repeated field.
660 if !props.Repeated && fieldSet[name] {
661 return p.errorf("non-repeated field %q was repeated", name)
662 }
663
664 if err := p.checkForColon(props, dst.Type()); err != nil {
665 return err
666 }
667
668 // Parse into the field.
669 fieldSet[name] = true
670 if err := p.readAny(dst, props); err != nil {
671 if _, ok := err.(*RequiredNotSetError); !ok {
672 return err
673 }
674 reqFieldErr = err
675 }
676 if props.Required {
677 reqCount--
678 }
679
680 if err := p.consumeOptionalSeparator(); err != nil {
681 return err
682 }
683
684 }
685
686 if reqCount > 0 {
687 return p.missingRequiredFieldError(sv)
688 }
689 return reqFieldErr
690}
691
692// consumeExtName consumes extension name or expanded Any type URL and the
693// following ']'. It returns the name or URL consumed.
694func (p *textParser) consumeExtName() (string, error) {
695 tok := p.next()
696 if tok.err != nil {
697 return "", tok.err
698 }
699
700 // If extension name or type url is quoted, it's a single token.
701 if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
702 name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
703 if err != nil {
704 return "", err
705 }
706 return name, p.consumeToken("]")
707 }
708
709 // Consume everything up to "]"
710 var parts []string
711 for tok.value != "]" {
712 parts = append(parts, tok.value)
713 tok = p.next()
714 if tok.err != nil {
715 return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
716 }
717 if p.done && tok.value != "]" {
718 return "", p.errorf("unclosed type_url or extension name")
719 }
720 }
721 return strings.Join(parts, ""), nil
722}
723
724// consumeOptionalSeparator consumes an optional semicolon or comma.
725// It is used in readStruct to provide backward compatibility.
726func (p *textParser) consumeOptionalSeparator() error {
727 tok := p.next()
728 if tok.err != nil {
729 return tok.err
730 }
731 if tok.value != ";" && tok.value != "," {
732 p.back()
733 }
734 return nil
735}
736
737func (p *textParser) readAny(v reflect.Value, props *Properties) error {
738 tok := p.next()
739 if tok.err != nil {
740 return tok.err
741 }
742 if tok.value == "" {
743 return p.errorf("unexpected EOF")
744 }
745
746 switch fv := v; fv.Kind() {
747 case reflect.Slice:
748 at := v.Type()
749 if at.Elem().Kind() == reflect.Uint8 {
750 // Special case for []byte
751 if tok.value[0] != '"' && tok.value[0] != '\'' {
752 // Deliberately written out here, as the error after
753 // this switch statement would write "invalid []byte: ...",
754 // which is not as user-friendly.
755 return p.errorf("invalid string: %v", tok.value)
756 }
757 bytes := []byte(tok.unquoted)
758 fv.Set(reflect.ValueOf(bytes))
759 return nil
760 }
761 // Repeated field.
762 if tok.value == "[" {
763 // Repeated field with list notation, like [1,2,3].
764 for {
765 fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
766 err := p.readAny(fv.Index(fv.Len()-1), props)
767 if err != nil {
768 return err
769 }
770 tok := p.next()
771 if tok.err != nil {
772 return tok.err
773 }
774 if tok.value == "]" {
775 break
776 }
777 if tok.value != "," {
778					return p.errorf("expected ']' or ',', found %q", tok.value)
779 }
780 }
781 return nil
782 }
783 // One value of the repeated field.
784 p.back()
785 fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
786 return p.readAny(fv.Index(fv.Len()-1), props)
787 case reflect.Bool:
788 // true/1/t/True or false/f/0/False.
789 switch tok.value {
790 case "true", "1", "t", "True":
791 fv.SetBool(true)
792 return nil
793 case "false", "0", "f", "False":
794 fv.SetBool(false)
795 return nil
796 }
797 case reflect.Float32, reflect.Float64:
798 v := tok.value
799 // Ignore 'f' for compatibility with output generated by C++, but don't
800 // remove 'f' when the value is "-inf" or "inf".
801 if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
802 v = v[:len(v)-1]
803 }
804 if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
805 fv.SetFloat(f)
806 return nil
807 }
808 case reflect.Int32:
809 if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
810 fv.SetInt(x)
811 return nil
812 }
813
814 if len(props.Enum) == 0 {
815 break
816 }
817 m, ok := enumValueMaps[props.Enum]
818 if !ok {
819 break
820 }
821 x, ok := m[tok.value]
822 if !ok {
823 break
824 }
825 fv.SetInt(int64(x))
826 return nil
827 case reflect.Int64:
828 if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
829 fv.SetInt(x)
830 return nil
831 }
832
833 case reflect.Ptr:
834 // A basic field (indirected through pointer), or a repeated message/group
835 p.back()
836 fv.Set(reflect.New(fv.Type().Elem()))
837 return p.readAny(fv.Elem(), props)
838 case reflect.String:
839 if tok.value[0] == '"' || tok.value[0] == '\'' {
840 fv.SetString(tok.unquoted)
841 return nil
842 }
843 case reflect.Struct:
844 var terminator string
845 switch tok.value {
846 case "{":
847 terminator = "}"
848 case "<":
849 terminator = ">"
850 default:
851 return p.errorf("expected '{' or '<', found %q", tok.value)
852 }
853 // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
854 return p.readStruct(fv, terminator)
855 case reflect.Uint32:
856 if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
857 fv.SetUint(uint64(x))
858 return nil
859 }
860 case reflect.Uint64:
861 if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
862 fv.SetUint(x)
863 return nil
864 }
865 }
866 return p.errorf("invalid %v: %v", v.Type(), tok.value)
867}
868
869// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
870// before starting to unmarshal, so any existing data in pb is always removed.
871// If a required field is not set and no other error occurs,
872// UnmarshalText returns *RequiredNotSetError.
873func UnmarshalText(s string, pb Message) error {
874 if um, ok := pb.(encoding.TextUnmarshaler); ok {
875 return um.UnmarshalText([]byte(s))
876 }
877 pb.Reset()
878 v := reflect.ValueOf(pb)
879 return newTextParser(s).readStruct(v.Elem(), "")
880}
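For orientation, a minimal usage sketch (editorial, not part of the vendored file above) showing how this parser is normally driven through proto.UnmarshalText. It parses into the google.protobuf.Any type vendored below, and exercises two behaviors defined above: next() concatenating adjacent quoted strings, and unquoteC/unescape decoding \xNN escapes. The type URL "example.com/x.y.Z" is an illustrative placeholder, not a registered message name.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	anypb "github.com/golang/protobuf/ptypes/any"
)

func main() {
	// Adjacent quoted strings are concatenated by the tokenizer, and the
	// backslash escapes in the raw string are decoded by unquoteC/unescape.
	a := &anypb.Any{}
	src := `type_url: "example.com/" "x.y.Z" value: "\x01\x02"`
	if err := proto.UnmarshalText(src, a); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(a.GetTypeUrl(), a.GetValue()) // example.com/x.y.Z [1 2]
}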
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..70276e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package ptypes
33
34// This file implements functions to marshal proto.Message to/from
35// google.protobuf.Any message.
36
37import (
38 "fmt"
39 "reflect"
40 "strings"
41
42 "github.com/golang/protobuf/proto"
43 "github.com/golang/protobuf/ptypes/any"
44)
45
46const googleApis = "type.googleapis.com/"
47
48// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
49//
50// Note that regular type assertions should be done using the Is
51// function. AnyMessageName is provided for less common use cases like filtering a
52// sequence of Any messages based on a set of allowed message type names.
53func AnyMessageName(any *any.Any) (string, error) {
54 if any == nil {
55 return "", fmt.Errorf("message is nil")
56 }
57 slash := strings.LastIndex(any.TypeUrl, "/")
58 if slash < 0 {
59 return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
60 }
61 return any.TypeUrl[slash+1:], nil
62}
63
64// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
65func MarshalAny(pb proto.Message) (*any.Any, error) {
66 value, err := proto.Marshal(pb)
67 if err != nil {
68 return nil, err
69 }
70 return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
71}
72
73// DynamicAny is a value that can be passed to UnmarshalAny to automatically
74// allocate a proto.Message for the type specified in a google.protobuf.Any
75// message. The allocated message is stored in the embedded proto.Message.
76//
77// Example:
78//
79// var x ptypes.DynamicAny
80// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
81// fmt.Printf("unmarshaled message: %v", x.Message)
82type DynamicAny struct {
83 proto.Message
84}
85
86// Empty returns a new proto.Message of the type specified in a
87// google.protobuf.Any message. It returns an error if the corresponding message
88// type isn't linked in.
89func Empty(any *any.Any) (proto.Message, error) {
90 aname, err := AnyMessageName(any)
91 if err != nil {
92 return nil, err
93 }
94
95 t := proto.MessageType(aname)
96 if t == nil {
97 return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
98 }
99 return reflect.New(t.Elem()).Interface().(proto.Message), nil
100}
101
102// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
103// message and places the decoded result in pb. It returns an error if the type of
104// the contents of the Any message does not match the type of the pb message.
105//
106// pb can be a proto.Message, or a *DynamicAny.
107func UnmarshalAny(any *any.Any, pb proto.Message) error {
108 if d, ok := pb.(*DynamicAny); ok {
109 if d.Message == nil {
110 var err error
111 d.Message, err = Empty(any)
112 if err != nil {
113 return err
114 }
115 }
116 return UnmarshalAny(any, d.Message)
117 }
118
119 aname, err := AnyMessageName(any)
120 if err != nil {
121 return err
122 }
123
124 mname := proto.MessageName(pb)
125 if aname != mname {
126 return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
127 }
128 return proto.Unmarshal(any.Value, pb)
129}
130
131// Is reports whether the Any value contains a message of the given type.
132func Is(any *any.Any, pb proto.Message) bool {
133 // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
134 // but it avoids scanning TypeUrl for the slash.
135 if any == nil {
136 return false
137 }
138 name := proto.MessageName(pb)
139 prefix := len(any.TypeUrl) - len(name)
140 return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
141}
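A short usage sketch (editorial, not part of the vendored package) of the helpers above, using only APIs defined in this diff: it packs the Duration well-known type into an Any with MarshalAny, then recovers it both into a concrete message after an Is check and via DynamicAny, which allocates the concrete type from the type URL.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a Duration into an Any.
	orig := &durpb.Duration{Seconds: 5}
	a, err := ptypes.MarshalAny(orig)
	if err != nil {
		panic(err)
	}

	// 1) Unmarshal into a known concrete type after checking Is.
	if ptypes.Is(a, &durpb.Duration{}) {
		back := &durpb.Duration{}
		if err := ptypes.UnmarshalAny(a, back); err != nil {
			panic(err)
		}
		fmt.Println(back.GetSeconds()) // 5
	}

	// 2) Let DynamicAny allocate the concrete type from the type URL.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}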
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..e3c56d3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,191 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/protobuf/any.proto
3
4package any // import "github.com/golang/protobuf/ptypes/any"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21// `Any` contains an arbitrary serialized protocol buffer message along with a
22// URL that describes the type of the serialized message.
23//
24// Protobuf library provides support to pack/unpack Any values in the form
25// of utility functions or additional generated methods of the Any type.
26//
27// Example 1: Pack and unpack a message in C++.
28//
29// Foo foo = ...;
30// Any any;
31// any.PackFrom(foo);
32// ...
33// if (any.UnpackTo(&foo)) {
34// ...
35// }
36//
37// Example 2: Pack and unpack a message in Java.
38//
39// Foo foo = ...;
40// Any any = Any.pack(foo);
41// ...
42// if (any.is(Foo.class)) {
43// foo = any.unpack(Foo.class);
44// }
45//
46// Example 3: Pack and unpack a message in Python.
47//
48// foo = Foo(...)
49// any = Any()
50// any.Pack(foo)
51// ...
52// if any.Is(Foo.DESCRIPTOR):
53// any.Unpack(foo)
54// ...
55//
56// Example 4: Pack and unpack a message in Go
57//
58// foo := &pb.Foo{...}
59// any, err := ptypes.MarshalAny(foo)
60// ...
61// foo := &pb.Foo{}
62// if err := ptypes.UnmarshalAny(any, foo); err != nil {
63// ...
64// }
65//
66// The pack methods provided by protobuf library will by default use
67// 'type.googleapis.com/full.type.name' as the type URL and the unpack
68// methods only use the fully qualified type name after the last '/'
69// in the type URL, for example "foo.bar.com/x/y.z" will yield type
70// name "y.z".
71//
72//
73// JSON
74// ====
75// The JSON representation of an `Any` value uses the regular
76// representation of the deserialized, embedded message, with an
77// additional field `@type` which contains the type URL. Example:
78//
79// package google.profile;
80// message Person {
81// string first_name = 1;
82// string last_name = 2;
83// }
84//
85// {
86// "@type": "type.googleapis.com/google.profile.Person",
87// "firstName": <string>,
88// "lastName": <string>
89// }
90//
91// If the embedded message type is well-known and has a custom JSON
92// representation, that representation will be embedded adding a field
93// `value` which holds the custom JSON in addition to the `@type`
94// field. Example (for message [google.protobuf.Duration][]):
95//
96// {
97// "@type": "type.googleapis.com/google.protobuf.Duration",
98// "value": "1.212s"
99// }
100//
101type Any struct {
102 // A URL/resource name whose content describes the type of the
103 // serialized protocol buffer message.
104 //
105 // For URLs which use the scheme `http`, `https`, or no scheme, the
106 // following restrictions and interpretations apply:
107 //
108 // * If no scheme is provided, `https` is assumed.
109 // * The last segment of the URL's path must represent the fully
110 // qualified name of the type (as in `path/google.protobuf.Duration`).
111 // The name should be in a canonical form (e.g., leading "." is
112 // not accepted).
113 // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
114 // value in binary format, or produce an error.
115 // * Applications are allowed to cache lookup results based on the
116 // URL, or have them precompiled into a binary to avoid any
117 // lookup. Therefore, binary compatibility needs to be preserved
118 // on changes to types. (Use versioned type names to manage
119 // breaking changes.)
120 //
121 // Schemes other than `http`, `https` (or the empty scheme) might be
122 // used with implementation specific semantics.
123 //
124 TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
125 // Must be a valid serialized protocol buffer of the above specified type.
126 Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
127 XXX_NoUnkeyedLiteral struct{} `json:"-"`
128 XXX_unrecognized []byte `json:"-"`
129 XXX_sizecache int32 `json:"-"`
130}
131
132func (m *Any) Reset() { *m = Any{} }
133func (m *Any) String() string { return proto.CompactTextString(m) }
134func (*Any) ProtoMessage() {}
135func (*Any) Descriptor() ([]byte, []int) {
136 return fileDescriptor_any_744b9ca530f228db, []int{0}
137}
138func (*Any) XXX_WellKnownType() string { return "Any" }
139func (m *Any) XXX_Unmarshal(b []byte) error {
140 return xxx_messageInfo_Any.Unmarshal(m, b)
141}
142func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
143 return xxx_messageInfo_Any.Marshal(b, m, deterministic)
144}
145func (dst *Any) XXX_Merge(src proto.Message) {
146 xxx_messageInfo_Any.Merge(dst, src)
147}
148func (m *Any) XXX_Size() int {
149 return xxx_messageInfo_Any.Size(m)
150}
151func (m *Any) XXX_DiscardUnknown() {
152 xxx_messageInfo_Any.DiscardUnknown(m)
153}
154
155var xxx_messageInfo_Any proto.InternalMessageInfo
156
157func (m *Any) GetTypeUrl() string {
158 if m != nil {
159 return m.TypeUrl
160 }
161 return ""
162}
163
164func (m *Any) GetValue() []byte {
165 if m != nil {
166 return m.Value
167 }
168 return nil
169}
170
171func init() {
172 proto.RegisterType((*Any)(nil), "google.protobuf.Any")
173}
174
175func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
176
177var fileDescriptor_any_744b9ca530f228db = []byte{
178 // 185 bytes of a gzipped FileDescriptorProto
179 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
180 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
181 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
182 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
183 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
184 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
185 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
186 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
187 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
188 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
189 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
190 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
191}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..c748667
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,149 @@
1// Protocol Buffers - Google's data interchange format
2// Copyright 2008 Google Inc. All rights reserved.
3// https://developers.google.com/protocol-buffers/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31syntax = "proto3";
32
33package google.protobuf;
34
35option csharp_namespace = "Google.Protobuf.WellKnownTypes";
36option go_package = "github.com/golang/protobuf/ptypes/any";
37option java_package = "com.google.protobuf";
38option java_outer_classname = "AnyProto";
39option java_multiple_files = true;
40option objc_class_prefix = "GPB";
41
42// `Any` contains an arbitrary serialized protocol buffer message along with a
43// URL that describes the type of the serialized message.
44//
45// Protobuf library provides support to pack/unpack Any values in the form
46// of utility functions or additional generated methods of the Any type.
47//
48// Example 1: Pack and unpack a message in C++.
49//
50// Foo foo = ...;
51// Any any;
52// any.PackFrom(foo);
53// ...
54// if (any.UnpackTo(&foo)) {
55// ...
56// }
57//
58// Example 2: Pack and unpack a message in Java.
59//
60// Foo foo = ...;
61// Any any = Any.pack(foo);
62// ...
63// if (any.is(Foo.class)) {
64// foo = any.unpack(Foo.class);
65// }
66//
67// Example 3: Pack and unpack a message in Python.
68//
69// foo = Foo(...)
70// any = Any()
71// any.Pack(foo)
72// ...
73// if any.Is(Foo.DESCRIPTOR):
74// any.Unpack(foo)
75// ...
76//
77// Example 4: Pack and unpack a message in Go
78//
79// foo := &pb.Foo{...}
80// any, err := ptypes.MarshalAny(foo)
81// ...
82// foo := &pb.Foo{}
83// if err := ptypes.UnmarshalAny(any, foo); err != nil {
84// ...
85// }
86//
87// The pack methods provided by protobuf library will by default use
88// 'type.googleapis.com/full.type.name' as the type URL and the unpack
89// methods only use the fully qualified type name after the last '/'
90// in the type URL, for example "foo.bar.com/x/y.z" will yield type
91// name "y.z".
92//
93//
94// JSON
95// ====
96// The JSON representation of an `Any` value uses the regular
97// representation of the deserialized, embedded message, with an
98// additional field `@type` which contains the type URL. Example:
99//
100// package google.profile;
101// message Person {
102// string first_name = 1;
103// string last_name = 2;
104// }
105//
106// {
107// "@type": "type.googleapis.com/google.profile.Person",
108// "firstName": <string>,
109// "lastName": <string>
110// }
111//
112// If the embedded message type is well-known and has a custom JSON
113// representation, that representation will be embedded adding a field
114// `value` which holds the custom JSON in addition to the `@type`
115// field. Example (for message [google.protobuf.Duration][]):
116//
117// {
118// "@type": "type.googleapis.com/google.protobuf.Duration",
119// "value": "1.212s"
120// }
121//
122message Any {
123 // A URL/resource name whose content describes the type of the
124 // serialized protocol buffer message.
125 //
126 // For URLs which use the scheme `http`, `https`, or no scheme, the
127 // following restrictions and interpretations apply:
128 //
129 // * If no scheme is provided, `https` is assumed.
130 // * The last segment of the URL's path must represent the fully
131 // qualified name of the type (as in `path/google.protobuf.Duration`).
132 // The name should be in a canonical form (e.g., leading "." is
133 // not accepted).
134 // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
135 // value in binary format, or produce an error.
136 // * Applications are allowed to cache lookup results based on the
137 // URL, or have them precompiled into a binary to avoid any
138 // lookup. Therefore, binary compatibility needs to be preserved
139 // on changes to types. (Use versioned type names to manage
140 // breaking changes.)
141 //
142 // Schemes other than `http`, `https` (or the empty scheme) might be
143 // used with implementation specific semantics.
144 //
145 string type_url = 1;
146
147 // Must be a valid serialized protocol buffer of the above specified type.
148 bytes value = 2;
149}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32/*
33Package ptypes contains code for interacting with well-known types.
34*/
35package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..65cb0f8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package ptypes
33
34// This file implements conversions between google.protobuf.Duration
35// and time.Duration.
36
37import (
38 "errors"
39 "fmt"
40 "time"
41
42 durpb "github.com/golang/protobuf/ptypes/duration"
43)
44
45const (
46 // Range of a durpb.Duration in seconds, as specified in
47 // google/protobuf/duration.proto. This is about 10,000 years in seconds.
48 maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
49 minSeconds = -maxSeconds
50)
51
52// validateDuration determines whether the durpb.Duration is valid according to the
53// definition in google/protobuf/duration.proto. A valid durpb.Duration
54// may still be too large to fit into a time.Duration (the range of durpb.Duration
55// is about 10,000 years, and the range of time.Duration is about 290 years).
56func validateDuration(d *durpb.Duration) error {
57 if d == nil {
58 return errors.New("duration: nil Duration")
59 }
60 if d.Seconds < minSeconds || d.Seconds > maxSeconds {
61 return fmt.Errorf("duration: %v: seconds out of range", d)
62 }
63 if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
64 return fmt.Errorf("duration: %v: nanos out of range", d)
65 }
66 // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
67 if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
68 return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
69 }
70 return nil
71}
72
73// Duration converts a durpb.Duration to a time.Duration. Duration
74// returns an error if the durpb.Duration is invalid or is too large to be
75// represented in a time.Duration.
76func Duration(p *durpb.Duration) (time.Duration, error) {
77 if err := validateDuration(p); err != nil {
78 return 0, err
79 }
80 d := time.Duration(p.Seconds) * time.Second
81 if int64(d/time.Second) != p.Seconds {
82 return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
83 }
84 if p.Nanos != 0 {
85 d += time.Duration(p.Nanos)
86 if (d < 0) != (p.Nanos < 0) {
87 return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
88 }
89 }
90 return d, nil
91}
92
93// DurationProto converts a time.Duration to a durpb.Duration.
94func DurationProto(d time.Duration) *durpb.Duration {
95 nanos := d.Nanoseconds()
96 secs := nanos / 1e9
97 nanos -= secs * 1e9
98 return &durpb.Duration{
99 Seconds: secs,
100 Nanos: int32(nanos),
101 }
102}
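A brief round-trip sketch (editorial, not part of the vendored file) of the two conversion functions above: DurationProto splits a time.Duration into seconds and nanos, and Duration validates the proto and converts it back, returning an error for values outside the roughly 10,000-year range.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> durpb.Duration.
	p := ptypes.DurationProto(90 * time.Second)
	fmt.Println(p.GetSeconds(), p.GetNanos()) // 90 0

	// durpb.Duration -> time.Duration, with validation.
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30s
}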
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..a7beb2c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,159 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/protobuf/duration.proto
3
4package duration // import "github.com/golang/protobuf/ptypes/duration"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21// A Duration represents a signed, fixed-length span of time represented
22// as a count of seconds and fractions of seconds at nanosecond
23// resolution. It is independent of any calendar and concepts like "day"
24// or "month". It is related to Timestamp in that the difference between
25// two Timestamp values is a Duration and it can be added or subtracted
26// from a Timestamp. Range is approximately +-10,000 years.
27//
28// # Examples
29//
30// Example 1: Compute Duration from two Timestamps in pseudo code.
31//
32// Timestamp start = ...;
33// Timestamp end = ...;
34// Duration duration = ...;
35//
36// duration.seconds = end.seconds - start.seconds;
37// duration.nanos = end.nanos - start.nanos;
38//
39// if (duration.seconds < 0 && duration.nanos > 0) {
40// duration.seconds += 1;
41// duration.nanos -= 1000000000;
42// } else if (duration.seconds > 0 && duration.nanos < 0) {
43// duration.seconds -= 1;
44// duration.nanos += 1000000000;
45// }
46//
47// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
48//
49// Timestamp start = ...;
50// Duration duration = ...;
51// Timestamp end = ...;
52//
53// end.seconds = start.seconds + duration.seconds;
54// end.nanos = start.nanos + duration.nanos;
55//
56// if (end.nanos < 0) {
57// end.seconds -= 1;
58// end.nanos += 1000000000;
59// } else if (end.nanos >= 1000000000) {
60// end.seconds += 1;
61// end.nanos -= 1000000000;
62// }
63//
64// Example 3: Compute Duration from datetime.timedelta in Python.
65//
66// td = datetime.timedelta(days=3, minutes=10)
67// duration = Duration()
68// duration.FromTimedelta(td)
69//
70// # JSON Mapping
71//
72// In JSON format, the Duration type is encoded as a string rather than an
73// object, where the string ends in the suffix "s" (indicating seconds) and
74// is preceded by the number of seconds, with nanoseconds expressed as
75// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
76// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
77// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
78// microsecond should be expressed in JSON format as "3.000001s".
79//
80//
81type Duration struct {
82 // Signed seconds of the span of time. Must be from -315,576,000,000
83 // to +315,576,000,000 inclusive. Note: these bounds are computed from:
84 // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
85 Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
86 // Signed fractions of a second at nanosecond resolution of the span
87 // of time. Durations less than one second are represented with a 0
88 // `seconds` field and a positive or negative `nanos` field. For durations
89 // of one second or more, a non-zero value for the `nanos` field must be
90 // of the same sign as the `seconds` field. Must be from -999,999,999
91 // to +999,999,999 inclusive.
92 Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
93 XXX_NoUnkeyedLiteral struct{} `json:"-"`
94 XXX_unrecognized []byte `json:"-"`
95 XXX_sizecache int32 `json:"-"`
96}
97
98func (m *Duration) Reset() { *m = Duration{} }
99func (m *Duration) String() string { return proto.CompactTextString(m) }
100func (*Duration) ProtoMessage() {}
101func (*Duration) Descriptor() ([]byte, []int) {
102 return fileDescriptor_duration_e7d612259e3f0613, []int{0}
103}
104func (*Duration) XXX_WellKnownType() string { return "Duration" }
105func (m *Duration) XXX_Unmarshal(b []byte) error {
106 return xxx_messageInfo_Duration.Unmarshal(m, b)
107}
108func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
109 return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
110}
111func (dst *Duration) XXX_Merge(src proto.Message) {
112 xxx_messageInfo_Duration.Merge(dst, src)
113}
114func (m *Duration) XXX_Size() int {
115 return xxx_messageInfo_Duration.Size(m)
116}
117func (m *Duration) XXX_DiscardUnknown() {
118 xxx_messageInfo_Duration.DiscardUnknown(m)
119}
120
121var xxx_messageInfo_Duration proto.InternalMessageInfo
122
123func (m *Duration) GetSeconds() int64 {
124 if m != nil {
125 return m.Seconds
126 }
127 return 0
128}
129
130func (m *Duration) GetNanos() int32 {
131 if m != nil {
132 return m.Nanos
133 }
134 return 0
135}
136
137func init() {
138 proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
139}
140
141func init() {
142 proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
143}
144
145var fileDescriptor_duration_e7d612259e3f0613 = []byte{
146 // 190 bytes of a gzipped FileDescriptorProto
147 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
148 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
149 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
150 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
151 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
152 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
153 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
154 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
155 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
156 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
157 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
158 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
159}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
1// Protocol Buffers - Google's data interchange format
2// Copyright 2008 Google Inc. All rights reserved.
3// https://developers.google.com/protocol-buffers/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31syntax = "proto3";
32
33package google.protobuf;
34
35option csharp_namespace = "Google.Protobuf.WellKnownTypes";
36option cc_enable_arenas = true;
37option go_package = "github.com/golang/protobuf/ptypes/duration";
38option java_package = "com.google.protobuf";
39option java_outer_classname = "DurationProto";
40option java_multiple_files = true;
41option objc_class_prefix = "GPB";
42
43// A Duration represents a signed, fixed-length span of time represented
44// as a count of seconds and fractions of seconds at nanosecond
45// resolution. It is independent of any calendar and concepts like "day"
46// or "month". It is related to Timestamp in that the difference between
47// two Timestamp values is a Duration and it can be added or subtracted
48// from a Timestamp. Range is approximately +-10,000 years.
49//
50// # Examples
51//
52// Example 1: Compute Duration from two Timestamps in pseudo code.
53//
54// Timestamp start = ...;
55// Timestamp end = ...;
56// Duration duration = ...;
57//
58// duration.seconds = end.seconds - start.seconds;
59// duration.nanos = end.nanos - start.nanos;
60//
61// if (duration.seconds < 0 && duration.nanos > 0) {
62// duration.seconds += 1;
63// duration.nanos -= 1000000000;
64// } else if (duration.seconds > 0 && duration.nanos < 0) {
65// duration.seconds -= 1;
66// duration.nanos += 1000000000;
67// }
68//
69// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
70//
71// Timestamp start = ...;
72// Duration duration = ...;
73// Timestamp end = ...;
74//
75// end.seconds = start.seconds + duration.seconds;
76// end.nanos = start.nanos + duration.nanos;
77//
78// if (end.nanos < 0) {
79// end.seconds -= 1;
80// end.nanos += 1000000000;
81// } else if (end.nanos >= 1000000000) {
82// end.seconds += 1;
83// end.nanos -= 1000000000;
84// }
85//
86// Example 3: Compute Duration from datetime.timedelta in Python.
87//
88// td = datetime.timedelta(days=3, minutes=10)
89// duration = Duration()
90// duration.FromTimedelta(td)
91//
92// # JSON Mapping
93//
94// In JSON format, the Duration type is encoded as a string rather than an
95// object, where the string ends in the suffix "s" (indicating seconds) and
96// is preceded by the number of seconds, with nanoseconds expressed as
97// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
98// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
99// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
100// microsecond should be expressed in JSON format as "3.000001s".
101//
102//
103message Duration {
104
105 // Signed seconds of the span of time. Must be from -315,576,000,000
106 // to +315,576,000,000 inclusive. Note: these bounds are computed from:
107 // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
108 int64 seconds = 1;
109
110 // Signed fractions of a second at nanosecond resolution of the span
111 // of time. Durations less than one second are represented with a 0
112 // `seconds` field and a positive or negative `nanos` field. For durations
113 // of one second or more, a non-zero value for the `nanos` field must be
114 // of the same sign as the `seconds` field. Must be from -999,999,999
115 // to +999,999,999 inclusive.
116 int32 nanos = 2;
117}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..47f10db
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,134 @@
1// Go support for Protocol Buffers - Google's data interchange format
2//
3// Copyright 2016 The Go Authors. All rights reserved.
4// https://github.com/golang/protobuf
5//
6// Redistribution and use in source and binary forms, with or without
7// modification, are permitted provided that the following conditions are
8// met:
9//
10// * Redistributions of source code must retain the above copyright
11// notice, this list of conditions and the following disclaimer.
12// * Redistributions in binary form must reproduce the above
13// copyright notice, this list of conditions and the following disclaimer
14// in the documentation and/or other materials provided with the
15// distribution.
16// * Neither the name of Google Inc. nor the names of its
17// contributors may be used to endorse or promote products derived from
18// this software without specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32package ptypes
33
34// This file implements operations on google.protobuf.Timestamp.
35
36import (
37 "errors"
38 "fmt"
39 "time"
40
41 tspb "github.com/golang/protobuf/ptypes/timestamp"
42)
43
44const (
45 // Seconds field of the earliest valid Timestamp.
46 // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
47 minValidSeconds = -62135596800
48 // Seconds field just after the latest valid Timestamp.
49 // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
50 maxValidSeconds = 253402300800
51)
52
53// validateTimestamp determines whether a Timestamp is valid.
54// A valid timestamp represents a time in the range
55// [0001-01-01, 10000-01-01) and has a Nanos field
56// in the range [0, 1e9).
57//
58// If the Timestamp is valid, validateTimestamp returns nil.
59// Otherwise, it returns an error that describes
60// the problem.
61//
62// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
63func validateTimestamp(ts *tspb.Timestamp) error {
64 if ts == nil {
65 return errors.New("timestamp: nil Timestamp")
66 }
67 if ts.Seconds < minValidSeconds {
68 return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
69 }
70 if ts.Seconds >= maxValidSeconds {
71 return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
72 }
73 if ts.Nanos < 0 || ts.Nanos >= 1e9 {
74 return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
75 }
76 return nil
77}
78
79// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
80// It returns an error if the argument is invalid.
81//
82// Unlike most Go functions, if Timestamp returns an error, the first return value
83// is not the zero time.Time. Instead, it is the value obtained from the
84// time.Unix function when passed the contents of the Timestamp, in the UTC
85// locale. This may or may not be a meaningful time; many invalid Timestamps
86// do map to valid time.Times.
87//
88// A nil Timestamp returns an error. The first return value in that case is
89// undefined.
90func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
91	// Don't return the zero value on error, because it corresponds to a valid
92 // timestamp. Instead return whatever time.Unix gives us.
93 var t time.Time
94 if ts == nil {
95 t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
96 } else {
97 t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
98 }
99 return t, validateTimestamp(ts)
100}
101
102// TimestampNow returns a google.protobuf.Timestamp for the current time.
103func TimestampNow() *tspb.Timestamp {
104 ts, err := TimestampProto(time.Now())
105 if err != nil {
106 panic("ptypes: time.Now() out of Timestamp range")
107 }
108 return ts
109}
110
111// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
112// It returns an error if the resulting Timestamp is invalid.
113func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
114 seconds := t.Unix()
115 nanos := int32(t.Sub(time.Unix(seconds, 0)))
116 ts := &tspb.Timestamp{
117 Seconds: seconds,
118 Nanos: nanos,
119 }
120 if err := validateTimestamp(ts); err != nil {
121 return nil, err
122 }
123 return ts, nil
124}
125
126// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
127// Timestamps, it returns an error message in parentheses.
128func TimestampString(ts *tspb.Timestamp) string {
129 t, err := Timestamp(ts)
130 if err != nil {
131 return fmt.Sprintf("(%v)", err)
132 }
133 return t.Format(time.RFC3339Nano)
134}
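
A short usage sketch for the helpers defined in this file (added here for illustration only): round-trip a `time.Time` through the proto form and render it with `TimestampString`.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> *tspb.Timestamp via TimestampProto.
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		panic(err)
	}

	// *tspb.Timestamp -> time.Time; invalid input returns an error together
	// with a best-effort time.Unix value, as documented above.
	t, err := ptypes.Timestamp(ts)
	fmt.Println(t, err)

	// RFC 3339 rendering.
	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z
}
```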
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..8e76ae9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,175 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/protobuf/timestamp.proto
3
4package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21// A Timestamp represents a point in time independent of any time zone
22// or calendar, represented as seconds and fractions of seconds at
23// nanosecond resolution in UTC Epoch time. It is encoded using the
24// Proleptic Gregorian Calendar which extends the Gregorian calendar
25// backwards to year one. It is encoded assuming all minutes are 60
26// seconds long, i.e. leap seconds are "smeared" so that no leap second
27// table is needed for interpretation. Range is from
28// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
29// By restricting to that range, we ensure that we can convert to
30// and from RFC 3339 date strings.
31// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
32//
33// # Examples
34//
35// Example 1: Compute Timestamp from POSIX `time()`.
36//
37// Timestamp timestamp;
38// timestamp.set_seconds(time(NULL));
39// timestamp.set_nanos(0);
40//
41// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
42//
43// struct timeval tv;
44// gettimeofday(&tv, NULL);
45//
46// Timestamp timestamp;
47// timestamp.set_seconds(tv.tv_sec);
48// timestamp.set_nanos(tv.tv_usec * 1000);
49//
50// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
51//
52// FILETIME ft;
53// GetSystemTimeAsFileTime(&ft);
54// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
55//
56// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
57// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
58// Timestamp timestamp;
59// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
60// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
61//
62// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
63//
64// long millis = System.currentTimeMillis();
65//
66// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
67// .setNanos((int) ((millis % 1000) * 1000000)).build();
68//
69//
70// Example 5: Compute Timestamp from current time in Python.
71//
72// timestamp = Timestamp()
73// timestamp.GetCurrentTime()
74//
75// # JSON Mapping
76//
77// In JSON format, the Timestamp type is encoded as a string in the
78// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
79// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
80// where {year} is always expressed using four digits while {month}, {day},
81// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
82// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
83// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
84// is required, though only UTC (as indicated by "Z") is presently supported.
85//
86// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
87// 01:30 UTC on January 15, 2017.
88//
89// In JavaScript, one can convert a Date object to this format using the
90// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
91// method. In Python, a standard `datetime.datetime` object can be converted
92// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
93// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
94// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
95// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
96// to obtain a formatter capable of generating timestamps in this format.
97//
98//
99type Timestamp struct {
100 // Represents seconds of UTC time since Unix epoch
101 // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
102 // 9999-12-31T23:59:59Z inclusive.
103 Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
104 // Non-negative fractions of a second at nanosecond resolution. Negative
105 // second values with fractions must still have non-negative nanos values
106 // that count forward in time. Must be from 0 to 999,999,999
107 // inclusive.
108 Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
109 XXX_NoUnkeyedLiteral struct{} `json:"-"`
110 XXX_unrecognized []byte `json:"-"`
111 XXX_sizecache int32 `json:"-"`
112}
113
114func (m *Timestamp) Reset() { *m = Timestamp{} }
115func (m *Timestamp) String() string { return proto.CompactTextString(m) }
116func (*Timestamp) ProtoMessage() {}
117func (*Timestamp) Descriptor() ([]byte, []int) {
118 return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
119}
120func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
121func (m *Timestamp) XXX_Unmarshal(b []byte) error {
122 return xxx_messageInfo_Timestamp.Unmarshal(m, b)
123}
124func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
125 return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
126}
127func (dst *Timestamp) XXX_Merge(src proto.Message) {
128 xxx_messageInfo_Timestamp.Merge(dst, src)
129}
130func (m *Timestamp) XXX_Size() int {
131 return xxx_messageInfo_Timestamp.Size(m)
132}
133func (m *Timestamp) XXX_DiscardUnknown() {
134 xxx_messageInfo_Timestamp.DiscardUnknown(m)
135}
136
137var xxx_messageInfo_Timestamp proto.InternalMessageInfo
138
139func (m *Timestamp) GetSeconds() int64 {
140 if m != nil {
141 return m.Seconds
142 }
143 return 0
144}
145
146func (m *Timestamp) GetNanos() int32 {
147 if m != nil {
148 return m.Nanos
149 }
150 return 0
151}
152
153func init() {
154 proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
155}
156
157func init() {
158 proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
159}
160
161var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
162 // 191 bytes of a gzipped FileDescriptorProto
163 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
164 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
165 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
166 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
167 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
168 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
169 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
170 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
171 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
172 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
173 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
174 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
175}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..06750ab
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,133 @@
1// Protocol Buffers - Google's data interchange format
2// Copyright 2008 Google Inc. All rights reserved.
3// https://developers.google.com/protocol-buffers/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31syntax = "proto3";
32
33package google.protobuf;
34
35option csharp_namespace = "Google.Protobuf.WellKnownTypes";
36option cc_enable_arenas = true;
37option go_package = "github.com/golang/protobuf/ptypes/timestamp";
38option java_package = "com.google.protobuf";
39option java_outer_classname = "TimestampProto";
40option java_multiple_files = true;
41option objc_class_prefix = "GPB";
42
43// A Timestamp represents a point in time independent of any time zone
44// or calendar, represented as seconds and fractions of seconds at
45// nanosecond resolution in UTC Epoch time. It is encoded using the
46// Proleptic Gregorian Calendar which extends the Gregorian calendar
47// backwards to year one. It is encoded assuming all minutes are 60
48// seconds long, i.e. leap seconds are "smeared" so that no leap second
49// table is needed for interpretation. Range is from
50// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
51// By restricting to that range, we ensure that we can convert to
52// and from RFC 3339 date strings.
53// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
54//
55// # Examples
56//
57// Example 1: Compute Timestamp from POSIX `time()`.
58//
59// Timestamp timestamp;
60// timestamp.set_seconds(time(NULL));
61// timestamp.set_nanos(0);
62//
63// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
64//
65// struct timeval tv;
66// gettimeofday(&tv, NULL);
67//
68// Timestamp timestamp;
69// timestamp.set_seconds(tv.tv_sec);
70// timestamp.set_nanos(tv.tv_usec * 1000);
71//
72// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
73//
74// FILETIME ft;
75// GetSystemTimeAsFileTime(&ft);
76// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
77//
78// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
79// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
80// Timestamp timestamp;
81// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
82// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
83//
84// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
85//
86// long millis = System.currentTimeMillis();
87//
88// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
89// .setNanos((int) ((millis % 1000) * 1000000)).build();
90//
91//
92// Example 5: Compute Timestamp from current time in Python.
93//
94// timestamp = Timestamp()
95// timestamp.GetCurrentTime()
96//
97// # JSON Mapping
98//
99// In JSON format, the Timestamp type is encoded as a string in the
100// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
101// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
102// where {year} is always expressed using four digits while {month}, {day},
103// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
104// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
105// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
106// is required, though only UTC (as indicated by "Z") is presently supported.
107//
108// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
109// 01:30 UTC on January 15, 2017.
110//
111// In JavaScript, one can convert a Date object to this format using the
112// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
113// method. In Python, a standard `datetime.datetime` object can be converted
114// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
115// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
116// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
117// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
118// to obtain a formatter capable of generating timestamps in this format.
119//
120//
121message Timestamp {
122
123 // Represents seconds of UTC time since Unix epoch
124 // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
125 // 9999-12-31T23:59:59Z inclusive.
126 int64 seconds = 1;
127
128 // Non-negative fractions of a second at nanosecond resolution. Negative
129 // second values with fractions must still have non-negative nanos values
130 // that count forward in time. Must be from 0 to 999,999,999
131 // inclusive.
132 int32 nanos = 2;
133}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
index 7d8a57c..8d306bf 100644
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -26,6 +26,7 @@ func DefaultPooledTransport() *http.Transport {
26 DialContext: (&net.Dialer{ 26 DialContext: (&net.Dialer{
27 Timeout: 30 * time.Second, 27 Timeout: 30 * time.Second,
28 KeepAlive: 30 * time.Second, 28 KeepAlive: 30 * time.Second,
29 DualStack: true,
29 }).DialContext, 30 }).DialContext,
30 MaxIdleConns: 100, 31 MaxIdleConns: 100,
31 IdleConnTimeout: 90 * time.Second, 32 IdleConnTimeout: 90 * time.Second,
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod
new file mode 100644
index 0000000..310f075
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod
@@ -0,0 +1 @@
module github.com/hashicorp/go-cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 0000000..7eda377
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,43 @@
1package cleanhttp
2
3import (
4 "net/http"
5 "strings"
6 "unicode"
7)
8
9// HandlerInput provides input options to cleanhttp's handlers
10type HandlerInput struct {
11 ErrStatus int
12}
13
14// PrintablePathCheckHandler is a middleware that ensures the request path
15// contains only printable runes.
16func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
17 // Nil-check on input to make it optional
18 if input == nil {
19 input = &HandlerInput{
20 ErrStatus: http.StatusBadRequest,
21 }
22 }
23
24 // Default to http.StatusBadRequest on error
25 if input.ErrStatus == 0 {
26 input.ErrStatus = http.StatusBadRequest
27 }
28
29 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
30 // Check URL path for non-printable characters
31 idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
32 return !unicode.IsPrint(c)
33 })
34
35 if idx != -1 {
36 w.WriteHeader(input.ErrStatus)
37 return
38 }
39
40 next.ServeHTTP(w, r)
41 return
42 })
43}
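
To show how the handler above is meant to be wired in (an illustrative sketch, not part of the vendored code), wrap any `http.Handler` and pass `nil` to accept the `http.StatusBadRequest` default:

```go
package main

import (
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Requests whose paths contain non-printable runes are rejected with the
	// configured status; nil input falls back to http.StatusBadRequest.
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)

	http.ListenAndServe(":8080", handler)
}
```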
diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml
index 4438286..da804c2 100644
--- a/vendor/github.com/hashicorp/go-getter/.travis.yml
+++ b/vendor/github.com/hashicorp/go-getter/.travis.yml
@@ -10,8 +10,14 @@ addons:
10language: go 10language: go
11 11
12go: 12go:
13 - 1.5 13 - 1.8.x
14 - 1.9.x
15 - master
14 16
15branches: 17branches:
16 only: 18 only:
17 - master 19 - master
20
21matrix:
22 allow_failures:
23 - go: master
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
index 4a0b6a6..40ace74 100644
--- a/vendor/github.com/hashicorp/go-getter/README.md
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -21,8 +21,7 @@ URLs. For example: "github.com/hashicorp/go-getter" would turn into a
21Git URL. Or "./foo" would turn into a file URL. These are extensible. 21Git URL. Or "./foo" would turn into a file URL. These are extensible.
22 22
23This library is used by [Terraform](https://terraform.io) for 23This library is used by [Terraform](https://terraform.io) for
24downloading modules, [Otto](https://ottoproject.io) for dependencies and 24downloading modules and [Nomad](https://nomadproject.io) for downloading
25Appfile imports, and [Nomad](https://nomadproject.io) for downloading
26binaries. 25binaries.
27 26
28## Installation and Usage 27## Installation and Usage
@@ -119,6 +118,37 @@ The protocol-specific options are documented below the URL format
119section. But because they are part of the URL, we point it out here so 118section. But because they are part of the URL, we point it out here so
120you know they exist. 119you know they exist.
121 120
121### Subdirectories
122
123If you want to download only a specific subdirectory from a downloaded
124directory, you can specify a subdirectory after a double-slash `//`.
125go-getter will first download the URL specified _before_ the double-slash
126(as if you didn't specify a double-slash), but will then copy the
127path after the double slash into the target directory.
128
129For example, if you're downloading this GitHub repository, but you only
130want to download the `test-fixtures` directory, you can do the following:
131
132```
133https://github.com/hashicorp/go-getter.git//test-fixtures
134```
135
136If you downloaded this to the `/tmp` directory, then the file
137`/tmp/archive.gz` would exist. Notice that this file is in the `test-fixtures`
138directory in this repository, but because we specified a subdirectory,
139go-getter automatically copied only that directory's contents.
140
141Subdirectory paths may also use filesystem glob patterns.
142The path must match _exactly one_ entry or go-getter will return an error.
143This is useful if you're not sure of the exact directory name but it follows
144a predictable naming structure.
145
146For example, the following URL would also work:
147
148```
149https://github.com/hashicorp/go-getter.git//test-*
150```
151
122### Checksumming 152### Checksumming
123 153
124For file downloads of any protocol, go-getter can automatically verify 154For file downloads of any protocol, go-getter can automatically verify
@@ -154,9 +184,11 @@ The following archive formats are supported:
154 184
155 * `tar.gz` and `tgz` 185 * `tar.gz` and `tgz`
156 * `tar.bz2` and `tbz2` 186 * `tar.bz2` and `tbz2`
187 * `tar.xz` and `txz`
157 * `zip` 188 * `zip`
158 * `gz` 189 * `gz`
159 * `bz2` 190 * `bz2`
191 * `xz`
160 192
161For example, an example URL is shown below: 193For example, an example URL is shown below:
162 194
@@ -200,6 +232,9 @@ The options below are available to all protocols:
200 * `checksum` - Checksum to verify the downloaded file or archive. See 232 * `checksum` - Checksum to verify the downloaded file or archive. See
201 the entire section on checksumming above for format and more details. 233 the entire section on checksumming above for format and more details.
202 234
235 * `filename` - When in file download mode, allows specifying the name of the
236 downloaded file on disk. Has no effect in directory mode.
237
203### Local Files (`file`) 238### Local Files (`file`)
204 239
205None 240None
@@ -222,13 +257,17 @@ None
222 257
223### HTTP (`http`) 258### HTTP (`http`)
224 259
225None 260#### Basic Authentication
261
262To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
263hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
264characters, including the username and password, must be URL encoded.
226 265
227### S3 (`s3`) 266### S3 (`s3`)
228 267
229S3 takes various access configurations in the URL. Note that it will also 268S3 takes various access configurations in the URL. Note that it will also
230read these from standard AWS environment variables if they're set. If 269read these from standard AWS environment variables if they're set. S3 compliant servers like Minio
231the query parameters are present, these take priority. 270are also supported. If the query parameters are present, these take priority.
232 271
233 * `aws_access_key_id` - AWS access key. 272 * `aws_access_key_id` - AWS access key.
234 * `aws_access_key_secret` - AWS access key secret. 273 * `aws_access_key_secret` - AWS access key secret.
@@ -240,6 +279,14 @@ If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
240using credentials, then just omit these and the profile, if available will 279using credentials, then just omit these and the profile, if available will
241be used automatically. 280be used automatically.
242 281
282### Using S3 with Minio
283 If you use go-getter for Minio support, you must consider the following:
284
285 * `aws_access_key_id` (required) - Minio access key.
286 * `aws_access_key_secret` (required) - Minio access key secret.
287 * `region` (optional - defaults to us-east-1) - Region identifier to use.
288 * `version` (optional - defaults to Minio default) - Configuration file format.
289
243#### S3 Bucket Examples 290#### S3 Bucket Examples
244 291
245S3 has several addressing schemes used to reference your bucket. These are 292S3 has several addressing schemes used to reference your bucket. These are
@@ -250,4 +297,5 @@ Some examples for these addressing schemes:
250- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo 297- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
251- bucket.s3.amazonaws.com/foo 298- bucket.s3.amazonaws.com/foo
252- bucket.s3-eu-west-1.amazonaws.com/foo/bar 299- bucket.s3-eu-west-1.amazonaws.com/foo/bar
300- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"
253 301
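
The subdirectory and glob syntax documented in the README changes above can be exercised with a short sketch (illustrative only; it assumes go-getter's package-level `Get(dst, src)` helper):

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Download the repository, then copy only the directory matched by the
	// glob after the double-slash into the destination.
	src := "https://github.com/hashicorp/go-getter.git//test-*"
	if err := getter.Get("/tmp/go-getter-fixtures", src); err != nil {
		log.Fatal(err)
	}
}
```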
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
index 159dad4..ec48d45 100644
--- a/vendor/github.com/hashicorp/go-getter/appveyor.yml
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -1,5 +1,5 @@
1version: "build-{branch}-{build}" 1version: "build-{branch}-{build}"
2image: Visual Studio 2015 2image: Visual Studio 2017
3clone_folder: c:\gopath\github.com\hashicorp\go-getter 3clone_folder: c:\gopath\github.com\hashicorp\go-getter
4environment: 4environment:
5 GOPATH: c:\gopath 5 GOPATH: c:\gopath
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
index 876812a..300301c 100644
--- a/vendor/github.com/hashicorp/go-getter/client.go
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -17,6 +17,7 @@ import (
17 "strings" 17 "strings"
18 18
19 urlhelper "github.com/hashicorp/go-getter/helper/url" 19 urlhelper "github.com/hashicorp/go-getter/helper/url"
20 "github.com/hashicorp/go-safetemp"
20) 21)
21 22
22// Client is a client for downloading things. 23// Client is a client for downloading things.
@@ -100,17 +101,14 @@ func (c *Client) Get() error {
100 dst := c.Dst 101 dst := c.Dst
101 src, subDir := SourceDirSubdir(src) 102 src, subDir := SourceDirSubdir(src)
102 if subDir != "" { 103 if subDir != "" {
103 tmpDir, err := ioutil.TempDir("", "tf") 104 td, tdcloser, err := safetemp.Dir("", "getter")
104 if err != nil { 105 if err != nil {
105 return err 106 return err
106 } 107 }
107 if err := os.RemoveAll(tmpDir); err != nil { 108 defer tdcloser.Close()
108 return err
109 }
110 defer os.RemoveAll(tmpDir)
111 109
112 realDst = dst 110 realDst = dst
113 dst = tmpDir 111 dst = td
114 } 112 }
115 113
116 u, err := urlhelper.Parse(src) 114 u, err := urlhelper.Parse(src)
@@ -232,7 +230,18 @@ func (c *Client) Get() error {
232 // Destination is the base name of the URL path in "any" mode when 230 // Destination is the base name of the URL path in "any" mode when
233 // a file source is detected. 231 // a file source is detected.
234 if mode == ClientModeFile { 232 if mode == ClientModeFile {
235 dst = filepath.Join(dst, filepath.Base(u.Path)) 233 filename := filepath.Base(u.Path)
234
235 // Determine if we have a custom file name
236 if v := q.Get("filename"); v != "" {
237 // Delete the query parameter if we have it.
238 q.Del("filename")
239 u.RawQuery = q.Encode()
240
241 filename = v
242 }
243
244 dst = filepath.Join(dst, filename)
236 } 245 }
237 } 246 }
238 247
@@ -305,7 +314,13 @@ func (c *Client) Get() error {
305 return err 314 return err
306 } 315 }
307 316
308 return copyDir(realDst, filepath.Join(dst, subDir), false) 317 // Process any globs
318 subDir, err := SubdirGlob(dst, subDir)
319 if err != nil {
320 return err
321 }
322
323 return copyDir(realDst, subDir, false)
309 } 324 }
310 325
311 return nil 326 return nil
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
index d18174c..198bb0e 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -1,7 +1,15 @@
1package getter 1package getter
2 2
3import (
4 "strings"
5)
6
3// Decompressor defines the interface that must be implemented to add 7// Decompressor defines the interface that must be implemented to add
4// support for decompressing a type. 8// support for decompressing a type.
9//
10// Important: if you're implementing a decompressor, please use the
11// containsDotDot helper in this file to ensure that files can't be
12// decompressed outside of the specified directory.
5type Decompressor interface { 13type Decompressor interface {
6 // Decompress should decompress src to dst. dir specifies whether dst 14 // Decompress should decompress src to dst. dir specifies whether dst
7 // is a directory or single file. src is guaranteed to be a single file 15 // is a directory or single file. src is guaranteed to be a single file
@@ -16,14 +24,35 @@ var Decompressors map[string]Decompressor
16func init() { 24func init() {
17 tbzDecompressor := new(TarBzip2Decompressor) 25 tbzDecompressor := new(TarBzip2Decompressor)
18 tgzDecompressor := new(TarGzipDecompressor) 26 tgzDecompressor := new(TarGzipDecompressor)
27 txzDecompressor := new(TarXzDecompressor)
19 28
20 Decompressors = map[string]Decompressor{ 29 Decompressors = map[string]Decompressor{
21 "bz2": new(Bzip2Decompressor), 30 "bz2": new(Bzip2Decompressor),
22 "gz": new(GzipDecompressor), 31 "gz": new(GzipDecompressor),
32 "xz": new(XzDecompressor),
23 "tar.bz2": tbzDecompressor, 33 "tar.bz2": tbzDecompressor,
24 "tar.gz": tgzDecompressor, 34 "tar.gz": tgzDecompressor,
35 "tar.xz": txzDecompressor,
25 "tbz2": tbzDecompressor, 36 "tbz2": tbzDecompressor,
26 "tgz": tgzDecompressor, 37 "tgz": tgzDecompressor,
38 "txz": txzDecompressor,
27 "zip": new(ZipDecompressor), 39 "zip": new(ZipDecompressor),
28 } 40 }
29} 41}
42
43// containsDotDot checks if the filepath value v contains a ".." entry.
44// This will check filepath components by splitting along / or \. This
45// function is copied directly from the Go net/http implementation.
46func containsDotDot(v string) bool {
47 if !strings.Contains(v, "..") {
48 return false
49 }
50 for _, ent := range strings.FieldsFunc(v, isSlashRune) {
51 if ent == ".." {
52 return true
53 }
54 }
55 return false
56}
57
58func isSlashRune(r rune) bool { return r == '/' || r == '\\' }
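
Since the `Decompressors` registry above is exported, callers can pick a decompressor by the same extension keys the diff registers; a hedged sketch of that lookup:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// The keys match the extensions registered in init(), including the new
	// tar.xz/txz entries.
	d, ok := getter.Decompressors["tar.xz"]
	if !ok {
		log.Fatal("no decompressor registered for tar.xz")
	}

	// dir=true unpacks into a directory rather than to a single file.
	if err := d.Decompress("/tmp/unpacked", "/tmp/archive.tar.xz", true); err != nil {
		log.Fatal(err)
	}
}
```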
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
index 2001054..5ebf709 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -9,7 +9,7 @@ import (
9) 9)
10 10
11// GzipDecompressor is an implementation of Decompressor that can 11// GzipDecompressor is an implementation of Decompressor that can
12// decompress bz2 files. 12// decompress gzip files.
13type GzipDecompressor struct{} 13type GzipDecompressor struct{}
14 14
15func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { 15func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
new file mode 100644
index 0000000..39cb392
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
@@ -0,0 +1,138 @@
1package getter
2
3import (
4 "archive/tar"
5 "fmt"
6 "io"
7 "os"
8 "path/filepath"
9)
10
11// untar is a shared helper for untarring an archive. The reader should provide
12// an uncompressed view of the tar archive.
13func untar(input io.Reader, dst, src string, dir bool) error {
14 tarR := tar.NewReader(input)
15 done := false
16 dirHdrs := []*tar.Header{}
17 for {
18 hdr, err := tarR.Next()
19 if err == io.EOF {
20 if !done {
21 // Empty archive
22 return fmt.Errorf("empty archive: %s", src)
23 }
24
25 break
26 }
27 if err != nil {
28 return err
29 }
30
31 if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader {
32 // don't unpack extended headers as files
33 continue
34 }
35
36 path := dst
37 if dir {
38 // Disallow parent traversal
39 if containsDotDot(hdr.Name) {
40 return fmt.Errorf("entry contains '..': %s", hdr.Name)
41 }
42
43 path = filepath.Join(path, hdr.Name)
44 }
45
46 if hdr.FileInfo().IsDir() {
47 if !dir {
48 return fmt.Errorf("expected a single file: %s", src)
49 }
50
51 // A directory, just make the directory and continue unarchiving...
52 if err := os.MkdirAll(path, 0755); err != nil {
53 return err
54 }
55
56 // Record the directory information so that we may set its attributes
57 // after all files have been extracted
58 dirHdrs = append(dirHdrs, hdr)
59
60 continue
61 } else {
62 // There is no ordering guarantee that a file in a directory is
63 // listed before the directory
64 dstPath := filepath.Dir(path)
65
66 // Check that the directory exists, otherwise create it
67 if _, err := os.Stat(dstPath); os.IsNotExist(err) {
68 if err := os.MkdirAll(dstPath, 0755); err != nil {
69 return err
70 }
71 }
72 }
73
74 // We have a file. If we already decoded, then it is an error
75 if !dir && done {
76 return fmt.Errorf("expected a single file, got multiple: %s", src)
77 }
78
79		// Mark that we're done so that further files in single file mode error
80 done = true
81
82 // Open the file for writing
83 dstF, err := os.Create(path)
84 if err != nil {
85 return err
86 }
87 _, err = io.Copy(dstF, tarR)
88 dstF.Close()
89 if err != nil {
90 return err
91 }
92
93 // Chmod the file
94 if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
95 return err
96 }
97
98 // Set the access and modification time
99 if err := os.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
100 return err
101 }
102 }
103
104 // Adding a file or subdirectory changes the mtime of a directory
105 // We therefore wait until we've extracted everything and then set the mtime and atime attributes
106 for _, dirHdr := range dirHdrs {
107 path := filepath.Join(dst, dirHdr.Name)
108 if err := os.Chtimes(path, dirHdr.AccessTime, dirHdr.ModTime); err != nil {
109 return err
110 }
111 }
112
113 return nil
114}
115
116// tarDecompressor is an implementation of Decompressor that can
117// unpack tar files.
118type tarDecompressor struct{}
119
120func (d *tarDecompressor) Decompress(dst, src string, dir bool) error {
121 // If we're going into a directory we should make that first
122 mkdir := dst
123 if !dir {
124 mkdir = filepath.Dir(dst)
125 }
126 if err := os.MkdirAll(mkdir, 0755); err != nil {
127 return err
128 }
129
130 // File first
131 f, err := os.Open(src)
132 if err != nil {
133 return err
134 }
135 defer f.Close()
136
137 return untar(f, dst, src, dir)
138}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
index c46ed44..5391b5c 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
@@ -1,10 +1,7 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "archive/tar"
5 "compress/bzip2" 4 "compress/bzip2"
6 "fmt"
7 "io"
8 "os" 5 "os"
9 "path/filepath" 6 "path/filepath"
10) 7)
@@ -32,64 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
32 29
33 // Bzip2 compression is second 30 // Bzip2 compression is second
34 bzipR := bzip2.NewReader(f) 31 bzipR := bzip2.NewReader(f)
35 32 return untar(bzipR, dst, src, dir)
36 // Once bzip decompressed we have a tar format
37 tarR := tar.NewReader(bzipR)
38 done := false
39 for {
40 hdr, err := tarR.Next()
41 if err == io.EOF {
42 if !done {
43 // Empty archive
44 return fmt.Errorf("empty archive: %s", src)
45 }
46
47 return nil
48 }
49 if err != nil {
50 return err
51 }
52
53 path := dst
54 if dir {
55 path = filepath.Join(path, hdr.Name)
56 }
57
58 if hdr.FileInfo().IsDir() {
59 if dir {
60 return fmt.Errorf("expected a single file: %s", src)
61 }
62
63 // A directory, just make the directory and continue unarchiving...
64 if err := os.MkdirAll(path, 0755); err != nil {
65 return err
66 }
67
68 continue
69 }
70
71 // We have a file. If we already decoded, then it is an error
72 if !dir && done {
73 return fmt.Errorf("expected a single file, got multiple: %s", src)
74 }
75
76 // Mark that we're done so future in single file mode errors
77 done = true
78
79 // Open the file for writing
80 dstF, err := os.Create(path)
81 if err != nil {
82 return err
83 }
84 _, err = io.Copy(dstF, tarR)
85 dstF.Close()
86 if err != nil {
87 return err
88 }
89
90 // Chmod the file
91 if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
92 return err
93 }
94 }
95} 33}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
index 686d6c2..91cf33d 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -11,7 +11,9 @@ import (
11 "runtime" 11 "runtime"
12 "sort" 12 "sort"
13 "strings" 13 "strings"
14 "testing" 14 "time"
15
16 "github.com/mitchellh/go-testing-interface"
15) 17)
16 18
17// TestDecompressCase is a single test case for testing decompressors 19// TestDecompressCase is a single test case for testing decompressors
@@ -21,10 +23,11 @@ type TestDecompressCase struct {
21 Err bool // Err is whether we expect an error or not 23 Err bool // Err is whether we expect an error or not
22 DirList []string // DirList is the list of files for Dir mode 24 DirList []string // DirList is the list of files for Dir mode
23 FileMD5 string // FileMD5 is the expected MD5 for a single file 25 FileMD5 string // FileMD5 is the expected MD5 for a single file
26 Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode)
24} 27}
25 28
26// TestDecompressor is a helper function for testing generic decompressors. 29// TestDecompressor is a helper function for testing generic decompressors.
27func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) { 30func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
28 for _, tc := range cases { 31 for _, tc := range cases {
29 t.Logf("Testing: %s", tc.Input) 32 t.Logf("Testing: %s", tc.Input)
30 33
@@ -67,6 +70,14 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase)
67 } 70 }
68 } 71 }
69 72
73 if tc.Mtime != nil {
74 actual := fi.ModTime()
75 expected := *tc.Mtime
76 if actual != expected {
77 t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String())
78 }
79 }
80
70 return 81 return
71 } 82 }
72 83
@@ -83,11 +94,26 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase)
83 if !reflect.DeepEqual(actual, expected) { 94 if !reflect.DeepEqual(actual, expected) {
84 t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) 95 t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
85 } 96 }
97 // Check for correct atime/mtime
98 for _, dir := range actual {
99 path := filepath.Join(dst, dir)
100 if tc.Mtime != nil {
101 fi, err := os.Stat(path)
102 if err != nil {
103 t.Fatalf("err: %s", err)
104 }
105 actual := fi.ModTime()
106 expected := *tc.Mtime
107 if actual != expected {
108 t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String())
109 }
110 }
111 }
86 }() 112 }()
87 } 113 }
88} 114}
89 115
90func testListDir(t *testing.T, path string) []string { 116func testListDir(t testing.T, path string) []string {
91 var result []string 117 var result []string
92 err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { 118 err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
93 if err != nil { 119 if err != nil {
@@ -102,7 +128,7 @@ func testListDir(t *testing.T, path string) []string {
102 128
103 // If it is a dir, add trailing sep 129 // If it is a dir, add trailing sep
104 if info.IsDir() { 130 if info.IsDir() {
105 sub += "/" 131 sub += string(os.PathSeparator)
106 } 132 }
107 133
108 result = append(result, sub) 134 result = append(result, sub)
@@ -116,7 +142,7 @@ func testListDir(t *testing.T, path string) []string {
116 return result 142 return result
117} 143}
118 144
119func testMD5(t *testing.T, path string) string { 145func testMD5(t testing.T, path string) string {
120 f, err := os.Open(path) 146 f, err := os.Open(path)
121 if err != nil { 147 if err != nil {
122 t.Fatalf("err: %s", err) 148 t.Fatalf("err: %s", err)
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
index e8b1c31..65eb70d 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -1,10 +1,8 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "archive/tar"
5 "compress/gzip" 4 "compress/gzip"
6 "fmt" 5 "fmt"
7 "io"
8 "os" 6 "os"
9 "path/filepath" 7 "path/filepath"
10) 8)
@@ -37,63 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
37 } 35 }
38 defer gzipR.Close() 36 defer gzipR.Close()
39 37
40 // Once gzip decompressed we have a tar format 38 return untar(gzipR, dst, src, dir)
41 tarR := tar.NewReader(gzipR)
42 done := false
43 for {
44 hdr, err := tarR.Next()
45 if err == io.EOF {
46 if !done {
47 // Empty archive
48 return fmt.Errorf("empty archive: %s", src)
49 }
50
51 return nil
52 }
53 if err != nil {
54 return err
55 }
56
57 path := dst
58 if dir {
59 path = filepath.Join(path, hdr.Name)
60 }
61
62 if hdr.FileInfo().IsDir() {
63 if !dir {
64 return fmt.Errorf("expected a single file: %s", src)
65 }
66
67 // A directory, just make the directory and continue unarchiving...
68 if err := os.MkdirAll(path, 0755); err != nil {
69 return err
70 }
71
72 continue
73 }
74
75 // We have a file. If we already decoded, then it is an error
76 if !dir && done {
77 return fmt.Errorf("expected a single file, got multiple: %s", src)
78 }
79
80 // Mark that we're done so future in single file mode errors
81 done = true
82
83 // Open the file for writing
84 dstF, err := os.Create(path)
85 if err != nil {
86 return err
87 }
88 _, err = io.Copy(dstF, tarR)
89 dstF.Close()
90 if err != nil {
91 return err
92 }
93
94 // Chmod the file
95 if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
96 return err
97 }
98 }
99} 39}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go
new file mode 100644
index 0000000..5e151c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go
@@ -0,0 +1,39 @@
1package getter
2
3import (
4 "fmt"
5 "os"
6 "path/filepath"
7
8 "github.com/ulikunitz/xz"
9)
10
11// TarXzDecompressor is an implementation of Decompressor that can
12// decompress tar.xz files.
13type TarXzDecompressor struct{}
14
15func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error {
16 // If we're going into a directory we should make that first
17 mkdir := dst
18 if !dir {
19 mkdir = filepath.Dir(dst)
20 }
21 if err := os.MkdirAll(mkdir, 0755); err != nil {
22 return err
23 }
24
25 // File first
26 f, err := os.Open(src)
27 if err != nil {
28 return err
29 }
30 defer f.Close()
31
32 // xz compression is second
33 txzR, err := xz.NewReader(f)
34 if err != nil {
35 return fmt.Errorf("Error opening an xz reader for %s: %s", src, err)
36 }
37
38 return untar(txzR, dst, src, dir)
39}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go
new file mode 100644
index 0000000..4e37aba
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go
@@ -0,0 +1,49 @@
1package getter
2
3import (
4 "fmt"
5 "io"
6 "os"
7 "path/filepath"
8
9 "github.com/ulikunitz/xz"
10)
11
12// XzDecompressor is an implementation of Decompressor that can
13// decompress xz files.
14type XzDecompressor struct{}
15
16func (d *XzDecompressor) Decompress(dst, src string, dir bool) error {
17 // Directory isn't supported at all
18 if dir {
19 return fmt.Errorf("xz-compressed files can only unarchive to a single file")
20 }
21
22 // If we're going into a directory we should make that first
23 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
24 return err
25 }
26
27 // File first
28 f, err := os.Open(src)
29 if err != nil {
30 return err
31 }
32 defer f.Close()
33
34 // xz compression is second
35 xzR, err := xz.NewReader(f)
36 if err != nil {
37 return err
38 }
39
40 // Copy it out
41 dstF, err := os.Create(dst)
42 if err != nil {
43 return err
44 }
45 defer dstF.Close()
46
47 _, err = io.Copy(dstF, xzR)
48 return err
49}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
index a065c07..b0e70ca 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -42,6 +42,11 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
42 for _, f := range zipR.File { 42 for _, f := range zipR.File {
43 path := dst 43 path := dst
44 if dir { 44 if dir {
45 // Disallow parent traversal
46 if containsDotDot(f.Name) {
47 return fmt.Errorf("entry contains '..': %s", f.Name)
48 }
49
45 path = filepath.Join(path, f.Name) 50 path = filepath.Join(path, f.Name)
46 } 51 }
47 52
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
index 481b737..c369551 100644
--- a/vendor/github.com/hashicorp/go-getter/detect.go
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -72,12 +72,18 @@ func Detect(src string, pwd string, ds []Detector) (string, error) {
72 subDir = detectSubdir 72 subDir = detectSubdir
73 } 73 }
74 } 74 }
75
75 if subDir != "" { 76 if subDir != "" {
76 u, err := url.Parse(result) 77 u, err := url.Parse(result)
77 if err != nil { 78 if err != nil {
78 return "", fmt.Errorf("Error parsing URL: %s", err) 79 return "", fmt.Errorf("Error parsing URL: %s", err)
79 } 80 }
80 u.Path += "//" + subDir 81 u.Path += "//" + subDir
82
83 // a subdir may contain wildcards, but in order to support them we
84 // have to ensure the path isn't escaped.
85 u.RawPath = u.Path
86
81 result = u.String() 87 result = u.String()
82 } 88 }
83 89
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
index 756ea43..4ef41ea 100644
--- a/vendor/github.com/hashicorp/go-getter/detect_file.go
+++ b/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -32,7 +32,7 @@ func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
32 return "", true, err 32 return "", true, err
33 } 33 }
34 if fi.Mode()&os.ModeSymlink != 0 { 34 if fi.Mode()&os.ModeSymlink != 0 {
35 pwd, err = os.Readlink(pwd) 35 pwd, err = filepath.EvalSymlinks(pwd)
36 if err != nil { 36 if err != nil {
37 return "", true, err 37 return "", true, err
38 } 38 }
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
index c3236f5..e6053d9 100644
--- a/vendor/github.com/hashicorp/go-getter/get.go
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -18,6 +18,8 @@ import (
18 "os/exec" 18 "os/exec"
19 "regexp" 19 "regexp"
20 "syscall" 20 "syscall"
21
22 cleanhttp "github.com/hashicorp/go-cleanhttp"
21) 23)
22 24
23// Getter defines the interface that schemes must implement to download 25// Getter defines the interface that schemes must implement to download
@@ -49,8 +51,13 @@ var Getters map[string]Getter
49// syntax is schema::url, example: git::https://foo.com 51// syntax is schema::url, example: git::https://foo.com
50var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) 52var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
51 53
54// httpClient is the default client to be used by HttpGetters.
55var httpClient = cleanhttp.DefaultClient()
56
52func init() { 57func init() {
53 httpGetter := &HttpGetter{Netrc: true} 58 httpGetter := &HttpGetter{
59 Netrc: true,
60 }
54 61
55 Getters = map[string]Getter{ 62 Getters = map[string]Getter{
56 "file": new(FileGetter), 63 "file": new(FileGetter),
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
index 0728139..cb1d029 100644
--- a/vendor/github.com/hashicorp/go-getter/get_git.go
+++ b/vendor/github.com/hashicorp/go-getter/get_git.go
@@ -11,6 +11,7 @@ import (
11 "strings" 11 "strings"
12 12
13 urlhelper "github.com/hashicorp/go-getter/helper/url" 13 urlhelper "github.com/hashicorp/go-getter/helper/url"
14 "github.com/hashicorp/go-safetemp"
14 "github.com/hashicorp/go-version" 15 "github.com/hashicorp/go-version"
15) 16)
16 17
@@ -105,13 +106,11 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
105// GetFile for Git doesn't support updating at this time. It will download 106// GetFile for Git doesn't support updating at this time. It will download
106// the file every time. 107// the file every time.
107func (g *GitGetter) GetFile(dst string, u *url.URL) error { 108func (g *GitGetter) GetFile(dst string, u *url.URL) error {
108 td, err := ioutil.TempDir("", "getter-git") 109 td, tdcloser, err := safetemp.Dir("", "getter")
109 if err != nil { 110 if err != nil {
110 return err 111 return err
111 } 112 }
112 if err := os.RemoveAll(td); err != nil { 113 defer tdcloser.Close()
113 return err
114 }
115 114
116 // Get the filename, and strip the filename from the URL so we can 115 // Get the filename, and strip the filename from the URL so we can
117 // just get the repository directly. 116 // just get the repository directly.
@@ -180,17 +179,34 @@ func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error {
180// setupGitEnv sets up the environment for the given command. This is used to 179// setupGitEnv sets up the environment for the given command. This is used to
181// pass configuration data to git and ssh and enables advanced cloning methods. 180// pass configuration data to git and ssh and enables advanced cloning methods.
182func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { 181func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
183 var sshOpts []string 182 const gitSSHCommand = "GIT_SSH_COMMAND="
183 var sshCmd []string
184
185 // If we have an existing GIT_SSH_COMMAND, we need to append our options.
186 // We will also remove our old entry to make sure the behavior is the same
187 // with versions of Go < 1.9.
188 env := os.Environ()
189 for i, v := range env {
190 if strings.HasPrefix(v, gitSSHCommand) {
191 sshCmd = []string{v}
192
193 env[i], env[len(env)-1] = env[len(env)-1], env[i]
194 env = env[:len(env)-1]
195 break
196 }
197 }
198
199 if len(sshCmd) == 0 {
200 sshCmd = []string{gitSSHCommand + "ssh"}
201 }
184 202
185 if sshKeyFile != "" { 203 if sshKeyFile != "" {
186 // We have an SSH key temp file configured, tell ssh about this. 204 // We have an SSH key temp file configured, tell ssh about this.
187 sshOpts = append(sshOpts, "-i", sshKeyFile) 205 sshCmd = append(sshCmd, "-i", sshKeyFile)
188 } 206 }
189 207
190 cmd.Env = append(os.Environ(), 208 env = append(env, strings.Join(sshCmd, " "))
191 // Set the ssh command to use for clones. 209 cmd.Env = env
192 "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "),
193 )
194} 210}
195 211
196// checkGitVersion is used to check the version of git installed on the system 212// checkGitVersion is used to check the version of git installed on the system
diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go
index 820bdd4..f386922 100644
--- a/vendor/github.com/hashicorp/go-getter/get_hg.go
+++ b/vendor/github.com/hashicorp/go-getter/get_hg.go
@@ -2,7 +2,6 @@ package getter
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "io/ioutil"
6 "net/url" 5 "net/url"
7 "os" 6 "os"
8 "os/exec" 7 "os/exec"
@@ -10,6 +9,7 @@ import (
10 "runtime" 9 "runtime"
11 10
12 urlhelper "github.com/hashicorp/go-getter/helper/url" 11 urlhelper "github.com/hashicorp/go-getter/helper/url"
12 "github.com/hashicorp/go-safetemp"
13) 13)
14 14
15// HgGetter is a Getter implementation that will download a module from 15// HgGetter is a Getter implementation that will download a module from
@@ -64,13 +64,13 @@ func (g *HgGetter) Get(dst string, u *url.URL) error {
64// GetFile for Hg doesn't support updating at this time. It will download 64// GetFile for Hg doesn't support updating at this time. It will download
65// the file every time. 65// the file every time.
66func (g *HgGetter) GetFile(dst string, u *url.URL) error { 66func (g *HgGetter) GetFile(dst string, u *url.URL) error {
67 td, err := ioutil.TempDir("", "getter-hg") 67 // Create a temporary directory to store the full source. This has to be
68 // a non-existent directory.
69 td, tdcloser, err := safetemp.Dir("", "getter")
68 if err != nil { 70 if err != nil {
69 return err 71 return err
70 } 72 }
71 if err := os.RemoveAll(td); err != nil { 73 defer tdcloser.Close()
72 return err
73 }
74 74
75 // Get the filename, and strip the filename from the URL so we can 75 // Get the filename, and strip the filename from the URL so we can
76 // just get the repository directly. 76 // just get the repository directly.
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
index 3c02034..d2e2879 100644
--- a/vendor/github.com/hashicorp/go-getter/get_http.go
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -4,12 +4,13 @@ import (
4 "encoding/xml" 4 "encoding/xml"
5 "fmt" 5 "fmt"
6 "io" 6 "io"
7 "io/ioutil"
8 "net/http" 7 "net/http"
9 "net/url" 8 "net/url"
10 "os" 9 "os"
11 "path/filepath" 10 "path/filepath"
12 "strings" 11 "strings"
12
13 "github.com/hashicorp/go-safetemp"
13) 14)
14 15
15// HttpGetter is a Getter implementation that will download from an HTTP 16// HttpGetter is a Getter implementation that will download from an HTTP
@@ -36,6 +37,10 @@ type HttpGetter struct {
36 // Netrc, if true, will lookup and use auth information found 37 // Netrc, if true, will lookup and use auth information found
37 // in the user's netrc file if available. 38 // in the user's netrc file if available.
38 Netrc bool 39 Netrc bool
40
41 // Client is the http.Client to use for Get requests.
42 // This defaults to a cleanhttp.DefaultClient if left unset.
43 Client *http.Client
39} 44}
40 45
41func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { 46func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
@@ -57,13 +62,17 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error {
57 } 62 }
58 } 63 }
59 64
65 if g.Client == nil {
66 g.Client = httpClient
67 }
68
60 // Add terraform-get to the parameter. 69 // Add terraform-get to the parameter.
61 q := u.Query() 70 q := u.Query()
62 q.Add("terraform-get", "1") 71 q.Add("terraform-get", "1")
63 u.RawQuery = q.Encode() 72 u.RawQuery = q.Encode()
64 73
65 // Get the URL 74 // Get the URL
66 resp, err := http.Get(u.String()) 75 resp, err := g.Client.Get(u.String())
67 if err != nil { 76 if err != nil {
68 return err 77 return err
69 } 78 }
@@ -98,7 +107,18 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error {
98} 107}
99 108
100func (g *HttpGetter) GetFile(dst string, u *url.URL) error { 109func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
101 resp, err := http.Get(u.String()) 110 if g.Netrc {
111 // Add auth from netrc if we can
112 if err := addAuthFromNetrc(u); err != nil {
113 return err
114 }
115 }
116
117 if g.Client == nil {
118 g.Client = httpClient
119 }
120
121 resp, err := g.Client.Get(u.String())
102 if err != nil { 122 if err != nil {
103 return err 123 return err
104 } 124 }
@@ -116,29 +136,40 @@ func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
116 if err != nil { 136 if err != nil {
117 return err 137 return err
118 } 138 }
119 defer f.Close()
120 139
121 _, err = io.Copy(f, resp.Body) 140 n, err := io.Copy(f, resp.Body)
141 if err == nil && n < resp.ContentLength {
142 err = io.ErrShortWrite
143 }
144 if err1 := f.Close(); err == nil {
145 err = err1
146 }
122 return err 147 return err
123} 148}
124 149
125// getSubdir downloads the source into the destination, but with 150// getSubdir downloads the source into the destination, but with
126// the proper subdir. 151// the proper subdir.
127func (g *HttpGetter) getSubdir(dst, source, subDir string) error { 152func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
128 // Create a temporary directory to store the full source 153 // Create a temporary directory to store the full source. This has to be
129 td, err := ioutil.TempDir("", "tf") 154 // a non-existent directory.
155 td, tdcloser, err := safetemp.Dir("", "getter")
130 if err != nil { 156 if err != nil {
131 return err 157 return err
132 } 158 }
133 defer os.RemoveAll(td) 159 defer tdcloser.Close()
134 160
135 // Download that into the given directory 161 // Download that into the given directory
136 if err := Get(td, source); err != nil { 162 if err := Get(td, source); err != nil {
137 return err 163 return err
138 } 164 }
139 165
166 // Process any globbing
167 sourcePath, err := SubdirGlob(td, subDir)
168 if err != nil {
169 return err
170 }
171
140 // Make sure the subdir path actually exists 172 // Make sure the subdir path actually exists
141 sourcePath := filepath.Join(td, subDir)
142 if _, err := os.Stat(sourcePath); err != nil { 173 if _, err := os.Stat(sourcePath); err != nil {
143 return fmt.Errorf( 174 return fmt.Errorf(
144 "Error downloading %s: %s", source, err) 175 "Error downloading %s: %s", source, err)
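With the new `Client` field, callers of `HttpGetter` can swap in their own `http.Client` instead of the default cleanhttp client. A minimal sketch of what that might look like, assuming only the exported fields and the `GetFile` signature shown in this diff; the timeout, destination path, and URL are placeholder values:

```go
package main

import (
	"log"
	"net/http"
	"net/url"
	"time"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Use a client with an explicit timeout instead of the default
	// cleanhttp client (example values only).
	g := &getter.HttpGetter{
		Netrc:  true,
		Client: &http.Client{Timeout: 30 * time.Second},
	}

	u, err := url.Parse("https://example.com/files/module.zip")
	if err != nil {
		log.Fatal(err)
	}

	// GetFile downloads a single file to the destination path.
	if err := g.GetFile("/tmp/module.zip", u); err != nil {
		log.Fatal(err)
	}
}
```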
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
index d3bffeb..ebb3217 100644
--- a/vendor/github.com/hashicorp/go-getter/get_s3.go
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -28,7 +28,7 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
28 } 28 }
29 29
30 // Create client config 30 // Create client config
31 config := g.getAWSConfig(region, creds) 31 config := g.getAWSConfig(region, u, creds)
32 sess := session.New(config) 32 sess := session.New(config)
33 client := s3.New(sess) 33 client := s3.New(sess)
34 34
@@ -84,7 +84,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error {
84 return err 84 return err
85 } 85 }
86 86
87 config := g.getAWSConfig(region, creds) 87 config := g.getAWSConfig(region, u, creds)
88 sess := session.New(config) 88 sess := session.New(config)
89 client := s3.New(sess) 89 client := s3.New(sess)
90 90
@@ -139,7 +139,7 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error {
139 return err 139 return err
140 } 140 }
141 141
142 config := g.getAWSConfig(region, creds) 142 config := g.getAWSConfig(region, u, creds)
143 sess := session.New(config) 143 sess := session.New(config)
144 client := s3.New(sess) 144 client := s3.New(sess)
145 return g.getObject(client, dst, bucket, path, version) 145 return g.getObject(client, dst, bucket, path, version)
@@ -174,7 +174,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er
174 return err 174 return err
175} 175}
176 176
177func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config { 177func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config {
178 conf := &aws.Config{} 178 conf := &aws.Config{}
179 if creds == nil { 179 if creds == nil {
180 // Grab the metadata URL 180 // Grab the metadata URL
@@ -195,6 +195,14 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *
195 }) 195 })
196 } 196 }
197 197
198 if creds != nil {
199 conf.Endpoint = &url.Host
200 conf.S3ForcePathStyle = aws.Bool(true)
201 if url.Scheme == "http" {
202 conf.DisableSSL = aws.Bool(true)
203 }
204 }
205
198 conf.Credentials = creds 206 conf.Credentials = creds
199 if region != "" { 207 if region != "" {
200 conf.Region = aws.String(region) 208 conf.Region = aws.String(region)
@@ -204,29 +212,48 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *
204} 212}
205 213
206func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { 214func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
207 // Expected host style: s3.amazonaws.com. They always have 3 parts, 215 // This just checks whether we are dealing with S3 or
208 // although the first may differ if we're accessing a specific region. 216 // any other S3-compliant service. S3 has a predictable
209 hostParts := strings.Split(u.Host, ".") 217 // URL, while others do not
210 if len(hostParts) != 3 { 218 if strings.Contains(u.Host, "amazonaws.com") {
211 err = fmt.Errorf("URL is not a valid S3 URL") 219 // Expected host style: s3.amazonaws.com. They always have 3 parts,
212 return 220 // although the first may differ if we're accessing a specific region.
213 } 221 hostParts := strings.Split(u.Host, ".")
222 if len(hostParts) != 3 {
223 err = fmt.Errorf("URL is not a valid S3 URL")
224 return
225 }
214 226
215 // Parse the region out of the first part of the host 227 // Parse the region out of the first part of the host
216 region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") 228 region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
217 if region == "" { 229 if region == "" {
218 region = "us-east-1" 230 region = "us-east-1"
219 } 231 }
220 232
221 pathParts := strings.SplitN(u.Path, "/", 3) 233 pathParts := strings.SplitN(u.Path, "/", 3)
222 if len(pathParts) != 3 { 234 if len(pathParts) != 3 {
223 err = fmt.Errorf("URL is not a valid S3 URL") 235 err = fmt.Errorf("URL is not a valid S3 URL")
224 return 236 return
225 } 237 }
238
239 bucket = pathParts[1]
240 path = pathParts[2]
241 version = u.Query().Get("version")
226 242
227 bucket = pathParts[1] 243 } else {
228 path = pathParts[2] 244 pathParts := strings.SplitN(u.Path, "/", 3)
229 version = u.Query().Get("version") 245 if len(pathParts) != 3 {
246 err = fmt.Errorf("URL is not a valid S3 complaint URL")
247 return
248 }
249 bucket = pathParts[1]
250 path = pathParts[2]
251 version = u.Query().Get("version")
252 region = u.Query().Get("region")
253 if region == "" {
254 region = "us-east-1"
255 }
256 }
230 257
231 _, hasAwsId := u.Query()["aws_access_key_id"] 258 _, hasAwsId := u.Query()["aws_access_key_id"]
232 _, hasAwsSecret := u.Query()["aws_access_key_secret"] 259 _, hasAwsSecret := u.Query()["aws_access_key_secret"]
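The reworked `parseUrl` and `getAWSConfig` route any non-`amazonaws.com` host through a path-style parse and use that host as the endpoint, which is what makes S3-compatible services reachable. A hedged sketch of the URL shape this enables, assuming a local MinIO-style endpoint; the host, bucket, key, and credentials are placeholders:

```go
package main

import (
	"log"
	"net/url"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	g := new(getter.S3Getter)

	// Path-style URL for an S3-compatible service. The host becomes the
	// endpoint, SSL is disabled for plain http, and the region falls back
	// to us-east-1 when the query parameter is omitted.
	u, err := url.Parse(
		"http://127.0.0.1:9000/my-bucket/modules/app.zip" +
			"?aws_access_key_id=EXAMPLEKEY&aws_access_key_secret=EXAMPLESECRET")
	if err != nil {
		log.Fatal(err)
	}

	if err := g.GetFile("/tmp/app.zip", u); err != nil {
		log.Fatal(err)
	}
}
```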
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
index 4d5ee3c..c63f2bb 100644
--- a/vendor/github.com/hashicorp/go-getter/source.go
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -1,6 +1,8 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "fmt"
5 "path/filepath"
4 "strings" 6 "strings"
5) 7)
6 8
@@ -34,3 +36,27 @@ func SourceDirSubdir(src string) (string, string) {
34 36
35 return src, subdir 37 return src, subdir
36} 38}
39
40// SubdirGlob returns the actual subdir with globbing processed.
41//
42// dst should be a destination directory that is already populated (the
43// download is complete) and subDir should be the set subDir. If subDir
44// is an empty string, this returns an empty string.
45//
46// The returned path is the full absolute path.
47func SubdirGlob(dst, subDir string) (string, error) {
48 matches, err := filepath.Glob(filepath.Join(dst, subDir))
49 if err != nil {
50 return "", err
51 }
52
53 if len(matches) == 0 {
54 return "", fmt.Errorf("subdir %q not found", subDir)
55 }
56
57 if len(matches) > 1 {
58 return "", fmt.Errorf("subdir %q matches multiple paths", subDir)
59 }
60
61 return matches[0], nil
62}
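`SubdirGlob` is the new helper the HTTP getter (and others) use to resolve a glob pattern inside a downloaded tree. A small usage sketch, assuming the destination directory has already been populated by a download; the paths and pattern are illustrative:

```go
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Resolve a glob pattern inside an already-downloaded directory.
	// Exactly one match is expected; zero or multiple matches are errors.
	path, err := getter.SubdirGlob("/tmp/getter123", "*/modules/vpc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resolved subdir:", path)
}
```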
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
new file mode 100644
index 0000000..abaf1e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/LICENSE
@@ -0,0 +1,21 @@
1MIT License
2
3Copyright (c) 2017 HashiCorp
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in all
13copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
new file mode 100644
index 0000000..614342b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/README.md
@@ -0,0 +1,123 @@
1# go-hclog
2
3[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
4
5[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
6
7`go-hclog` is a package for Go that provides a simple key/value logging
8interface for use in development and production environments.
9
10It provides logging levels that provide decreased output based upon the
11desired amount of output, unlike the standard library `log` package.
12
13It does not provide `Printf` style logging, only key/value logging that is
14exposed as arguments to the logging functions for simplicity.
15
16It provides a human readable output mode for use in development as well as
17JSON output mode for production.
18
19## Stability Note
20
21While this library is fully open source and HashiCorp will be maintaining it
22(since we are and will be making extensive use of it), the API and output
23format is subject to minor changes as we fully bake and vet it in our projects.
24This notice will be removed once it's fully integrated into our major projects
25and no further changes are anticipated.
26
27## Installation and Docs
28
29Install using `go get github.com/hashicorp/go-hclog`.
30
31Full documentation is available at
32http://godoc.org/github.com/hashicorp/go-hclog
33
34## Usage
35
36### Use the global logger
37
38```go
39hclog.Default().Info("hello world")
40```
41
42```text
432017-07-05T16:15:55.167-0700 [INFO ] hello world
44```
45
46(Note timestamps are removed in future examples for brevity.)
47
48### Create a new logger
49
50```go
51appLogger := hclog.New(&hclog.LoggerOptions{
52 Name: "my-app",
53 Level: hclog.LevelFromString("DEBUG"),
54})
55```
56
57### Emit an Info level message with 2 key/value pairs
58
59```go
60input := "5.5"
61_, err := strconv.ParseInt(input, 10, 32)
62if err != nil {
63 appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
64}
65```
66
67```text
68... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
69```
70
71### Create a new Logger for a major subsystem
72
73```go
74subsystemLogger := appLogger.Named("transport")
75subsystemLogger.Info("we are transporting something")
76```
77
78```text
79... [INFO ] my-app.transport: we are transporting something
80```
81
82Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
83reflecting both the application and subsystem names.
84
85### Create a new Logger with fixed key/value pairs
86
87Using `With()` will include a specific key-value pair in all messages emitted
88by that logger.
89
90```go
91requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
92requestLogger := subsystemLogger.With("request", requestID)
93requestLogger.Info("we are transporting a request")
94```
95
96```text
97... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
98```
99
100This allows sub Loggers to be context specific without having to thread that
101into all the callers.
102
103### Use this with code that uses the standard library logger
104
105If you want to use the standard library's `log.Logger` interface you can wrap
106`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
107it with the familiar `Println()`, `Printf()`, etc. For example:
108
109```go
110stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
111 InferLevels: true,
112})
113// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
114stdLogger.Printf("[DEBUG] %+v", stdLogger)
115```
116
117```text
118... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
119```
120
121Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
122specify `InferLevels: true`, you will not see any output here. You must change
123`appLogger` to `DEBUG` to see output. See the docs for more information.
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
new file mode 100644
index 0000000..55ce439
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/global.go
@@ -0,0 +1,34 @@
1package hclog
2
3import (
4 "sync"
5)
6
7var (
8 protect sync.Once
9 def Logger
10
11 // The options used to create the Default logger. These are
12 // read only when the Default logger is created, so set them
13 // as soon as the process starts.
14 DefaultOptions = &LoggerOptions{
15 Level: DefaultLevel,
16 Output: DefaultOutput,
17 }
18)
19
20// Return a logger that is held globally. This can be a good starting
21// place, and then you can use .With() and .Name() to create sub-loggers
22// to be used in more specific contexts.
23func Default() Logger {
24 protect.Do(func() {
25 def = New(DefaultOptions)
26 })
27
28 return def
29}
30
31// A short alias for Default()
32func L() Logger {
33 return Default()
34}
diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go
new file mode 100644
index 0000000..9f90c28
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/int.go
@@ -0,0 +1,385 @@
1package hclog
2
3import (
4 "bufio"
5 "encoding/json"
6 "fmt"
7 "log"
8 "os"
9 "runtime"
10 "strconv"
11 "strings"
12 "sync"
13 "time"
14)
15
16var (
17 _levelToBracket = map[Level]string{
18 Debug: "[DEBUG]",
19 Trace: "[TRACE]",
20 Info: "[INFO ]",
21 Warn: "[WARN ]",
22 Error: "[ERROR]",
23 }
24)
25
26// Given the options (nil for defaults), create a new Logger
27func New(opts *LoggerOptions) Logger {
28 if opts == nil {
29 opts = &LoggerOptions{}
30 }
31
32 output := opts.Output
33 if output == nil {
34 output = os.Stderr
35 }
36
37 level := opts.Level
38 if level == NoLevel {
39 level = DefaultLevel
40 }
41
42 return &intLogger{
43 m: new(sync.Mutex),
44 json: opts.JSONFormat,
45 caller: opts.IncludeLocation,
46 name: opts.Name,
47 w: bufio.NewWriter(output),
48 level: level,
49 }
50}
51
52// The internal logger implementation. Internal in that it is defined entirely
53// by this package.
54type intLogger struct {
55 json bool
56 caller bool
57 name string
58
59 // this is a pointer so that it's shared by any derived loggers, since
60 // those derived loggers share the bufio.Writer as well.
61 m *sync.Mutex
62 w *bufio.Writer
63 level Level
64
65 implied []interface{}
66}
67
68// Make sure that intLogger is a Logger
69var _ Logger = &intLogger{}
70
71// The time format to use for logging. This is a version of RFC3339 that
72// contains millisecond precision
73const TimeFormat = "2006-01-02T15:04:05.000Z0700"
74
75// Log a message and a set of key/value pairs if the given level is at
76// or more severe than the threshold configured in the Logger.
77func (z *intLogger) Log(level Level, msg string, args ...interface{}) {
78 if level < z.level {
79 return
80 }
81
82 t := time.Now()
83
84 z.m.Lock()
85 defer z.m.Unlock()
86
87 if z.json {
88 z.logJson(t, level, msg, args...)
89 } else {
90 z.log(t, level, msg, args...)
91 }
92
93 z.w.Flush()
94}
95
96// Cleanup a path by returning the last 2 segments of the path only.
97func trimCallerPath(path string) string {
98 // lovely borrowed from zap
99 // nb. To make sure we trim the path correctly on Windows too, we
100 // counter-intuitively need to use '/' and *not* os.PathSeparator here,
101 // because the path given originates from Go stdlib, specifically
102 // runtime.Caller() which (as of Mar/17) returns forward slashes even on
103 // Windows.
104 //
105 // See https://github.com/golang/go/issues/3335
106 // and https://github.com/golang/go/issues/18151
107 //
108 // for discussion on the issue on Go side.
109 //
110
111 // Find the last separator.
112 //
113 idx := strings.LastIndexByte(path, '/')
114 if idx == -1 {
115 return path
116 }
117
118 // Find the penultimate separator.
119 idx = strings.LastIndexByte(path[:idx], '/')
120 if idx == -1 {
121 return path
122 }
123
124 return path[idx+1:]
125}
126
127// Non-JSON logging format function
128func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) {
129 z.w.WriteString(t.Format(TimeFormat))
130 z.w.WriteByte(' ')
131
132 s, ok := _levelToBracket[level]
133 if ok {
134 z.w.WriteString(s)
135 } else {
136 z.w.WriteString("[UNKN ]")
137 }
138
139 if z.caller {
140 if _, file, line, ok := runtime.Caller(3); ok {
141 z.w.WriteByte(' ')
142 z.w.WriteString(trimCallerPath(file))
143 z.w.WriteByte(':')
144 z.w.WriteString(strconv.Itoa(line))
145 z.w.WriteByte(':')
146 }
147 }
148
149 z.w.WriteByte(' ')
150
151 if z.name != "" {
152 z.w.WriteString(z.name)
153 z.w.WriteString(": ")
154 }
155
156 z.w.WriteString(msg)
157
158 args = append(z.implied, args...)
159
160 var stacktrace CapturedStacktrace
161
162 if args != nil && len(args) > 0 {
163 if len(args)%2 != 0 {
164 cs, ok := args[len(args)-1].(CapturedStacktrace)
165 if ok {
166 args = args[:len(args)-1]
167 stacktrace = cs
168 } else {
169 args = append(args, "<unknown>")
170 }
171 }
172
173 z.w.WriteByte(':')
174
175 FOR:
176 for i := 0; i < len(args); i = i + 2 {
177 var val string
178
179 switch st := args[i+1].(type) {
180 case string:
181 val = st
182 case int:
183 val = strconv.FormatInt(int64(st), 10)
184 case int64:
185 val = strconv.FormatInt(int64(st), 10)
186 case int32:
187 val = strconv.FormatInt(int64(st), 10)
188 case int16:
189 val = strconv.FormatInt(int64(st), 10)
190 case int8:
191 val = strconv.FormatInt(int64(st), 10)
192 case uint:
193 val = strconv.FormatUint(uint64(st), 10)
194 case uint64:
195 val = strconv.FormatUint(uint64(st), 10)
196 case uint32:
197 val = strconv.FormatUint(uint64(st), 10)
198 case uint16:
199 val = strconv.FormatUint(uint64(st), 10)
200 case uint8:
201 val = strconv.FormatUint(uint64(st), 10)
202 case CapturedStacktrace:
203 stacktrace = st
204 continue FOR
205 default:
206 val = fmt.Sprintf("%v", st)
207 }
208
209 z.w.WriteByte(' ')
210 z.w.WriteString(args[i].(string))
211 z.w.WriteByte('=')
212
213 if strings.ContainsAny(val, " \t\n\r") {
214 z.w.WriteByte('"')
215 z.w.WriteString(val)
216 z.w.WriteByte('"')
217 } else {
218 z.w.WriteString(val)
219 }
220 }
221 }
222
223 z.w.WriteString("\n")
224
225 if stacktrace != "" {
226 z.w.WriteString(string(stacktrace))
227 }
228}
229
230// JSON logging function
231func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) {
232 vals := map[string]interface{}{
233 "@message": msg,
234 "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"),
235 }
236
237 var levelStr string
238 switch level {
239 case Error:
240 levelStr = "error"
241 case Warn:
242 levelStr = "warn"
243 case Info:
244 levelStr = "info"
245 case Debug:
246 levelStr = "debug"
247 case Trace:
248 levelStr = "trace"
249 default:
250 levelStr = "all"
251 }
252
253 vals["@level"] = levelStr
254
255 if z.name != "" {
256 vals["@module"] = z.name
257 }
258
259 if z.caller {
260 if _, file, line, ok := runtime.Caller(3); ok {
261 vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
262 }
263 }
264
265 if args != nil && len(args) > 0 {
266 if len(args)%2 != 0 {
267 cs, ok := args[len(args)-1].(CapturedStacktrace)
268 if ok {
269 args = args[:len(args)-1]
270 vals["stacktrace"] = cs
271 } else {
272 args = append(args, "<unknown>")
273 }
274 }
275
276 for i := 0; i < len(args); i = i + 2 {
277 if _, ok := args[i].(string); !ok {
278 // As this is the logging function, there's not much we can do here
279 // without injecting into logs...
280 continue
281 }
282 vals[args[i].(string)] = args[i+1]
283 }
284 }
285
286 err := json.NewEncoder(z.w).Encode(vals)
287 if err != nil {
288 panic(err)
289 }
290}
291
292// Emit the message and args at DEBUG level
293func (z *intLogger) Debug(msg string, args ...interface{}) {
294 z.Log(Debug, msg, args...)
295}
296
297// Emit the message and args at TRACE level
298func (z *intLogger) Trace(msg string, args ...interface{}) {
299 z.Log(Trace, msg, args...)
300}
301
302// Emit the message and args at INFO level
303func (z *intLogger) Info(msg string, args ...interface{}) {
304 z.Log(Info, msg, args...)
305}
306
307// Emit the message and args at WARN level
308func (z *intLogger) Warn(msg string, args ...interface{}) {
309 z.Log(Warn, msg, args...)
310}
311
312// Emit the message and args at ERROR level
313func (z *intLogger) Error(msg string, args ...interface{}) {
314 z.Log(Error, msg, args...)
315}
316
317// Indicate that the logger would emit TRACE level logs
318func (z *intLogger) IsTrace() bool {
319 return z.level == Trace
320}
321
322// Indicate that the logger would emit DEBUG level logs
323func (z *intLogger) IsDebug() bool {
324 return z.level <= Debug
325}
326
327// Indicate that the logger would emit INFO level logs
328func (z *intLogger) IsInfo() bool {
329 return z.level <= Info
330}
331
332// Indicate that the logger would emit WARN level logs
333func (z *intLogger) IsWarn() bool {
334 return z.level <= Warn
335}
336
337// Indicate that the logger would emit ERROR level logs
338func (z *intLogger) IsError() bool {
339 return z.level <= Error
340}
341
342// Return a sub-Logger for which every emitted log message will contain
343// the given key/value pairs. This is used to create a context specific
344// Logger.
345func (z *intLogger) With(args ...interface{}) Logger {
346 var nz intLogger = *z
347
348 nz.implied = append(nz.implied, args...)
349
350 return &nz
351}
352
353// Create a new sub-Logger whose name descends from the current name.
354// This is used to create a subsystem specific Logger.
355func (z *intLogger) Named(name string) Logger {
356 var nz intLogger = *z
357
358 if nz.name != "" {
359 nz.name = nz.name + "." + name
360 }
361
362 return &nz
363}
364
365// Create a new sub-Logger with an explicit name. This ignores the current
366// name. This is used to create a standalone logger that doesn't fall
367// within the normal hierarchy.
368func (z *intLogger) ResetNamed(name string) Logger {
369 var nz intLogger = *z
370
371 nz.name = name
372
373 return &nz
374}
375
376// Create a *log.Logger that will send its data through this Logger. This
377// allows packages that expect to be using the standard library log to actually
378// use this logger.
379func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
380 if opts == nil {
381 opts = &StandardLoggerOptions{}
382 }
383
384 return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0)
385}
diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go
new file mode 100644
index 0000000..6bb16ba
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/log.go
@@ -0,0 +1,138 @@
1package hclog
2
3import (
4 "io"
5 "log"
6 "os"
7 "strings"
8)
9
10var (
11 DefaultOutput = os.Stderr
12 DefaultLevel = Info
13)
14
15type Level int
16
17const (
18 // This is a special level used to indicate that no level has been
19 // set and allow for a default to be used.
20 NoLevel Level = 0
21
22 // The most verbose level. Intended to be used for the tracing of actions
23 // in code, such as function enters/exits, etc.
24 Trace Level = 1
25
26 // For low-level analysis by the programmer.
27 Debug Level = 2
28
29 // For information about steady state operations.
30 Info Level = 3
31
32 // For information about rare but handled events.
33 Warn Level = 4
34
35 // For information about unrecoverable events.
36 Error Level = 5
37)
38
39// LevelFromString returns a Level type for the named log level, or "NoLevel" if
40// the level string is invalid. This facilitates setting the log level via
41// config or environment variable by name in a predictable way.
42func LevelFromString(levelStr string) Level {
43 // We don't care about case. Accept "INFO" or "info"
44 levelStr = strings.ToLower(strings.TrimSpace(levelStr))
45 switch levelStr {
46 case "trace":
47 return Trace
48 case "debug":
49 return Debug
50 case "info":
51 return Info
52 case "warn":
53 return Warn
54 case "error":
55 return Error
56 default:
57 return NoLevel
58 }
59}
60
61// The main Logger interface. All code should be written against this interface only.
62type Logger interface {
63 // Args are alternating key, val pairs
64 // keys must be strings
65 // vals can be any type, but display is implementation specific
66 // Emit a message and key/value pairs at the TRACE level
67 Trace(msg string, args ...interface{})
68
69 // Emit a message and key/value pairs at the DEBUG level
70 Debug(msg string, args ...interface{})
71
72 // Emit a message and key/value pairs at the INFO level
73 Info(msg string, args ...interface{})
74
75 // Emit a message and key/value pairs at the WARN level
76 Warn(msg string, args ...interface{})
77
78 // Emit a message and key/value pairs at the ERROR level
79 Error(msg string, args ...interface{})
80
81 // Indicate if TRACE logs would be emitted. This and the other Is* guards
82 // are used to elide expensive logging code based on the current level.
83 IsTrace() bool
84
85 // Indicate if DEBUG logs would be emitted. This and the other Is* guards
86 IsDebug() bool
87
88 // Indicate if INFO logs would be emitted. This and the other Is* guards
89 IsInfo() bool
90
91 // Indicate if WARN logs would be emitted. This and the other Is* guards
92 IsWarn() bool
93
94 // Indicate if ERROR logs would be emitted. This and the other Is* guards
95 IsError() bool
96
97 // Creates a sublogger that will always have the given key/value pairs
98 With(args ...interface{}) Logger
99
100 // Create a logger that will prepend the name string on the front of all messages.
101 // If the logger already has a name, the new value will be appended to the current
102 // name. That way, a major subsystem can use this to decorate all its own logs
103 // without losing context.
104 Named(name string) Logger
105
106 // Create a logger that will prepend the name string on the front of all messages.
107 // This sets the name of the logger to the value directly, unlike Named which honors
108 // the current name as well.
109 ResetNamed(name string) Logger
110
111 // Return a value that conforms to the stdlib log.Logger interface
112 StandardLogger(opts *StandardLoggerOptions) *log.Logger
113}
114
115type StandardLoggerOptions struct {
116 // Indicate that some minimal parsing should be done on strings to try
117 // and detect their level and re-emit them.
118 // This supports prefixes like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
119 // [DEBUG], which are stripped off before the message is re-emitted at that level.
120 InferLevels bool
121}
122
123type LoggerOptions struct {
124 // Name of the subsystem to prefix logs with
125 Name string
126
127 // The threshold for the logger. Anything less severe is suppressed
128 Level Level
129
130 // Where to write the logs to. Defaults to os.Stderr if nil
131 Output io.Writer
132
133 // Control if the output should be in JSON.
134 JSONFormat bool
135
136 // Include file and line information in each log line
137 IncludeLocation bool
138}
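`LoggerOptions` exposes `JSONFormat` and `IncludeLocation`, which the README examples above do not show. A brief sketch of constructing such a logger, using example values for the name and level:

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// A JSON-formatted logger that also records the caller's file:line.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:            "worker",
		Level:           hclog.Debug,
		Output:          os.Stderr,
		JSONFormat:      true,
		IncludeLocation: true,
	})

	logger.Debug("queue drained", "items", 42)
}
```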
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
new file mode 100644
index 0000000..8af1a3b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
@@ -0,0 +1,108 @@
1// Copyright (c) 2016 Uber Technologies, Inc.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to deal
5// in the Software without restriction, including without limitation the rights
6// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7// copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19// THE SOFTWARE.
20
21package hclog
22
23import (
24 "bytes"
25 "runtime"
26 "strconv"
27 "strings"
28 "sync"
29)
30
31var (
32 _stacktraceIgnorePrefixes = []string{
33 "runtime.goexit",
34 "runtime.main",
35 }
36 _stacktracePool = sync.Pool{
37 New: func() interface{} {
38 return newProgramCounters(64)
39 },
40 }
41)
42
43// A stacktrace gathered by a previous call to log.Stacktrace. If passed
44// to a logging function, the stacktrace will be appended.
45type CapturedStacktrace string
46
47// Gather a stacktrace of the current goroutine and return it to be passed
48// to a logging function.
49func Stacktrace() CapturedStacktrace {
50 return CapturedStacktrace(takeStacktrace())
51}
52
53func takeStacktrace() string {
54 programCounters := _stacktracePool.Get().(*programCounters)
55 defer _stacktracePool.Put(programCounters)
56
57 var buffer bytes.Buffer
58
59 for {
60 // Skip the call to runtime.Callers and takeStacktrace so that the
61 // program counters start at the caller of takeStacktrace.
62 n := runtime.Callers(2, programCounters.pcs)
63 if n < cap(programCounters.pcs) {
64 programCounters.pcs = programCounters.pcs[:n]
65 break
66 }
67 // Don't put the too-short counter slice back into the pool; this lets
68 // the pool adjust if we consistently take deep stacktraces.
69 programCounters = newProgramCounters(len(programCounters.pcs) * 2)
70 }
71
72 i := 0
73 frames := runtime.CallersFrames(programCounters.pcs)
74 for frame, more := frames.Next(); more; frame, more = frames.Next() {
75 if shouldIgnoreStacktraceFunction(frame.Function) {
76 continue
77 }
78 if i != 0 {
79 buffer.WriteByte('\n')
80 }
81 i++
82 buffer.WriteString(frame.Function)
83 buffer.WriteByte('\n')
84 buffer.WriteByte('\t')
85 buffer.WriteString(frame.File)
86 buffer.WriteByte(':')
87 buffer.WriteString(strconv.Itoa(int(frame.Line)))
88 }
89
90 return buffer.String()
91}
92
93func shouldIgnoreStacktraceFunction(function string) bool {
94 for _, prefix := range _stacktraceIgnorePrefixes {
95 if strings.HasPrefix(function, prefix) {
96 return true
97 }
98 }
99 return false
100}
101
102type programCounters struct {
103 pcs []uintptr
104}
105
106func newProgramCounters(size int) *programCounters {
107 return &programCounters{make([]uintptr, size)}
108}
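Per the `CapturedStacktrace` handling in `intLogger.log`, passing the value returned by `Stacktrace()` to a logging call appends the formatted stack after the log line rather than printing it inline. A minimal sketch, with the logger name as an example value:

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "example"})

	// The CapturedStacktrace value is detected and the formatted stack
	// is appended after the log line.
	logger.Error("unexpected state", "stacktrace", hclog.Stacktrace())
}
```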
diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go
new file mode 100644
index 0000000..2bb927f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go
@@ -0,0 +1,62 @@
1package hclog
2
3import (
4 "bytes"
5 "strings"
6)
7
8// Provides an io.Writer to shim the data out of *log.Logger
9// and back into our Logger. This is basically the only way to
10// build upon *log.Logger.
11type stdlogAdapter struct {
12 hl Logger
13 inferLevels bool
14}
15
16// Take the data, infer the levels if configured, and send it through
17// a regular Logger
18func (s *stdlogAdapter) Write(data []byte) (int, error) {
19 str := string(bytes.TrimRight(data, " \t\n"))
20
21 if s.inferLevels {
22 level, str := s.pickLevel(str)
23 switch level {
24 case Trace:
25 s.hl.Trace(str)
26 case Debug:
27 s.hl.Debug(str)
28 case Info:
29 s.hl.Info(str)
30 case Warn:
31 s.hl.Warn(str)
32 case Error:
33 s.hl.Error(str)
34 default:
35 s.hl.Info(str)
36 }
37 } else {
38 s.hl.Info(str)
39 }
40
41 return len(data), nil
42}
43
44// Detect, based on conventions, what log level this is
45func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
46 switch {
47 case strings.HasPrefix(str, "[DEBUG]"):
48 return Debug, strings.TrimSpace(str[7:])
49 case strings.HasPrefix(str, "[TRACE]"):
50 return Trace, strings.TrimSpace(str[7:])
51 case strings.HasPrefix(str, "[INFO]"):
52 return Info, strings.TrimSpace(str[6:])
53 case strings.HasPrefix(str, "[WARN]"):
54 return Warn, strings.TrimSpace(str[7:])
55 case strings.HasPrefix(str, "[ERROR]"):
56 return Error, strings.TrimSpace(str[7:])
57 case strings.HasPrefix(str, "[ERR]"):
58 return Error, strings.TrimSpace(str[5:])
59 default:
60 return Info, str
61 }
62}
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
index 2058cfb..e4558db 100644
--- a/vendor/github.com/hashicorp/go-plugin/README.md
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -1,10 +1,9 @@
1# Go Plugin System over RPC 1# Go Plugin System over RPC
2 2
3`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system 3`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
4that has been in use by HashiCorp tooling for over 3 years. While initially 4that has been in use by HashiCorp tooling for over 4 years. While initially
5created for [Packer](https://www.packer.io), it has since been used by 5created for [Packer](https://www.packer.io), it is additionally in use by
6[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io), 6[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and
7with plans to also use it for [Nomad](https://www.nomadproject.io) and
8[Vault](https://www.vaultproject.io). 7[Vault](https://www.vaultproject.io).
9 8
10While the plugin system is over RPC, it is currently only designed to work 9While the plugin system is over RPC, it is currently only designed to work
@@ -24,6 +23,11 @@ interface as if it were going to run in the same process. For a plugin user:
24you just use and call functions on an interface as if it were in the same 23you just use and call functions on an interface as if it were in the same
25process. This plugin system handles the communication in between. 24process. This plugin system handles the communication in between.
26 25
26**Cross-language support.** Plugins can be written (and consumed) by
27almost every major language. This library supports serving plugins via
28[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written
29in any language.
30
27**Complex arguments and return values are supported.** This library 31**Complex arguments and return values are supported.** This library
28provides APIs for handling complex arguments and return values such 32provides APIs for handling complex arguments and return values such
29as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library 33as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
@@ -37,7 +41,10 @@ and the plugin can call back into the host process.
37**Built-in Logging.** Any plugins that use the `log` standard library 41**Built-in Logging.** Any plugins that use the `log` standard library
38will have log data automatically sent to the host process. The host 42will have log data automatically sent to the host process. The host
39process will mirror this output prefixed with the path to the plugin 43process will mirror this output prefixed with the path to the plugin
40binary. This makes debugging with plugins simple. 44binary. This makes debugging with plugins simple. If the host system
45uses [hclog](https://github.com/hashicorp/go-hclog) then the log data
46will be structured. If the plugin also uses hclog, logs from the plugin
47will be sent to the host hclog and be structured.
41 48
42**Protocol Versioning.** A very basic "protocol version" is supported that 49**Protocol Versioning.** A very basic "protocol version" is supported that
43can be incremented to invalidate any previous plugins. This is useful when 50can be incremented to invalidate any previous plugins. This is useful when
@@ -62,13 +69,18 @@ This requires the host/plugin to know this is possible and daemonize
62properly. `NewClient` takes a `ReattachConfig` to determine if and how to 69properly. `NewClient` takes a `ReattachConfig` to determine if and how to
63reattach. 70reattach.
64 71
72**Cryptographically Secure Plugins.** Plugins can be verified with an expected
73checksum and RPC communications can be configured to use TLS. The host process
74must be properly secured to protect this configuration.
75
65## Architecture 76## Architecture
66 77
67The HashiCorp plugin system works by launching subprocesses and communicating 78The HashiCorp plugin system works by launching subprocesses and communicating
68over RPC (using standard `net/rpc`). A single connection is made between 79over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single
69any plugin and the host process, and we use a 80connection is made between any plugin and the host process. For net/rpc-based
70[connection multiplexing](https://github.com/hashicorp/yamux) 81plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux)
71library to multiplex any other connections on top. 82library to multiplex any other connections on top. For gRPC-based plugins,
83the HTTP2 protocol handles multiplexing.
72 84
73This architecture has a number of benefits: 85This architecture has a number of benefits:
74 86
@@ -76,8 +88,8 @@ This architecture has a number of benefits:
76 panic the plugin user. 88 panic the plugin user.
77 89
78 * Plugins are very easy to write: just write a Go application and `go build`. 90 * Plugins are very easy to write: just write a Go application and `go build`.
79 Theoretically you could also use another language as long as it can 91 Or use any other language to write a gRPC server with a tiny amount of
80 communicate the Go `net/rpc` protocol but this hasn't yet been tried. 92 boilerplate to support go-plugin.
81 93
82 * Plugins are very easy to install: just put the binary in a location where 94 * Plugins are very easy to install: just put the binary in a location where
83 the host will find it (depends on the host but this library also provides 95 the host will find it (depends on the host but this library also provides
@@ -85,8 +97,8 @@ This architecture has a number of benefits:
85 97
86 * Plugins can be relatively secure: The plugin only has access to the 98 * Plugins can be relatively secure: The plugin only has access to the
87 interfaces and args given to it, not to the entire memory space of the 99 interfaces and args given to it, not to the entire memory space of the
88 process. More security features are planned (see the coming soon section 100 process. Additionally, go-plugin can communicate with the plugin over
89 below). 101 TLS.
90 102
91## Usage 103## Usage
92 104
@@ -97,10 +109,9 @@ high-level steps that must be done. Examples are available in the
97 1. Choose the interface(s) you want to expose for plugins. 109 1. Choose the interface(s) you want to expose for plugins.
98 110
99 2. For each interface, implement an implementation of that interface 111 2. For each interface, implement an implementation of that interface
100 that communicates over an `*rpc.Client` (from the standard `net/rpc` 112 that communicates over a `net/rpc` connection or other a
101 package) for every function call. Likewise, implement the RPC server 113 [gRPC](http://www.grpc.io) connection or both. You'll have to implement
102 struct this communicates to which is then communicating to a real, 114 both a client and server implementation.
103 concrete implementation.
104 115
105 3. Create a `Plugin` implementation that knows how to create the RPC 116 3. Create a `Plugin` implementation that knows how to create the RPC
106 client/server for a given plugin type. 117 client/server for a given plugin type.
@@ -125,10 +136,6 @@ improvements we can make.
125 136
126At this point in time, the roadmap for the plugin system is: 137At this point in time, the roadmap for the plugin system is:
127 138
128**Cryptographically Secure Plugins.** We'll implement signing plugins
129and loading signed plugins in order to allow Vault to make use of multi-process
130in a secure way.
131
132**Semantic Versioning.** Plugins will be able to implement a semantic version. 139**Semantic Versioning.** Plugins will be able to implement a semantic version.
133This plugin system will give host processes a system for constraining 140This plugin system will give host processes a system for constraining
134versions. This is in addition to the protocol versioning already present 141versions. This is in addition to the protocol versioning already present
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
index 9f8a0f2..b3e3b78 100644
--- a/vendor/github.com/hashicorp/go-plugin/client.go
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -2,8 +2,12 @@ package plugin
2 2
3import ( 3import (
4 "bufio" 4 "bufio"
5 "context"
6 "crypto/subtle"
7 "crypto/tls"
5 "errors" 8 "errors"
6 "fmt" 9 "fmt"
10 "hash"
7 "io" 11 "io"
8 "io/ioutil" 12 "io/ioutil"
9 "log" 13 "log"
@@ -17,6 +21,8 @@ import (
17 "sync/atomic" 21 "sync/atomic"
18 "time" 22 "time"
19 "unicode" 23 "unicode"
24
25 hclog "github.com/hashicorp/go-hclog"
20) 26)
21 27
22// If this is 1, then we've called CleanupClients. This can be used 28// If this is 1, then we've called CleanupClients. This can be used
@@ -35,6 +41,22 @@ var (
35 // ErrProcessNotFound is returned when a client is instantiated to 41 // ErrProcessNotFound is returned when a client is instantiated to
36 // reattach to an existing process and it isn't found. 42 // reattach to an existing process and it isn't found.
37 ErrProcessNotFound = errors.New("Reattachment process not found") 43 ErrProcessNotFound = errors.New("Reattachment process not found")
44
45 // ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match
46 // the one provided in the SecureConfig.
47 ErrChecksumsDoNotMatch = errors.New("checksums did not match")
48
49 // ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the
50 // SecureConfig.
51 ErrSecureConfigNoChecksum = errors.New("no checksum provided")
52
53 // ErrSecureConfigNoHash is returned when a nil Hash object is provided to the
54 // SecureConfig.
55 ErrSecureConfigNoHash = errors.New("no hash implementation provided")
56
57 // ErrSecureConfigAndReattach is returned when both Reattach and
58 // SecureConfig are set.
59 ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
38) 60)
39 61
40// Client handles the lifecycle of a plugin application. It launches 62// Client handles the lifecycle of a plugin application. It launches
@@ -55,7 +77,10 @@ type Client struct {
55 l sync.Mutex 77 l sync.Mutex
56 address net.Addr 78 address net.Addr
57 process *os.Process 79 process *os.Process
58 client *RPCClient 80 client ClientProtocol
81 protocol Protocol
82 logger hclog.Logger
83 doneCtx context.Context
59} 84}
60 85
61// ClientConfig is the configuration used to initialize a new 86// ClientConfig is the configuration used to initialize a new
@@ -79,6 +104,13 @@ type ClientConfig struct {
79 Cmd *exec.Cmd 104 Cmd *exec.Cmd
80 Reattach *ReattachConfig 105 Reattach *ReattachConfig
81 106
107 // SecureConfig is configuration for verifying the integrity of the
108 // executable. It can not be used with Reattach.
109 SecureConfig *SecureConfig
110
111 // TLSConfig is used to enable TLS on the RPC client.
112 TLSConfig *tls.Config
113
82 // Managed represents if the client should be managed by the 114 // Managed represents if the client should be managed by the
83 // plugin package or not. If true, then by calling CleanupClients, 115 // plugin package or not. If true, then by calling CleanupClients,
84 // it will automatically be cleaned up. Otherwise, the client 116 // it will automatically be cleaned up. Otherwise, the client
@@ -109,14 +141,74 @@ type ClientConfig struct {
109 // sync any of these streams. 141 // sync any of these streams.
110 SyncStdout io.Writer 142 SyncStdout io.Writer
111 SyncStderr io.Writer 143 SyncStderr io.Writer
144
145 // AllowedProtocols is a list of allowed protocols. If this isn't set,
146 // then only netrpc is allowed. This is so that older go-plugin systems
147 // can show friendly errors if they see a plugin with an unknown
148 // protocol.
149 //
150 // By setting this, you can cause an error immediately on plugin start
151 // if an unsupported protocol is used with a good error message.
152 //
153 // If this isn't set at all (nil value), then only net/rpc is accepted.
154 // This is done for legacy reasons. You must explicitly opt-in to
155 // new protocols.
156 AllowedProtocols []Protocol
157
158 // Logger is the logger that the client will use. If none is provided,
159 // it will default to hclog's default logger.
160 Logger hclog.Logger
112} 161}
113 162
114// ReattachConfig is used to configure a client to reattach to an 163// ReattachConfig is used to configure a client to reattach to an
115// already-running plugin process. You can retrieve this information by 164// already-running plugin process. You can retrieve this information by
116// calling ReattachConfig on Client. 165// calling ReattachConfig on Client.
117type ReattachConfig struct { 166type ReattachConfig struct {
118 Addr net.Addr 167 Protocol Protocol
119 Pid int 168 Addr net.Addr
169 Pid int
170}
171
172// SecureConfig is used to configure a client to verify the integrity of an
173// executable before running. It does this by verifying the checksum is
174// expected. Hash is used to specify the hashing method to use when checksumming
175// the file. The configuration is verified by the client by calling the
176// SecureConfig.Check() function.
177//
178// The host process should ensure the checksum was provided by a trusted and
179// authoritative source. The binary should be installed in such a way that it
180// can not be modified by an unauthorized user between the time of this check
181// and the time of execution.
182type SecureConfig struct {
183 Checksum []byte
184 Hash hash.Hash
185}
186
187// Check takes the filepath to an executable and returns true if the checksum of
188// the file matches the checksum provided in the SecureConfig.
189func (s *SecureConfig) Check(filePath string) (bool, error) {
190 if len(s.Checksum) == 0 {
191 return false, ErrSecureConfigNoChecksum
192 }
193
194 if s.Hash == nil {
195 return false, ErrSecureConfigNoHash
196 }
197
198 file, err := os.Open(filePath)
199 if err != nil {
200 return false, err
201 }
202 defer file.Close()
203
204 _, err = io.Copy(s.Hash, file)
205 if err != nil {
206 return false, err
207 }
208
209 sum := s.Hash.Sum(nil)
210
211 return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
120} 212}
121 213
122// This makes sure all the managed subprocesses are killed and properly 214// This makes sure all the managed subprocesses are killed and properly
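The new `SecureConfig`, `AllowedProtocols`, and `Logger` fields all hang off `ClientConfig`. A hedged sketch of wiring them together on the host side: the `HandshakeConfig`, `Plugins`, and `Cmd` fields belong to the pre-existing `ClientConfig` (not shown in this hunk), the handshake values, binary path, and checksum are placeholders, and the plugin map is elided:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"log"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// Checksum of the plugin binary, obtained out of band from a trusted
	// source (the hex string here is a placeholder).
	sum, err := hex.DecodeString(
		"9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08")
	if err != nil {
		log.Fatal(err)
	}

	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_PLUGIN",
			MagicCookieValue: "example",
		},
		Plugins: map[string]plugin.Plugin{}, // plugin map elided
		Cmd:     exec.Command("./example-plugin"),
		SecureConfig: &plugin.SecureConfig{
			Checksum: sum,
			Hash:     sha256.New(),
		},
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolNetRPC},
	})
	defer client.Kill()

	// Client() starts the process, verifies the checksum, and returns
	// the protocol client for the negotiated protocol.
	if _, err := client.Client(); err != nil {
		log.Fatal(err)
	}
}
```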
@@ -174,7 +266,22 @@ func NewClient(config *ClientConfig) (c *Client) {
174 config.SyncStderr = ioutil.Discard 266 config.SyncStderr = ioutil.Discard
175 } 267 }
176 268
177 c = &Client{config: config} 269 if config.AllowedProtocols == nil {
270 config.AllowedProtocols = []Protocol{ProtocolNetRPC}
271 }
272
273 if config.Logger == nil {
274 config.Logger = hclog.New(&hclog.LoggerOptions{
275 Output: hclog.DefaultOutput,
276 Level: hclog.Trace,
277 Name: "plugin",
278 })
279 }
280
281 c = &Client{
282 config: config,
283 logger: config.Logger,
284 }
178 if config.Managed { 285 if config.Managed {
179 managedClientsLock.Lock() 286 managedClientsLock.Lock()
180 managedClients = append(managedClients, c) 287 managedClients = append(managedClients, c)
@@ -184,11 +291,11 @@ func NewClient(config *ClientConfig) (c *Client) {
184 return 291 return
185} 292}
186 293
187// Client returns an RPC client for the plugin. 294// Client returns the protocol client for this connection.
188// 295//
189// Subsequent calls to this will return the same RPC client. 296// Subsequent calls to this will return the same client.
190func (c *Client) Client() (*RPCClient, error) { 297func (c *Client) Client() (ClientProtocol, error) {
191 addr, err := c.Start() 298 _, err := c.Start()
192 if err != nil { 299 if err != nil {
193 return nil, err 300 return nil, err
194 } 301 }
@@ -200,29 +307,18 @@ func (c *Client) Client() (*RPCClient, error) {
200 return c.client, nil 307 return c.client, nil
201 } 308 }
202 309
203 // Connect to the client 310 switch c.protocol {
204 conn, err := net.Dial(addr.Network(), addr.String()) 311 case ProtocolNetRPC:
205 if err != nil { 312 c.client, err = newRPCClient(c)
206 return nil, err
207 }
208 if tcpConn, ok := conn.(*net.TCPConn); ok {
209 // Make sure to set keep alive so that the connection doesn't die
210 tcpConn.SetKeepAlive(true)
211 }
212 313
213 // Create the actual RPC client 314 case ProtocolGRPC:
214 c.client, err = NewRPCClient(conn, c.config.Plugins) 315 c.client, err = newGRPCClient(c.doneCtx, c)
215 if err != nil { 316
216 conn.Close() 317 default:
217 return nil, err 318 return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
218 } 319 }
219 320
220 // Begin the stream syncing so that stdin, out, err work properly
221 err = c.client.SyncStreams(
222 c.config.SyncStdout,
223 c.config.SyncStderr)
224 if err != nil { 321 if err != nil {
225 c.client.Close()
226 c.client = nil 322 c.client = nil
227 return nil, err 323 return nil, err
228 } 324 }
@@ -274,8 +370,7 @@ func (c *Client) Kill() {
274 if err != nil { 370 if err != nil {
275 // If there was an error just log it. We're going to force 371 // If there was an error just log it. We're going to force
276 // kill in a moment anyways. 372 // kill in a moment anyways.
277 log.Printf( 373 c.logger.Warn("error closing client during Kill", "err", err)
278 "[WARN] plugin: error closing client during Kill: %s", err)
279 } 374 }
280 } 375 }
281 } 376 }
@@ -318,13 +413,21 @@ func (c *Client) Start() (addr net.Addr, err error) {
318 { 413 {
319 cmdSet := c.config.Cmd != nil 414 cmdSet := c.config.Cmd != nil
320 attachSet := c.config.Reattach != nil 415 attachSet := c.config.Reattach != nil
416 secureSet := c.config.SecureConfig != nil
321 if cmdSet == attachSet { 417 if cmdSet == attachSet {
322 return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") 418 return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
323 } 419 }
420
421 if secureSet && attachSet {
422 return nil, ErrSecureConfigAndReattach
423 }
324 } 424 }
325 425
326 // Create the logging channel for when we kill 426 // Create the logging channel for when we kill
327 c.doneLogging = make(chan struct{}) 427 c.doneLogging = make(chan struct{})
428 // Create a context for when we kill
429 var ctxCancel context.CancelFunc
430 c.doneCtx, ctxCancel = context.WithCancel(context.Background())
328 431
329 if c.config.Reattach != nil { 432 if c.config.Reattach != nil {
330 // Verify the process still exists. If not, then it is an error 433 // Verify the process still exists. If not, then it is an error
@@ -350,7 +453,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
350 pidWait(pid) 453 pidWait(pid)
351 454
352 // Log so we can see it 455 // Log so we can see it
353 log.Printf("[DEBUG] plugin: reattached plugin process exited\n") 456 c.logger.Debug("reattached plugin process exited")
354 457
355 // Mark it 458 // Mark it
356 c.l.Lock() 459 c.l.Lock()
@@ -359,11 +462,19 @@ func (c *Client) Start() (addr net.Addr, err error) {
359 462
360 // Close the logging channel since that doesn't work on reattach 463 // Close the logging channel since that doesn't work on reattach
361 close(c.doneLogging) 464 close(c.doneLogging)
465
466 // Cancel the context
467 ctxCancel()
362 }(p.Pid) 468 }(p.Pid)
363 469
364 // Set the address and process 470 // Set the address and process
365 c.address = c.config.Reattach.Addr 471 c.address = c.config.Reattach.Addr
366 c.process = p 472 c.process = p
473 c.protocol = c.config.Reattach.Protocol
474 if c.protocol == "" {
475 // Default the protocol to net/rpc for backwards compatibility
476 c.protocol = ProtocolNetRPC
477 }
367 478
368 return c.address, nil 479 return c.address, nil
369 } 480 }
@@ -384,7 +495,15 @@ func (c *Client) Start() (addr net.Addr, err error) {
384 cmd.Stderr = stderr_w 495 cmd.Stderr = stderr_w
385 cmd.Stdout = stdout_w 496 cmd.Stdout = stdout_w
386 497
387 log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) 498 if c.config.SecureConfig != nil {
499 if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
500 return nil, fmt.Errorf("error verifying checksum: %s", err)
501 } else if !ok {
502 return nil, ErrChecksumsDoNotMatch
503 }
504 }
505
506 c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args)
388 err = cmd.Start() 507 err = cmd.Start()
389 if err != nil { 508 if err != nil {
390 return 509 return
@@ -418,12 +537,15 @@ func (c *Client) Start() (addr net.Addr, err error) {
418 cmd.Wait() 537 cmd.Wait()
419 538
420 // Log and make sure to flush the logs write away 539 // Log and make sure to flush the logs write away
421 log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) 540 c.logger.Debug("plugin process exited", "path", cmd.Path)
422 os.Stderr.Sync() 541 os.Stderr.Sync()
423 542
424 // Mark that we exited 543 // Mark that we exited
425 close(exitCh) 544 close(exitCh)
426 545
546 // Cancel the context, marking that we exited
547 ctxCancel()
548
427 // Set that we exited, which takes a lock 549 // Set that we exited, which takes a lock
428 c.l.Lock() 550 c.l.Lock()
429 defer c.l.Unlock() 551 defer c.l.Unlock()
@@ -465,7 +587,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
465 timeout := time.After(c.config.StartTimeout) 587 timeout := time.After(c.config.StartTimeout)
466 588
467 // Start looking for the address 589 // Start looking for the address
468 log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) 590 c.logger.Debug("waiting for RPC address", "path", cmd.Path)
469 select { 591 select {
470 case <-timeout: 592 case <-timeout:
471 err = errors.New("timeout while waiting for plugin to start") 593 err = errors.New("timeout while waiting for plugin to start")
@@ -475,7 +597,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
475 // Trim the line and split by "|" in order to get the parts of 597 // Trim the line and split by "|" in order to get the parts of
476 // the output. 598 // the output.
477 line := strings.TrimSpace(string(lineBytes)) 599 line := strings.TrimSpace(string(lineBytes))
478 parts := strings.SplitN(line, "|", 4) 600 parts := strings.SplitN(line, "|", 6)
479 if len(parts) < 4 { 601 if len(parts) < 4 {
480 err = fmt.Errorf( 602 err = fmt.Errorf(
481 "Unrecognized remote plugin message: %s\n\n"+ 603 "Unrecognized remote plugin message: %s\n\n"+
@@ -495,7 +617,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
495 617
496 if int(coreProtocol) != CoreProtocolVersion { 618 if int(coreProtocol) != CoreProtocolVersion {
497 err = fmt.Errorf("Incompatible core API version with plugin. "+ 619 err = fmt.Errorf("Incompatible core API version with plugin. "+
498 "Plugin version: %s, Ours: %d\n\n"+ 620 "Plugin version: %s, Core version: %d\n\n"+
499 "To fix this, the plugin usually only needs to be recompiled.\n"+ 621 "To fix this, the plugin usually only needs to be recompiled.\n"+
500 "Please report this to the plugin author.", parts[0], CoreProtocolVersion) 622 "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
501 return 623 return
@@ -513,7 +635,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
513 // Test the API version 635 // Test the API version
514 if uint(protocol) != c.config.ProtocolVersion { 636 if uint(protocol) != c.config.ProtocolVersion {
515 err = fmt.Errorf("Incompatible API version with plugin. "+ 637 err = fmt.Errorf("Incompatible API version with plugin. "+
516 "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) 638 "Plugin version: %s, Core version: %d", parts[1], c.config.ProtocolVersion)
517 return 639 return
518 } 640 }
519 641
@@ -525,6 +647,27 @@ func (c *Client) Start() (addr net.Addr, err error) {
525 default: 647 default:
526 err = fmt.Errorf("Unknown address type: %s", parts[3]) 648 err = fmt.Errorf("Unknown address type: %s", parts[3])
527 } 649 }
650
651 // If we have a server type, then record that. We default to net/rpc
652 // for backwards compatibility.
653 c.protocol = ProtocolNetRPC
654 if len(parts) >= 5 {
655 c.protocol = Protocol(parts[4])
656 }
657
658 found := false
659 for _, p := range c.config.AllowedProtocols {
660 if p == c.protocol {
661 found = true
662 break
663 }
664 }
665 if !found {
666 err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
667 c.protocol, c.config.AllowedProtocols)
668 return
669 }
670
528 } 671 }
529 672
530 c.address = addr 673 c.address = addr
@@ -555,9 +698,57 @@ func (c *Client) ReattachConfig() *ReattachConfig {
555 } 698 }
556 699
557 return &ReattachConfig{ 700 return &ReattachConfig{
558 Addr: c.address, 701 Protocol: c.protocol,
559 Pid: c.config.Cmd.Process.Pid, 702 Addr: c.address,
703 Pid: c.config.Cmd.Process.Pid,
704 }
705}
706
707// Protocol returns the protocol of the server on the remote end. This will
708// start the plugin process if it isn't already started. Errors from
709// starting the plugin are suppressed and ProtocolInvalid is returned. It
710// is recommended you call Start explicitly before calling Protocol to ensure
711// no errors occur.
712func (c *Client) Protocol() Protocol {
713 _, err := c.Start()
714 if err != nil {
715 return ProtocolInvalid
716 }
717
718 return c.protocol
719}
720
721func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) {
722 return func(_ string, _ time.Duration) (net.Conn, error) {
723 // Connect to the client
724 conn, err := net.Dial(addr.Network(), addr.String())
725 if err != nil {
726 return nil, err
727 }
728 if tcpConn, ok := conn.(*net.TCPConn); ok {
729 // Make sure to set keep alive so that the connection doesn't die
730 tcpConn.SetKeepAlive(true)
731 }
732
733 return conn, nil
734 }
735}
736
737// dialer is compatible with grpc.WithDialer and creates the connection
738// to the plugin.
739func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
740 conn, err := netAddrDialer(c.address)("", timeout)
741 if err != nil {
742 return nil, err
560 } 743 }
744
745 // If we have a TLS config we wrap our connection. We only do this
746 // for net/rpc since gRPC uses its own mechanism for TLS.
747 if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
748 conn = tls.Client(conn, c.config.TLSConfig)
749 }
750
751 return conn, nil
561} 752}
562 753
563func (c *Client) logStderr(r io.Reader) { 754func (c *Client) logStderr(r io.Reader) {
@@ -566,9 +757,31 @@ func (c *Client) logStderr(r io.Reader) {
566 line, err := bufR.ReadString('\n') 757 line, err := bufR.ReadString('\n')
567 if line != "" { 758 if line != "" {
568 c.config.Stderr.Write([]byte(line)) 759 c.config.Stderr.Write([]byte(line))
569
570 line = strings.TrimRightFunc(line, unicode.IsSpace) 760 line = strings.TrimRightFunc(line, unicode.IsSpace)
571 log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) 761
762 l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
763
764 entry, err := parseJSON(line)
765 // If output is not JSON format, print directly to Debug
766 if err != nil {
767 l.Debug(line)
768 } else {
769 out := flattenKVPairs(entry.KVPairs)
770
771 l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat))
772 switch hclog.LevelFromString(entry.Level) {
773 case hclog.Trace:
774 l.Trace(entry.Message, out...)
775 case hclog.Debug:
776 l.Debug(entry.Message, out...)
777 case hclog.Info:
778 l.Info(entry.Message, out...)
779 case hclog.Warn:
780 l.Warn(entry.Message, out...)
781 case hclog.Error:
782 l.Error(entry.Message, out...)
783 }
784 }
572 } 785 }
573 786
574 if err == io.EOF { 787 if err == io.EOF {
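Taken together, the client.go hunks above widen the handshake line to six "|"-separated fields and negotiate the wire protocol from the optional fifth field, defaulting to net/rpc. A minimal standalone sketch of that parsing (not part of the vendored code; the sample handshake line and its values are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical handshake line: core-version|app-version|network|address|protocol
	line := "1|4|unix|/tmp/plugin.sock|grpc"

	parts := strings.SplitN(strings.TrimSpace(line), "|", 6)
	if len(parts) < 4 {
		panic("unrecognized handshake line")
	}

	// Older plugins omit the protocol field; default to net/rpc as the
	// client code above does.
	protocol := "netrpc"
	if len(parts) >= 5 {
		protocol = parts[4]
	}

	fmt.Println("network:", parts[2], "address:", parts[3], "protocol:", protocol)
}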
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
new file mode 100644
index 0000000..49fd21c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
@@ -0,0 +1,455 @@
1package plugin
2
3import (
4 "context"
5 "crypto/tls"
6 "errors"
7 "fmt"
8 "log"
9 "net"
10 "sync"
11 "sync/atomic"
12 "time"
13
14 "github.com/oklog/run"
15 "google.golang.org/grpc"
16 "google.golang.org/grpc/credentials"
17)
18
19// streamer interface is used in the broker to send/receive connection
20// information.
21type streamer interface {
22 Send(*ConnInfo) error
23 Recv() (*ConnInfo, error)
24 Close()
25}
26
27// sendErr is used to pass errors back during a send.
28type sendErr struct {
29 i *ConnInfo
30 ch chan error
31}
32
33// gRPCBrokerServer is used by the plugin to start a stream and to send
34// connection information to/from the plugin. Implements GRPCBrokerServer and
35// streamer interfaces.
36type gRPCBrokerServer struct {
37 // send is used to send connection info to the gRPC stream.
38 send chan *sendErr
39
40 // recv is used to receive connection info from the gRPC stream.
41 recv chan *ConnInfo
42
43 // quit closes down the stream.
44 quit chan struct{}
45
46 // o is used to ensure we close the quit channel only once.
47 o sync.Once
48}
49
50func newGRPCBrokerServer() *gRPCBrokerServer {
51 return &gRPCBrokerServer{
52 send: make(chan *sendErr),
53 recv: make(chan *ConnInfo),
54 quit: make(chan struct{}),
55 }
56}
57
58// StartStream implements the GRPCBrokerServer interface and will block until
59// the quit channel is closed or the context reports Done. The stream will pass
60// connection information to/from the client.
61func (s *gRPCBrokerServer) StartStream(stream GRPCBroker_StartStreamServer) error {
62 doneCh := stream.Context().Done()
63 defer s.Close()
64
 65 // Process send stream
66 go func() {
67 for {
68 select {
69 case <-doneCh:
70 return
71 case <-s.quit:
72 return
73 case se := <-s.send:
74 err := stream.Send(se.i)
75 se.ch <- err
76 }
77 }
78 }()
79
80 // Process receive stream
81 for {
82 i, err := stream.Recv()
83 if err != nil {
84 return err
85 }
86 select {
87 case <-doneCh:
88 return nil
89 case <-s.quit:
90 return nil
91 case s.recv <- i:
92 }
93 }
94
95 return nil
96}
97
98// Send is used by the GRPCBroker to pass connection information into the stream
99// to the client.
100func (s *gRPCBrokerServer) Send(i *ConnInfo) error {
101 ch := make(chan error)
102 defer close(ch)
103
104 select {
105 case <-s.quit:
106 return errors.New("broker closed")
107 case s.send <- &sendErr{
108 i: i,
109 ch: ch,
110 }:
111 }
112
113 return <-ch
114}
115
116// Recv is used by the GRPCBroker to pass connection information that has been
117// sent from the client from the stream to the broker.
118func (s *gRPCBrokerServer) Recv() (*ConnInfo, error) {
119 select {
120 case <-s.quit:
121 return nil, errors.New("broker closed")
122 case i := <-s.recv:
123 return i, nil
124 }
125}
126
127// Close closes the quit channel, shutting down the stream.
128func (s *gRPCBrokerServer) Close() {
129 s.o.Do(func() {
130 close(s.quit)
131 })
132}
133
134// gRPCBrokerClientImpl is used by the client to start a stream and to send
135// connection information to/from the client. Implements GRPCBrokerClient and
136// streamer interfaces.
137type gRPCBrokerClientImpl struct {
138 // client is the underlying GRPC client used to make calls to the server.
139 client GRPCBrokerClient
140
141 // send is used to send connection info to the gRPC stream.
142 send chan *sendErr
143
144 // recv is used to receive connection info from the gRPC stream.
145 recv chan *ConnInfo
146
147 // quit closes down the stream.
148 quit chan struct{}
149
150 // o is used to ensure we close the quit channel only once.
151 o sync.Once
152}
153
154func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl {
155 return &gRPCBrokerClientImpl{
156 client: NewGRPCBrokerClient(conn),
157 send: make(chan *sendErr),
158 recv: make(chan *ConnInfo),
159 quit: make(chan struct{}),
160 }
161}
162
163// StartStream implements the GRPCBrokerClient interface and will block until
164// the quit channel is closed or the context reports Done. The stream will pass
165// connection information to/from the plugin.
166func (s *gRPCBrokerClientImpl) StartStream() error {
167 ctx, cancelFunc := context.WithCancel(context.Background())
168 defer cancelFunc()
169 defer s.Close()
170
171 stream, err := s.client.StartStream(ctx)
172 if err != nil {
173 return err
174 }
175 doneCh := stream.Context().Done()
176
177 go func() {
178 for {
179 select {
180 case <-doneCh:
181 return
182 case <-s.quit:
183 return
184 case se := <-s.send:
185 err := stream.Send(se.i)
186 se.ch <- err
187 }
188 }
189 }()
190
191 for {
192 i, err := stream.Recv()
193 if err != nil {
194 return err
195 }
196 select {
197 case <-doneCh:
198 return nil
199 case <-s.quit:
200 return nil
201 case s.recv <- i:
202 }
203 }
204
205 return nil
206}
207
208// Send is used by the GRPCBroker to pass connection information into the stream
209// to the plugin.
210func (s *gRPCBrokerClientImpl) Send(i *ConnInfo) error {
211 ch := make(chan error)
212 defer close(ch)
213
214 select {
215 case <-s.quit:
216 return errors.New("broker closed")
217 case s.send <- &sendErr{
218 i: i,
219 ch: ch,
220 }:
221 }
222
223 return <-ch
224}
225
226// Recv is used by the GRPCBroker to pass connection information that has been
227// sent from the plugin to the broker.
228func (s *gRPCBrokerClientImpl) Recv() (*ConnInfo, error) {
229 select {
230 case <-s.quit:
231 return nil, errors.New("broker closed")
232 case i := <-s.recv:
233 return i, nil
234 }
235}
236
237// Close closes the quit channel, shutting down the stream.
238func (s *gRPCBrokerClientImpl) Close() {
239 s.o.Do(func() {
240 close(s.quit)
241 })
242}
243
244// GRPCBroker is responsible for brokering connections by unique ID.
245//
246// It is used by plugins to create multiple gRPC connections and data
247// streams between the plugin process and the host process.
248//
249// This allows a plugin to request a channel with a specific ID to connect to
250// or accept a connection from, and the broker handles the details of
251// holding these channels open while they're being negotiated.
252//
253// The Plugin interface has access to these for both Server and Client.
254// The broker can be used by either (optionally) to reserve and connect to
255// new streams. This is useful for complex args and return values,
256// or anything else you might need a data stream for.
257type GRPCBroker struct {
258 nextId uint32
259 streamer streamer
260 streams map[uint32]*gRPCBrokerPending
261 tls *tls.Config
262 doneCh chan struct{}
263 o sync.Once
264
265 sync.Mutex
266}
267
268type gRPCBrokerPending struct {
269 ch chan *ConnInfo
270 doneCh chan struct{}
271}
272
273func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker {
274 return &GRPCBroker{
275 streamer: s,
276 streams: make(map[uint32]*gRPCBrokerPending),
277 tls: tls,
278 doneCh: make(chan struct{}),
279 }
280}
281
282// Accept accepts a connection by ID.
283//
284// This should not be called multiple times with the same ID at one time.
285func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) {
286 listener, err := serverListener()
287 if err != nil {
288 return nil, err
289 }
290
291 err = b.streamer.Send(&ConnInfo{
292 ServiceId: id,
293 Network: listener.Addr().Network(),
294 Address: listener.Addr().String(),
295 })
296 if err != nil {
297 return nil, err
298 }
299
300 return listener, nil
301}
302
303// AcceptAndServe is used to accept a specific stream ID and immediately
304// serve a gRPC server on that stream ID. This is used to easily serve
305// complex arguments. Each AcceptAndServe call opens a new listener socket and
306// sends the connection info down the stream to the dialer. Since a new
307// connection is opened every call, these calls should be used sparingly.
308// Multiple gRPC server implementations can be registered to a single
309// AcceptAndServe call.
310func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) {
311 listener, err := b.Accept(id)
312 if err != nil {
313 log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
314 return
315 }
316 defer listener.Close()
317
318 var opts []grpc.ServerOption
319 if b.tls != nil {
320 opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))}
321 }
322
323 server := s(opts)
324
325 // Here we use a run group to close this goroutine if the server is shutdown
326 // or the broker is shutdown.
327 var g run.Group
328 {
329 // Serve on the listener, if shutting down call GracefulStop.
330 g.Add(func() error {
331 return server.Serve(listener)
332 }, func(err error) {
333 server.GracefulStop()
334 })
335 }
336 {
337 // block on the closeCh or the doneCh. If we are shutting down close the
338 // closeCh.
339 closeCh := make(chan struct{})
340 g.Add(func() error {
341 select {
342 case <-b.doneCh:
343 case <-closeCh:
344 }
345 return nil
346 }, func(err error) {
347 close(closeCh)
348 })
349 }
350
351 // Block until we are done
352 g.Run()
353}
354
355// Close closes the stream and all servers.
356func (b *GRPCBroker) Close() error {
357 b.streamer.Close()
358 b.o.Do(func() {
359 close(b.doneCh)
360 })
361 return nil
362}
363
364// Dial opens a connection by ID.
365func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) {
366 var c *ConnInfo
367
368 // Open the stream
369 p := b.getStream(id)
370 select {
371 case c = <-p.ch:
372 close(p.doneCh)
373 case <-time.After(5 * time.Second):
374 return nil, fmt.Errorf("timeout waiting for connection info")
375 }
376
377 var addr net.Addr
378 switch c.Network {
379 case "tcp":
380 addr, err = net.ResolveTCPAddr("tcp", c.Address)
381 case "unix":
382 addr, err = net.ResolveUnixAddr("unix", c.Address)
383 default:
384 err = fmt.Errorf("Unknown address type: %s", c.Address)
385 }
386 if err != nil {
387 return nil, err
388 }
389
390 return dialGRPCConn(b.tls, netAddrDialer(addr))
391}
392
393// NextId returns a unique ID to use next.
394//
395// It is possible for very long-running plugin hosts to wrap this value,
396// though it would require a very large number of calls. In practice
397// we've never seen it happen.
398func (m *GRPCBroker) NextId() uint32 {
399 return atomic.AddUint32(&m.nextId, 1)
400}
401
402// Run starts the brokering and should be executed in a goroutine, since it
403// blocks forever, or until the session closes.
404//
405// Uses of GRPCBroker never need to call this. It is called internally by
406// the plugin host/client.
407func (m *GRPCBroker) Run() {
408 for {
409 stream, err := m.streamer.Recv()
410 if err != nil {
411 // Once we receive an error, just exit
412 break
413 }
414
415 // Initialize the waiter
416 p := m.getStream(stream.ServiceId)
417 select {
418 case p.ch <- stream:
419 default:
420 }
421
422 go m.timeoutWait(stream.ServiceId, p)
423 }
424}
425
426func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending {
427 m.Lock()
428 defer m.Unlock()
429
430 p, ok := m.streams[id]
431 if ok {
432 return p
433 }
434
435 m.streams[id] = &gRPCBrokerPending{
436 ch: make(chan *ConnInfo, 1),
437 doneCh: make(chan struct{}),
438 }
439 return m.streams[id]
440}
441
442func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) {
443 // Wait for the stream to either be picked up and connected, or
444 // for a timeout.
445 select {
446 case <-p.doneCh:
447 case <-time.After(5 * time.Second):
448 }
449
450 m.Lock()
451 defer m.Unlock()
452
453 // Delete the stream so no one else can grab it
454 delete(m.streams, id)
455}
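A hedged sketch of how a plugin implementation might use the broker above to carry a secondary gRPC stream: one side reserves an ID and serves on it, the other dials the same ID once it learns it over the primary connection. The names serveHelper and dialHelper and the register callback are hypothetical; only the GRPCBroker methods come from the file above.

package example

import (
	"google.golang.org/grpc"

	plugin "github.com/hashicorp/go-plugin"
)

// serveHelper reserves a stream ID and serves an extra gRPC server on it.
// The returned ID is what the other side passes to dialHelper.
func serveHelper(broker *plugin.GRPCBroker, register func(*grpc.Server)) uint32 {
	id := broker.NextId()
	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		register(s) // register the hypothetical helper service(s)
		return s
	})
	return id
}

// dialHelper opens a client connection to the stream reserved by the other
// side; the broker resolves the ID to the address it received over the stream.
func dialHelper(broker *plugin.GRPCBroker, id uint32) (*grpc.ClientConn, error) {
	return broker.Dial(id)
}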
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go
new file mode 100644
index 0000000..d490daf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go
@@ -0,0 +1,190 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: grpc_broker.proto
3
4/*
5Package plugin is a generated protocol buffer package.
6
7It is generated from these files:
8 grpc_broker.proto
9
10It has these top-level messages:
11 ConnInfo
12*/
13package plugin
14
15import proto "github.com/golang/protobuf/proto"
16import fmt "fmt"
17import math "math"
18
19import (
20 context "golang.org/x/net/context"
21 grpc "google.golang.org/grpc"
22)
23
24// Reference imports to suppress errors if they are not otherwise used.
25var _ = proto.Marshal
26var _ = fmt.Errorf
27var _ = math.Inf
28
29// This is a compile-time assertion to ensure that this generated file
30// is compatible with the proto package it is being compiled against.
31// A compilation error at this line likely means your copy of the
32// proto package needs to be updated.
33const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
34
35type ConnInfo struct {
36 ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId" json:"service_id,omitempty"`
37 Network string `protobuf:"bytes,2,opt,name=network" json:"network,omitempty"`
38 Address string `protobuf:"bytes,3,opt,name=address" json:"address,omitempty"`
39}
40
41func (m *ConnInfo) Reset() { *m = ConnInfo{} }
42func (m *ConnInfo) String() string { return proto.CompactTextString(m) }
43func (*ConnInfo) ProtoMessage() {}
44func (*ConnInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
45
46func (m *ConnInfo) GetServiceId() uint32 {
47 if m != nil {
48 return m.ServiceId
49 }
50 return 0
51}
52
53func (m *ConnInfo) GetNetwork() string {
54 if m != nil {
55 return m.Network
56 }
57 return ""
58}
59
60func (m *ConnInfo) GetAddress() string {
61 if m != nil {
62 return m.Address
63 }
64 return ""
65}
66
67func init() {
68 proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo")
69}
70
71// Reference imports to suppress errors if they are not otherwise used.
72var _ context.Context
73var _ grpc.ClientConn
74
75// This is a compile-time assertion to ensure that this generated file
76// is compatible with the grpc package it is being compiled against.
77const _ = grpc.SupportPackageIsVersion4
78
79// Client API for GRPCBroker service
80
81type GRPCBrokerClient interface {
82 StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error)
83}
84
85type gRPCBrokerClient struct {
86 cc *grpc.ClientConn
87}
88
89func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient {
90 return &gRPCBrokerClient{cc}
91}
92
93func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) {
94 stream, err := grpc.NewClientStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], c.cc, "/plugin.GRPCBroker/StartStream", opts...)
95 if err != nil {
96 return nil, err
97 }
98 x := &gRPCBrokerStartStreamClient{stream}
99 return x, nil
100}
101
102type GRPCBroker_StartStreamClient interface {
103 Send(*ConnInfo) error
104 Recv() (*ConnInfo, error)
105 grpc.ClientStream
106}
107
108type gRPCBrokerStartStreamClient struct {
109 grpc.ClientStream
110}
111
112func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error {
113 return x.ClientStream.SendMsg(m)
114}
115
116func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) {
117 m := new(ConnInfo)
118 if err := x.ClientStream.RecvMsg(m); err != nil {
119 return nil, err
120 }
121 return m, nil
122}
123
124// Server API for GRPCBroker service
125
126type GRPCBrokerServer interface {
127 StartStream(GRPCBroker_StartStreamServer) error
128}
129
130func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) {
131 s.RegisterService(&_GRPCBroker_serviceDesc, srv)
132}
133
134func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error {
135 return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream})
136}
137
138type GRPCBroker_StartStreamServer interface {
139 Send(*ConnInfo) error
140 Recv() (*ConnInfo, error)
141 grpc.ServerStream
142}
143
144type gRPCBrokerStartStreamServer struct {
145 grpc.ServerStream
146}
147
148func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error {
149 return x.ServerStream.SendMsg(m)
150}
151
152func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) {
153 m := new(ConnInfo)
154 if err := x.ServerStream.RecvMsg(m); err != nil {
155 return nil, err
156 }
157 return m, nil
158}
159
160var _GRPCBroker_serviceDesc = grpc.ServiceDesc{
161 ServiceName: "plugin.GRPCBroker",
162 HandlerType: (*GRPCBrokerServer)(nil),
163 Methods: []grpc.MethodDesc{},
164 Streams: []grpc.StreamDesc{
165 {
166 StreamName: "StartStream",
167 Handler: _GRPCBroker_StartStream_Handler,
168 ServerStreams: true,
169 ClientStreams: true,
170 },
171 },
172 Metadata: "grpc_broker.proto",
173}
174
175func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor0) }
176
177var fileDescriptor0 = []byte{
178 // 170 bytes of a gzipped FileDescriptorProto
179 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48,
180 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b,
181 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b,
182 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91,
183 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7,
184 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20,
185 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc,
186 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1,
187 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b,
188 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x93, 0xd8, 0xc0, 0x4e, 0x36, 0x06, 0x04, 0x00, 0x00,
189 0xff, 0xff, 0x7b, 0x5d, 0xfb, 0xe1, 0xc7, 0x00, 0x00, 0x00,
190}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto
new file mode 100644
index 0000000..f578348
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto
@@ -0,0 +1,14 @@
1syntax = "proto3";
2package plugin;
3
4message ConnInfo {
5 uint32 service_id = 1;
6 string network = 2;
7 string address = 3;
8}
9
10service GRPCBroker {
11 rpc StartStream(stream ConnInfo) returns (stream ConnInfo);
12}
13
14
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
new file mode 100644
index 0000000..44294d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
@@ -0,0 +1,107 @@
1package plugin
2
3import (
4 "crypto/tls"
5 "fmt"
6 "net"
7 "time"
8
9 "golang.org/x/net/context"
10 "google.golang.org/grpc"
11 "google.golang.org/grpc/credentials"
12 "google.golang.org/grpc/health/grpc_health_v1"
13)
14
15func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) {
16 // Build dialing options.
17 opts := make([]grpc.DialOption, 0, 5)
18
19 // We use a custom dialer so that we can connect over unix domain sockets
20 opts = append(opts, grpc.WithDialer(dialer))
21
22 // go-plugin expects to block the connection
23 opts = append(opts, grpc.WithBlock())
24
25 // Fail right away
26 opts = append(opts, grpc.FailOnNonTempDialError(true))
27
28 // If we have no TLS configuration set, we need to explicitly tell grpc
29 // that we're connecting with an insecure connection.
30 if tls == nil {
31 opts = append(opts, grpc.WithInsecure())
32 } else {
33 opts = append(opts, grpc.WithTransportCredentials(
34 credentials.NewTLS(tls)))
35 }
36
37 // Connect. Note the first parameter is unused because we use a custom
38 // dialer that has the state to see the address.
39 conn, err := grpc.Dial("unused", opts...)
40 if err != nil {
41 return nil, err
42 }
43
44 return conn, nil
45}
46
47// newGRPCClient creates a new GRPCClient. The Client argument is expected
48// to be successfully started already with a lock held.
49func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) {
50 conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer)
51 if err != nil {
52 return nil, err
53 }
54
55 // Start the broker.
56 brokerGRPCClient := newGRPCBrokerClient(conn)
57 broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig)
58 go broker.Run()
59 go brokerGRPCClient.StartStream()
60
61 return &GRPCClient{
62 Conn: conn,
63 Plugins: c.config.Plugins,
64 doneCtx: doneCtx,
65 broker: broker,
66 }, nil
67}
68
69// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types.
70type GRPCClient struct {
71 Conn *grpc.ClientConn
72 Plugins map[string]Plugin
73
74 doneCtx context.Context
75 broker *GRPCBroker
76}
77
78// ClientProtocol impl.
79func (c *GRPCClient) Close() error {
80 c.broker.Close()
81 return c.Conn.Close()
82}
83
84// ClientProtocol impl.
85func (c *GRPCClient) Dispense(name string) (interface{}, error) {
86 raw, ok := c.Plugins[name]
87 if !ok {
88 return nil, fmt.Errorf("unknown plugin type: %s", name)
89 }
90
91 p, ok := raw.(GRPCPlugin)
92 if !ok {
93 return nil, fmt.Errorf("plugin %q doesn't support gRPC", name)
94 }
95
96 return p.GRPCClient(c.doneCtx, c.broker, c.Conn)
97}
98
99// ClientProtocol impl.
100func (c *GRPCClient) Ping() error {
101 client := grpc_health_v1.NewHealthClient(c.Conn)
102 _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
103 Service: GRPCServiceName,
104 })
105
106 return err
107}
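On the host side, the GRPCClient above is reached through the regular plugin.Client once gRPC is listed in AllowedProtocols. A hedged sketch; the handshake config, plugin map, binary path, and plugin name are placeholders:

package example

import (
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func dispense(handshake plugin.HandshakeConfig, plugins map[string]plugin.Plugin) (interface{}, error) {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         plugins,
		Cmd:             exec.Command("./example-plugin"), // hypothetical binary
		AllowedProtocols: []plugin.Protocol{
			plugin.ProtocolNetRPC,
			plugin.ProtocolGRPC,
		},
	})

	// Client() returns an RPCClient or GRPCClient depending on what the
	// plugin negotiated; Dispense then resolves the named plugin.
	proto, err := client.Client()
	if err != nil {
		client.Kill()
		return nil, err
	}
	return proto.Dispense("example") // hypothetical plugin name
}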
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
new file mode 100644
index 0000000..3a72739
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
@@ -0,0 +1,132 @@
1package plugin
2
3import (
4 "bytes"
5 "crypto/tls"
6 "encoding/json"
7 "fmt"
8 "io"
9 "net"
10
11 "google.golang.org/grpc"
12 "google.golang.org/grpc/credentials"
13 "google.golang.org/grpc/health"
14 "google.golang.org/grpc/health/grpc_health_v1"
15)
16
17// GRPCServiceName is the name of the service that the health check should
18// return as passing.
19const GRPCServiceName = "plugin"
20
21// DefaultGRPCServer can be used with the "GRPCServer" field for Server
22// as a default factory method to create a gRPC server with no extra options.
23func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
24 return grpc.NewServer(opts...)
25}
26
27// GRPCServer is a ServerType implementation that serves plugins over
28// gRPC. This allows plugins to easily be written for other languages.
29//
30// The GRPCServer outputs a custom configuration as a base64-encoded
31// JSON structure represented by the GRPCServerConfig config structure.
32type GRPCServer struct {
33 // Plugins are the list of plugins to serve.
34 Plugins map[string]Plugin
35
36 // Server is the actual server that will accept connections. This
37 // will be used for plugin registration as well.
38 Server func([]grpc.ServerOption) *grpc.Server
39
40 // TLS should be the TLS configuration if available. If this is nil,
41 // the connection will not have transport security.
42 TLS *tls.Config
43
44 // DoneCh is the channel that is closed when this server has exited.
45 DoneCh chan struct{}
46
 47 // Stdout/Stderr are the readers for stdout/stderr that will be copied
48 // to the stdout/stderr connection that is output.
49 Stdout io.Reader
50 Stderr io.Reader
51
52 config GRPCServerConfig
53 server *grpc.Server
54 broker *GRPCBroker
55}
56
57// ServerProtocol impl.
58func (s *GRPCServer) Init() error {
59 // Create our server
60 var opts []grpc.ServerOption
61 if s.TLS != nil {
62 opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
63 }
64 s.server = s.Server(opts)
65
66 // Register the health service
67 healthCheck := health.NewServer()
68 healthCheck.SetServingStatus(
69 GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
70 grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
71
72 // Register the broker service
73 brokerServer := newGRPCBrokerServer()
74 RegisterGRPCBrokerServer(s.server, brokerServer)
75 s.broker = newGRPCBroker(brokerServer, s.TLS)
76 go s.broker.Run()
77
78 // Register all our plugins onto the gRPC server.
79 for k, raw := range s.Plugins {
80 p, ok := raw.(GRPCPlugin)
81 if !ok {
82 return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
83 }
84
85 if err := p.GRPCServer(s.broker, s.server); err != nil {
 86   return fmt.Errorf("error registering %q: %s", k, err)
87 }
88 }
89
90 return nil
91}
92
93// Stop calls Stop on the underlying grpc.Server
94func (s *GRPCServer) Stop() {
95 s.server.Stop()
96}
97
98// GracefulStop calls GracefulStop on the underlying grpc.Server
99func (s *GRPCServer) GracefulStop() {
100 s.server.GracefulStop()
101}
102
103// Config is the GRPCServerConfig encoded as JSON then base64.
104func (s *GRPCServer) Config() string {
105 // Create a buffer that will contain our final contents
106 var buf bytes.Buffer
107
108 // Wrap the base64 encoding with JSON encoding.
109 if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
110  // We panic since this shouldn't happen under any scenario. We
111 // carefully control the structure being encoded here and it should
112 // always be successful.
113 panic(err)
114 }
115
116 return buf.String()
117}
118
119func (s *GRPCServer) Serve(lis net.Listener) {
120 // Start serving in a goroutine
121 go s.server.Serve(lis)
122
123 // Wait until graceful completion
124 <-s.DoneCh
125}
126
127// GRPCServerConfig is the extra configuration passed along for consumers
128// to facilitate using GRPC plugins.
129type GRPCServerConfig struct {
130 StdoutAddr string `json:"stdout_addr"`
131 StderrAddr string `json:"stderr_addr"`
132}
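A hedged sketch of driving the GRPCServer type above directly on a listener (roughly what the test helpers later in this diff do), rather than going through Serve; the serveDirect name is hypothetical:

package example

import (
	"bytes"
	"net"

	plugin "github.com/hashicorp/go-plugin"
)

func serveDirect(plugins map[string]plugin.Plugin, l net.Listener) (*plugin.GRPCServer, error) {
	s := &plugin.GRPCServer{
		Plugins: plugins,
		Server:  plugin.DefaultGRPCServer,
		Stdout:  new(bytes.Buffer),
		Stderr:  new(bytes.Buffer),
		DoneCh:  make(chan struct{}),
	}

	// Init registers the health service, the broker service, and every
	// GRPCPlugin in the map onto the underlying grpc.Server.
	if err := s.Init(); err != nil {
		return nil, err
	}

	// Serve blocks until DoneCh is closed, so run it in the background.
	go s.Serve(l)
	return s, nil
}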
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
new file mode 100644
index 0000000..2996c14
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go
@@ -0,0 +1,73 @@
1package plugin
2
3import (
4 "encoding/json"
5 "time"
6)
7
8// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
9type logEntry struct {
10 Message string `json:"@message"`
11 Level string `json:"@level"`
12 Timestamp time.Time `json:"timestamp"`
13 KVPairs []*logEntryKV `json:"kv_pairs"`
14}
15
16// logEntryKV is a key value pair within the Output payload
17type logEntryKV struct {
18 Key string `json:"key"`
19 Value interface{} `json:"value"`
20}
21
22// flattenKVPairs is used to flatten KVPair slice into []interface{}
23// for hclog consumption.
24func flattenKVPairs(kvs []*logEntryKV) []interface{} {
25 var result []interface{}
26 for _, kv := range kvs {
27 result = append(result, kv.Key)
28 result = append(result, kv.Value)
29 }
30
31 return result
32}
33
34// parseJSON handles parsing JSON output
35func parseJSON(input string) (*logEntry, error) {
36 var raw map[string]interface{}
37 entry := &logEntry{}
38
39 err := json.Unmarshal([]byte(input), &raw)
40 if err != nil {
41 return nil, err
42 }
43
44 // Parse hclog-specific objects
45 if v, ok := raw["@message"]; ok {
46 entry.Message = v.(string)
47 delete(raw, "@message")
48 }
49
50 if v, ok := raw["@level"]; ok {
51 entry.Level = v.(string)
52 delete(raw, "@level")
53 }
54
55 if v, ok := raw["@timestamp"]; ok {
56 t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
57 if err != nil {
58 return nil, err
59 }
60 entry.Timestamp = t
61 delete(raw, "@timestamp")
62 }
63
64 // Parse dynamic KV args from the hclog payload.
65 for k, v := range raw {
66 entry.KVPairs = append(entry.KVPairs, &logEntryKV{
67 Key: k,
68 Value: v,
69 })
70 }
71
72 return entry, nil
73}
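For reference, a standalone sketch of the JSON shape hclog writes to the plugin's stderr and how the extra keys end up as flattened key/value pairs; it mirrors, but does not call, the unexported parseJSON and flattenKVPairs above, and the sample values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A made-up hclog line as it would arrive on stderr.
	line := `{"@level":"debug","@message":"request served","@timestamp":"2018-01-02T15:04:05.000000Z","path":"/v1/example","code":200}`

	var raw map[string]interface{}
	if err := json.Unmarshal([]byte(line), &raw); err != nil {
		panic(err)
	}

	level, msg := raw["@level"], raw["@message"]
	delete(raw, "@level")
	delete(raw, "@message")
	delete(raw, "@timestamp")

	// The remaining keys become the dynamic KV args: [k1, v1, k2, v2, ...].
	var kvs []interface{}
	for k, v := range raw {
		kvs = append(kvs, k, v)
	}

	fmt.Println(level, msg, kvs)
}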
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
index 37c8fd6..79d9674 100644
--- a/vendor/github.com/hashicorp/go-plugin/plugin.go
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -9,7 +9,11 @@
9package plugin 9package plugin
10 10
11import ( 11import (
12 "context"
13 "errors"
12 "net/rpc" 14 "net/rpc"
15
16 "google.golang.org/grpc"
13) 17)
14 18
15// Plugin is the interface that is implemented to serve/connect to an 19// Plugin is the interface that is implemented to serve/connect to an
@@ -23,3 +27,32 @@ type Plugin interface {
23 // serving that communicates to the server end of the plugin. 27 // serving that communicates to the server end of the plugin.
24 Client(*MuxBroker, *rpc.Client) (interface{}, error) 28 Client(*MuxBroker, *rpc.Client) (interface{}, error)
25} 29}
30
31// GRPCPlugin is the interface that is implemented to serve/connect to
32// a plugin over gRPC.
33type GRPCPlugin interface {
34 // GRPCServer should register this plugin for serving with the
35 // given GRPCServer. Unlike Plugin.Server, this is only called once
36 // since gRPC plugins serve singletons.
37 GRPCServer(*GRPCBroker, *grpc.Server) error
38
39 // GRPCClient should return the interface implementation for the plugin
40 // you're serving via gRPC. The provided context will be canceled by
41 // go-plugin in the event of the plugin process exiting.
42 GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
43}
44
45// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
46// Server and Client functions. This will effectively disable support for
47// net/rpc based plugins.
48//
49// This struct can be embedded in your struct.
50type NetRPCUnsupportedPlugin struct{}
51
52func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) {
53 return nil, errors.New("net/rpc plugin protocol not supported")
54}
55
56func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) {
57 return nil, errors.New("net/rpc plugin protocol not supported")
58}
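A hedged sketch of a plugin type built on the new interfaces above: it embeds NetRPCUnsupportedPlugin so the net/rpc paths return errors, and implements GRPCPlugin. ExamplePlugin and the commented-out examplepb stubs are hypothetical; a real plugin would register and wrap protoc-generated code here.

package example

import (
	"context"

	plugin "github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
)

type ExamplePlugin struct {
	plugin.NetRPCUnsupportedPlugin // Server/Client return "not supported" errors
}

func (p *ExamplePlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	// A real implementation would do something like:
	//   examplepb.RegisterExampleServer(s, &exampleServerImpl{})
	return nil
}

func (p *ExamplePlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	// A real implementation would wrap the connection, e.g.:
	//   return examplepb.NewExampleClient(c), nil
	return c, nil
}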
diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go
new file mode 100644
index 0000000..0cfc19e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/protocol.go
@@ -0,0 +1,45 @@
1package plugin
2
3import (
4 "io"
5 "net"
6)
7
8// Protocol is an enum representing the types of protocols.
9type Protocol string
10
11const (
12 ProtocolInvalid Protocol = ""
13 ProtocolNetRPC Protocol = "netrpc"
14 ProtocolGRPC Protocol = "grpc"
15)
16
17// ServerProtocol is an interface that must be implemented for new plugin
18// protocols to be servers.
19type ServerProtocol interface {
20 // Init is called once to configure and initialize the protocol, but
21 // not start listening. This is the point at which all validation should
22 // be done and errors returned.
23 Init() error
24
25 // Config is extra configuration to be outputted to stdout. This will
26 // be automatically base64 encoded to ensure it can be parsed properly.
27 // This can be an empty string if additional configuration is not needed.
28 Config() string
29
30 // Serve is called to serve connections on the given listener. This should
31 // continue until the listener is closed.
32 Serve(net.Listener)
33}
34
35// ClientProtocol is an interface that must be implemented for new plugin
36// protocols to be clients.
37type ClientProtocol interface {
38 io.Closer
39
40 // Dispense dispenses a new instance of the plugin with the given name.
41 Dispense(string) (interface{}, error)
42
43 // Ping checks that the client connection is still healthy.
44 Ping() error
45}
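Since ClientProtocol above is implemented by both RPCClient and GRPCClient, a host can health-check a plugin the same way regardless of the negotiated protocol. A hedged sketch; the watch helper is hypothetical:

package example

import (
	"time"

	plugin "github.com/hashicorp/go-plugin"
)

// watch pings the plugin at the given interval and reports the first failure.
func watch(cp plugin.ClientProtocol, interval time.Duration, dead chan<- error) {
	for {
		if err := cp.Ping(); err != nil {
			dead <- err
			return
		}
		time.Sleep(interval)
	}
}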
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
index 29f9bf0..f30a4b1 100644
--- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
@@ -1,6 +1,7 @@
1package plugin 1package plugin
2 2
3import ( 3import (
4 "crypto/tls"
4 "fmt" 5 "fmt"
5 "io" 6 "io"
6 "net" 7 "net"
@@ -19,6 +20,42 @@ type RPCClient struct {
19 stdout, stderr net.Conn 20 stdout, stderr net.Conn
20} 21}
21 22
23// newRPCClient creates a new RPCClient. The Client argument is expected
24// to be successfully started already with a lock held.
25func newRPCClient(c *Client) (*RPCClient, error) {
26 // Connect to the client
27 conn, err := net.Dial(c.address.Network(), c.address.String())
28 if err != nil {
29 return nil, err
30 }
31 if tcpConn, ok := conn.(*net.TCPConn); ok {
32 // Make sure to set keep alive so that the connection doesn't die
33 tcpConn.SetKeepAlive(true)
34 }
35
36 if c.config.TLSConfig != nil {
37 conn = tls.Client(conn, c.config.TLSConfig)
38 }
39
40 // Create the actual RPC client
41 result, err := NewRPCClient(conn, c.config.Plugins)
42 if err != nil {
43 conn.Close()
44 return nil, err
45 }
46
47 // Begin the stream syncing so that stdin, out, err work properly
48 err = result.SyncStreams(
49 c.config.SyncStdout,
50 c.config.SyncStderr)
51 if err != nil {
52 result.Close()
53 return nil, err
54 }
55
56 return result, nil
57}
58
22// NewRPCClient creates a client from an already-open connection-like value. 59// NewRPCClient creates a client from an already-open connection-like value.
23// Dial is typically used instead. 60// Dial is typically used instead.
24func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { 61func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
@@ -121,3 +158,13 @@ func (c *RPCClient) Dispense(name string) (interface{}, error) {
121 158
122 return p.Client(c.broker, rpc.NewClient(conn)) 159 return p.Client(c.broker, rpc.NewClient(conn))
123} 160}
161
162// Ping pings the connection to ensure it is still alive.
163//
164// The error from the RPC call is returned exactly if you want to inspect
165// it for further error analysis. Any error returned from here would indicate
166// that the connection to the plugin is not healthy.
167func (c *RPCClient) Ping() error {
168 var empty struct{}
169 return c.control.Call("Control.Ping", true, &empty)
170}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
index 3984dc8..5bb18dd 100644
--- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -34,10 +34,14 @@ type RPCServer struct {
34 lock sync.Mutex 34 lock sync.Mutex
35} 35}
36 36
37// Accept accepts connections on a listener and serves requests for 37// ServerProtocol impl.
38// each incoming connection. Accept blocks; the caller typically invokes 38func (s *RPCServer) Init() error { return nil }
39// it in a go statement. 39
40func (s *RPCServer) Accept(lis net.Listener) { 40// ServerProtocol impl.
41func (s *RPCServer) Config() string { return "" }
42
43// ServerProtocol impl.
44func (s *RPCServer) Serve(lis net.Listener) {
41 for { 45 for {
42 conn, err := lis.Accept() 46 conn, err := lis.Accept()
43 if err != nil { 47 if err != nil {
@@ -122,6 +126,14 @@ type controlServer struct {
122 server *RPCServer 126 server *RPCServer
123} 127}
124 128
129// Ping can be called to verify the connection (and likely the binary)
130// is still alive to a plugin.
131func (c *controlServer) Ping(
132 null bool, response *struct{}) error {
133 *response = struct{}{}
134 return nil
135}
136
125func (c *controlServer) Quit( 137func (c *controlServer) Quit(
126 null bool, response *struct{}) error { 138 null bool, response *struct{}) error {
127 // End the server 139 // End the server
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
index b5c5270..1e808b9 100644
--- a/vendor/github.com/hashicorp/go-plugin/server.go
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -1,6 +1,8 @@
1package plugin 1package plugin
2 2
3import ( 3import (
4 "crypto/tls"
5 "encoding/base64"
4 "errors" 6 "errors"
5 "fmt" 7 "fmt"
6 "io/ioutil" 8 "io/ioutil"
@@ -11,6 +13,10 @@ import (
11 "runtime" 13 "runtime"
12 "strconv" 14 "strconv"
13 "sync/atomic" 15 "sync/atomic"
16
17 "github.com/hashicorp/go-hclog"
18
19 "google.golang.org/grpc"
14) 20)
15 21
16// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. 22// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
@@ -45,14 +51,41 @@ type ServeConfig struct {
45 // HandshakeConfig is the configuration that must match clients. 51 // HandshakeConfig is the configuration that must match clients.
46 HandshakeConfig 52 HandshakeConfig
47 53
54 // TLSProvider is a function that returns a configured tls.Config.
55 TLSProvider func() (*tls.Config, error)
56
48 // Plugins are the plugins that are served. 57 // Plugins are the plugins that are served.
49 Plugins map[string]Plugin 58 Plugins map[string]Plugin
59
60 // GRPCServer should be non-nil to enable serving the plugins over
61 // gRPC. This is a function to create the server when needed with the
62 // given server options. The server options populated by go-plugin will
63 // be for TLS if set. You may modify the input slice.
64 //
65 // Note that the grpc.Server will automatically be registered with
66 // the gRPC health checking service. This is not optional since go-plugin
67 // relies on this to implement Ping().
68 GRPCServer func([]grpc.ServerOption) *grpc.Server
69
70 // Logger is used to pass a logger into the server. If none is provided the
71 // server will create a default logger.
72 Logger hclog.Logger
73}
74
75// Protocol returns the protocol that this server should speak.
76func (c *ServeConfig) Protocol() Protocol {
77 result := ProtocolNetRPC
78 if c.GRPCServer != nil {
79 result = ProtocolGRPC
80 }
81
82 return result
50} 83}
51 84
52// Serve serves the plugins given by ServeConfig. 85// Serve serves the plugins given by ServeConfig.
53// 86//
54// Serve doesn't return until the plugin is done being executed. Any 87// Serve doesn't return until the plugin is done being executed. Any
55// errors will be outputted to the log. 88// errors will be outputted to os.Stderr.
56// 89//
57// This is the method that plugins should call in their main() functions. 90// This is the method that plugins should call in their main() functions.
58func Serve(opts *ServeConfig) { 91func Serve(opts *ServeConfig) {
@@ -77,6 +110,16 @@ func Serve(opts *ServeConfig) {
77 // Logging goes to the original stderr 110 // Logging goes to the original stderr
78 log.SetOutput(os.Stderr) 111 log.SetOutput(os.Stderr)
79 112
113 logger := opts.Logger
114 if logger == nil {
115 // internal logger to os.Stderr
116 logger = hclog.New(&hclog.LoggerOptions{
117 Level: hclog.Trace,
118 Output: os.Stderr,
119 JSONFormat: true,
120 })
121 }
122
80 // Create our new stdout, stderr files. These will override our built-in 123 // Create our new stdout, stderr files. These will override our built-in
81 // stdout/stderr so that it works across the stream boundary. 124 // stdout/stderr so that it works across the stream boundary.
82 stdout_r, stdout_w, err := os.Pipe() 125 stdout_r, stdout_w, err := os.Pipe()
@@ -93,30 +136,86 @@ func Serve(opts *ServeConfig) {
93 // Register a listener so we can accept a connection 136 // Register a listener so we can accept a connection
94 listener, err := serverListener() 137 listener, err := serverListener()
95 if err != nil { 138 if err != nil {
96 log.Printf("[ERR] plugin: plugin init: %s", err) 139 logger.Error("plugin init error", "error", err)
97 return 140 return
98 } 141 }
99 defer listener.Close() 142
143 // Close the listener on return. We wrap this in a func() on purpose
144 // because the "listener" reference may change to TLS.
145 defer func() {
146 listener.Close()
147 }()
148
149 var tlsConfig *tls.Config
150 if opts.TLSProvider != nil {
151 tlsConfig, err = opts.TLSProvider()
152 if err != nil {
153 logger.Error("plugin tls init", "error", err)
154 return
155 }
156 }
100 157
101 // Create the channel to tell us when we're done 158 // Create the channel to tell us when we're done
102 doneCh := make(chan struct{}) 159 doneCh := make(chan struct{})
103 160
104 // Create the RPC server to dispense 161 // Build the server type
105 server := &RPCServer{ 162 var server ServerProtocol
106 Plugins: opts.Plugins, 163 switch opts.Protocol() {
107 Stdout: stdout_r, 164 case ProtocolNetRPC:
108 Stderr: stderr_r, 165 // If we have a TLS configuration then we wrap the listener
109 DoneCh: doneCh, 166 // ourselves and do it at that level.
167 if tlsConfig != nil {
168 listener = tls.NewListener(listener, tlsConfig)
169 }
170
171 // Create the RPC server to dispense
172 server = &RPCServer{
173 Plugins: opts.Plugins,
174 Stdout: stdout_r,
175 Stderr: stderr_r,
176 DoneCh: doneCh,
177 }
178
179 case ProtocolGRPC:
180 // Create the gRPC server
181 server = &GRPCServer{
182 Plugins: opts.Plugins,
183 Server: opts.GRPCServer,
184 TLS: tlsConfig,
185 Stdout: stdout_r,
186 Stderr: stderr_r,
187 DoneCh: doneCh,
188 }
189
190 default:
191 panic("unknown server protocol: " + opts.Protocol())
110 } 192 }
111 193
194 // Initialize the servers
195 if err := server.Init(); err != nil {
196 logger.Error("protocol init", "error", err)
197 return
198 }
199
200 // Build the extra configuration
201 extra := ""
202 if v := server.Config(); v != "" {
203 extra = base64.StdEncoding.EncodeToString([]byte(v))
204 }
205 if extra != "" {
206 extra = "|" + extra
207 }
208
209 logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
210
112 // Output the address and service name to stdout so that core can bring it up. 211 // Output the address and service name to stdout so that core can bring it up.
113 log.Printf("[DEBUG] plugin: plugin address: %s %s\n", 212 fmt.Printf("%d|%d|%s|%s|%s%s\n",
114 listener.Addr().Network(), listener.Addr().String())
115 fmt.Printf("%d|%d|%s|%s\n",
116 CoreProtocolVersion, 213 CoreProtocolVersion,
117 opts.ProtocolVersion, 214 opts.ProtocolVersion,
118 listener.Addr().Network(), 215 listener.Addr().Network(),
119 listener.Addr().String()) 216 listener.Addr().String(),
217 opts.Protocol(),
218 extra)
120 os.Stdout.Sync() 219 os.Stdout.Sync()
121 220
122 // Eat the interrupts 221 // Eat the interrupts
@@ -127,9 +226,7 @@ func Serve(opts *ServeConfig) {
127 for { 226 for {
128 <-ch 227 <-ch
129 newCount := atomic.AddInt32(&count, 1) 228 newCount := atomic.AddInt32(&count, 1)
130 log.Printf( 229 logger.Debug("plugin received interrupt signal, ignoring", "count", newCount)
131 "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.",
132 newCount)
133 } 230 }
134 }() 231 }()
135 232
@@ -137,10 +234,8 @@ func Serve(opts *ServeConfig) {
137 os.Stdout = stdout_w 234 os.Stdout = stdout_w
138 os.Stderr = stderr_w 235 os.Stderr = stderr_w
139 236
140 // Serve 237 // Accept connections and wait for completion
141 go server.Accept(listener) 238 go server.Serve(listener)
142
143 // Wait for the graceful exit
144 <-doneCh 239 <-doneCh
145} 240}
146 241
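A hedged plugin-side sketch tying the server.go changes together: setting GRPCServer switches the negotiated protocol to gRPC, and TLSProvider supplies the tls.Config that Serve either wraps the listener with (net/rpc) or hands to the gRPC server. The handshake values, certificate paths, and plugin map are placeholders:

package main

import (
	"crypto/tls"

	plugin "github.com/hashicorp/go-plugin"
)

func tlsProvider() (*tls.Config, error) {
	// Hypothetical certificate/key files for the plugin process.
	cert, err := tls.LoadX509KeyPair("plugin.crt", "plugin.key")
	if err != nil {
		return nil, err
	}
	return &tls.Config{Certificates: []tls.Certificate{cert}}, nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{ // placeholder values
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_PLUGIN",
			MagicCookieValue: "example",
		},
		Plugins: map[string]plugin.Plugin{
			// "example": &ExamplePlugin{}, // a GRPCPlugin implementation
		},
		GRPCServer:  plugin.DefaultGRPCServer,
		TLSProvider: tlsProvider,
	})
}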
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
index 9086a1b..df29593 100644
--- a/vendor/github.com/hashicorp/go-plugin/testing.go
+++ b/vendor/github.com/hashicorp/go-plugin/testing.go
@@ -2,9 +2,12 @@ package plugin
2 2
3import ( 3import (
4 "bytes" 4 "bytes"
5 "context"
5 "net" 6 "net"
6 "net/rpc" 7 "net/rpc"
7 "testing" 8
9 "github.com/mitchellh/go-testing-interface"
10 "google.golang.org/grpc"
8) 11)
9 12
10// The testing file contains test helpers that you can use outside of 13// The testing file contains test helpers that you can use outside of
@@ -12,7 +15,7 @@ import (
12 15
13// TestConn is a helper function for returning a client and server 16// TestConn is a helper function for returning a client and server
14// net.Conn connected to each other. 17// net.Conn connected to each other.
15func TestConn(t *testing.T) (net.Conn, net.Conn) { 18func TestConn(t testing.T) (net.Conn, net.Conn) {
16 // Listen to any local port. This listener will be closed 19 // Listen to any local port. This listener will be closed
17 // after a single connection is established. 20 // after a single connection is established.
18 l, err := net.Listen("tcp", "127.0.0.1:0") 21 l, err := net.Listen("tcp", "127.0.0.1:0")
@@ -46,7 +49,7 @@ func TestConn(t *testing.T) (net.Conn, net.Conn) {
46} 49}
47 50
48// TestRPCConn returns a rpc client and server connected to each other. 51// TestRPCConn returns a rpc client and server connected to each other.
49func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { 52func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
50 clientConn, serverConn := TestConn(t) 53 clientConn, serverConn := TestConn(t)
51 54
52 server := rpc.NewServer() 55 server := rpc.NewServer()
@@ -58,7 +61,7 @@ func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
58 61
59// TestPluginRPCConn returns a plugin RPC client and server that are connected 62// TestPluginRPCConn returns a plugin RPC client and server that are connected
60// together and configured. 63// together and configured.
61func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { 64func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
62 // Create two net.Conns we can use to shuttle our control connection 65 // Create two net.Conns we can use to shuttle our control connection
63 clientConn, serverConn := TestConn(t) 66 clientConn, serverConn := TestConn(t)
64 67
@@ -74,3 +77,78 @@ func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServ
74 77
75 return client, server 78 return client, server
76} 79}
80
81// TestGRPCConn returns a gRPC client conn and grpc server that are connected
82// together and configured. The register function is used to register services
83// prior to the Serve call. This is used to test gRPC connections.
84func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) {
85 // Create a listener
86 l, err := net.Listen("tcp", "127.0.0.1:0")
87 if err != nil {
88 t.Fatalf("err: %s", err)
89 }
90
91 server := grpc.NewServer()
92 register(server)
93 go server.Serve(l)
94
95 // Connect to the server
96 conn, err := grpc.Dial(
97 l.Addr().String(),
98 grpc.WithBlock(),
99 grpc.WithInsecure())
100 if err != nil {
101 t.Fatalf("err: %s", err)
102 }
103
104 // Connection successful, close the listener
105 l.Close()
106
107 return conn, server
108}
109
110// TestPluginGRPCConn returns a plugin gRPC client and server that are connected
111// together and configured. This is used to test gRPC connections.
112func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
113 // Create a listener
114 l, err := net.Listen("tcp", "127.0.0.1:0")
115 if err != nil {
116 t.Fatalf("err: %s", err)
117 }
118
119 // Start up the server
120 server := &GRPCServer{
121 Plugins: ps,
122 Server: DefaultGRPCServer,
123 Stdout: new(bytes.Buffer),
124 Stderr: new(bytes.Buffer),
125 }
126 if err := server.Init(); err != nil {
127 t.Fatalf("err: %s", err)
128 }
129 go server.Serve(l)
130
131 // Connect to the server
132 conn, err := grpc.Dial(
133 l.Addr().String(),
134 grpc.WithBlock(),
135 grpc.WithInsecure())
136 if err != nil {
137 t.Fatalf("err: %s", err)
138 }
139
140 brokerGRPCClient := newGRPCBrokerClient(conn)
141 broker := newGRPCBroker(brokerGRPCClient, nil)
142 go broker.Run()
143 go brokerGRPCClient.StartStream()
144
145 // Create the client
146 client := &GRPCClient{
147 Conn: conn,
148 Plugins: ps,
149 broker: broker,
150 doneCtx: context.Background(),
151 }
152
153 return client, server
154}
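A hedged sketch of using the TestPluginGRPCConn helper above from a normal Go test; *testing.T satisfies the go-testing-interface type the helpers now take. The test name and plugin map are placeholders:

package example

import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

func TestExampleOverGRPC(t *testing.T) {
	plugins := map[string]plugin.Plugin{
		// "example": &ExamplePlugin{}, // a GRPCPlugin implementation
	}

	client, server := plugin.TestPluginGRPCConn(t, plugins)
	defer server.Stop()
	defer client.Close()

	// The health service registered by GRPCServer.Init backs Ping.
	if err := client.Ping(); err != nil {
		t.Fatalf("ping failed: %s", err)
	}
}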
diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE
new file mode 100644
index 0000000..be2cc4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/LICENSE
@@ -0,0 +1,362 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. "Contributor"
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. "Contributor Version"
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor's Contribution.
14
151.3. "Contribution"
16
17 means Covered Software of a particular Contributor.
18
191.4. "Covered Software"
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. "Incompatible With Secondary Licenses"
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of
33 version 1.1 or earlier of the License, but not also under the terms of
34 a Secondary License.
35
361.6. "Executable Form"
37
38 means any form of the work other than Source Code Form.
39
401.7. "Larger Work"
41
42 means a work that combines Covered Software with other material, in a
43 separate file or files, that is not Covered Software.
44
451.8. "License"
46
47 means this document.
48
491.9. "Licensable"
50
51 means having the right to grant, to the maximum extent possible, whether
52 at the time of the initial grant or subsequently, any and all of the
53 rights conveyed by this License.
54
551.10. "Modifications"
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to,
60 deletion from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. "Patent Claims" of a Contributor
65
66 means any patent claim(s), including without limitation, method,
67 process, and apparatus claims, in any patent Licensable by such
68 Contributor that would be infringed, but for the grant of the License,
69 by the making, using, selling, offering for sale, having made, import,
70 or transfer of either its Contributions or its Contributor Version.
71
721.12. "Secondary License"
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. "Source Code Form"
79
80 means the form of the work preferred for making modifications.
81
821.14. "You" (or "Your")
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, "You" includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, "control" means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or
104 as part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its
108 Contributions or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution
113 become effective for each Contribution on the date the Contributor first
114 distributes such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under
119 this License. No additional rights or licenses will be implied from the
120 distribution or licensing of Covered Software under this License.
121 Notwithstanding Section 2.1(b) above, no patent license is granted by a
122 Contributor:
123
124 a. for any code that a Contributor has removed from Covered Software; or
125
126 b. for infringements caused by: (i) Your and any other third party's
127 modifications of Covered Software, or (ii) the combination of its
128 Contributions with other software (except as part of its Contributor
129 Version); or
130
131 c. under Patent Claims infringed by Covered Software in the absence of
132 its Contributions.
133
134 This License does not grant any rights in the trademarks, service marks,
135 or logos of any Contributor (except as may be necessary to comply with
136 the notice requirements in Section 3.4).
137
1382.4. Subsequent Licenses
139
140 No Contributor makes additional grants as a result of Your choice to
141 distribute the Covered Software under a subsequent version of this
142 License (see Section 10.2) or under the terms of a Secondary License (if
143 permitted under the terms of Section 3.3).
144
1452.5. Representation
146
147 Each Contributor represents that the Contributor believes its
148 Contributions are its original creation(s) or it has sufficient rights to
149 grant the rights to its Contributions conveyed by this License.
150
1512.6. Fair Use
152
153 This License is not intended to limit any rights You have under
154 applicable copyright doctrines of fair use, fair dealing, or other
155 equivalents.
156
1572.7. Conditions
158
159 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
160 Section 2.1.
161
162
1633. Responsibilities
164
1653.1. Distribution of Source Form
166
167 All distribution of Covered Software in Source Code Form, including any
168 Modifications that You create or to which You contribute, must be under
169 the terms of this License. You must inform recipients that the Source
170 Code Form of the Covered Software is governed by the terms of this
171 License, and how they can obtain a copy of this License. You may not
172 attempt to alter or restrict the recipients' rights in the Source Code
173 Form.
174
1753.2. Distribution of Executable Form
176
177 If You distribute Covered Software in Executable Form then:
178
179 a. such Covered Software must also be made available in Source Code Form,
180 as described in Section 3.1, and You must inform recipients of the
181 Executable Form how they can obtain a copy of such Source Code Form by
182 reasonable means in a timely manner, at a charge no more than the cost
183 of distribution to the recipient; and
184
185 b. You may distribute such Executable Form under the terms of this
186 License, or sublicense it under different terms, provided that the
187 license for the Executable Form does not attempt to limit or alter the
188 recipients' rights in the Source Code Form under this License.
189
1903.3. Distribution of a Larger Work
191
192 You may create and distribute a Larger Work under terms of Your choice,
193 provided that You also comply with the requirements of this License for
194 the Covered Software. If the Larger Work is a combination of Covered
195 Software with a work governed by one or more Secondary Licenses, and the
196 Covered Software is not Incompatible With Secondary Licenses, this
197 License permits You to additionally distribute such Covered Software
198 under the terms of such Secondary License(s), so that the recipient of
199 the Larger Work may, at their option, further distribute the Covered
200 Software under the terms of either this License or such Secondary
201 License(s).
202
2033.4. Notices
204
205 You may not remove or alter the substance of any license notices
206 (including copyright notices, patent notices, disclaimers of warranty, or
207 limitations of liability) contained within the Source Code Form of the
208 Covered Software, except that You may alter any license notices to the
209 extent required to remedy known factual inaccuracies.
210
2113.5. Application of Additional Terms
212
213 You may choose to offer, and to charge a fee for, warranty, support,
214 indemnity or liability obligations to one or more recipients of Covered
215 Software. However, You may do so only on Your own behalf, and not on
216 behalf of any Contributor. You must make it absolutely clear that any
217 such warranty, support, indemnity, or liability obligation is offered by
218 You alone, and You hereby agree to indemnify every Contributor for any
219 liability incurred by such Contributor as a result of warranty, support,
220 indemnity or liability terms You offer. You may include additional
221 disclaimers of warranty and limitations of liability specific to any
222 jurisdiction.
223
2244. Inability to Comply Due to Statute or Regulation
225
226 If it is impossible for You to comply with any of the terms of this License
227 with respect to some or all of the Covered Software due to statute,
228 judicial order, or regulation then You must: (a) comply with the terms of
229 this License to the maximum extent possible; and (b) describe the
230 limitations and the code they affect. Such description must be placed in a
231 text file included with all distributions of the Covered Software under
232 this License. Except to the extent prohibited by statute or regulation,
233 such description must be sufficiently detailed for a recipient of ordinary
234 skill to be able to understand it.
235
2365. Termination
237
2385.1. The rights granted under this License will terminate automatically if You
239 fail to comply with any of its terms. However, if You become compliant,
240 then the rights granted under this License from a particular Contributor
241 are reinstated (a) provisionally, unless and until such Contributor
242 explicitly and finally terminates Your grants, and (b) on an ongoing
243 basis, if such Contributor fails to notify You of the non-compliance by
244 some reasonable means prior to 60 days after You have come back into
245 compliance. Moreover, Your grants from a particular Contributor are
246 reinstated on an ongoing basis if such Contributor notifies You of the
247 non-compliance by some reasonable means, this is the first time You have
248 received notice of non-compliance with this License from such
249 Contributor, and You become compliant prior to 30 days after Your receipt
250 of the notice.
251
2525.2. If You initiate litigation against any entity by asserting a patent
253 infringement claim (excluding declaratory judgment actions,
254 counter-claims, and cross-claims) alleging that a Contributor Version
255 directly or indirectly infringes any patent, then the rights granted to
256 You by any and all Contributors for the Covered Software under Section
257 2.1 of this License shall terminate.
258
2595.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
260 license agreements (excluding distributors and resellers) which have been
261 validly granted by You or Your distributors under this License prior to
262 termination shall survive termination.
263
2646. Disclaimer of Warranty
265
266 Covered Software is provided under this License on an "as is" basis,
267 without warranty of any kind, either expressed, implied, or statutory,
268 including, without limitation, warranties that the Covered Software is free
269 of defects, merchantable, fit for a particular purpose or non-infringing.
270 The entire risk as to the quality and performance of the Covered Software
271 is with You. Should any Covered Software prove defective in any respect,
272 You (not any Contributor) assume the cost of any necessary servicing,
273 repair, or correction. This disclaimer of warranty constitutes an essential
274 part of this License. No use of any Covered Software is authorized under
275 this License except under this disclaimer.
276
2777. Limitation of Liability
278
279 Under no circumstances and under no legal theory, whether tort (including
280 negligence), contract, or otherwise, shall any Contributor, or anyone who
281 distributes Covered Software as permitted above, be liable to You for any
282 direct, indirect, special, incidental, or consequential damages of any
283 character including, without limitation, damages for lost profits, loss of
284 goodwill, work stoppage, computer failure or malfunction, or any and all
285 other commercial damages or losses, even if such party shall have been
286 informed of the possibility of such damages. This limitation of liability
287 shall not apply to liability for death or personal injury resulting from
288 such party's negligence to the extent applicable law prohibits such
289 limitation. Some jurisdictions do not allow the exclusion or limitation of
290 incidental or consequential damages, so this exclusion and limitation may
291 not apply to You.
292
2938. Litigation
294
295 Any litigation relating to this License may be brought only in the courts
296 of a jurisdiction where the defendant maintains its principal place of
297 business and such litigation shall be governed by laws of that
298 jurisdiction, without reference to its conflict-of-law provisions. Nothing
299 in this Section shall prevent a party's ability to bring cross-claims or
300 counter-claims.
301
3029. Miscellaneous
303
304 This License represents the complete agreement concerning the subject
305 matter hereof. If any provision of this License is held to be
306 unenforceable, such provision shall be reformed only to the extent
307 necessary to make it enforceable. Any law or regulation which provides that
308 the language of a contract shall be construed against the drafter shall not
309 be used to construe this License against a Contributor.
310
311
31210. Versions of the License
313
31410.1. New Versions
315
316 Mozilla Foundation is the license steward. Except as provided in Section
317 10.3, no one other than the license steward has the right to modify or
318 publish new versions of this License. Each version will be given a
319 distinguishing version number.
320
32110.2. Effect of New Versions
322
323 You may distribute the Covered Software under the terms of the version
324 of the License under which You originally received the Covered Software,
325 or under the terms of any subsequent version published by the license
326 steward.
327
32810.3. Modified Versions
329
330 If you create software not governed by this License, and you want to
331 create a new license for such software, you may create and use a
332 modified version of this License if you rename the license and remove
333 any references to the name of the license steward (except to note that
334 such modified license differs from this License).
335
33610.4. Distributing Source Code Form that is Incompatible With Secondary
337 Licenses If You choose to distribute Source Code Form that is
338 Incompatible With Secondary Licenses under the terms of this version of
339 the License, the notice described in Exhibit B of this License must be
340 attached.
341
342Exhibit A - Source Code Form License Notice
343
344 This Source Code Form is subject to the
345 terms of the Mozilla Public License, v.
346 2.0. If a copy of the MPL was not
347 distributed with this file, You can
348 obtain one at
349 http://mozilla.org/MPL/2.0/.
350
351If it is not possible or desirable to put the notice in a particular file,
352then You may include the notice in a location (such as a LICENSE file in a
353relevant directory) where a recipient would be likely to look for such a
354notice.
355
356You may add additional accurate notices of copyright ownership.
357
358Exhibit B - "Incompatible With Secondary Licenses" Notice
359
360 This Source Code Form is "Incompatible
361 With Secondary Licenses", as defined by
362 the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md
new file mode 100644
index 0000000..02ece33
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/README.md
@@ -0,0 +1,10 @@
1# go-safetemp
2[![Godoc](https://godoc.org/github.com/hashicorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp)
3
4Functions for safely working with temporary directories and files.
5
6## Why?
7
8The Go standard library provides the excellent `ioutil` package for
9working with temporary directories and files. This library builds on top
10of it to provide safe abstractions.
diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go
new file mode 100644
index 0000000..c4ae72b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/safetemp.go
@@ -0,0 +1,40 @@
1package safetemp
2
3import (
4 "io"
5 "io/ioutil"
6 "os"
7 "path/filepath"
8)
9
10// Dir returns the path of a new temporary directory that isn't yet
11// created. This can be used with calls that expect a non-existent directory.
12//
13// The directory is created as a child of a temporary directory created
14// within the directory dir starting with prefix. The temporary directory
15// returned is always named "temp". The parent directory has the specified
16// prefix.
17//
18// The returned io.Closer should be used to clean up the returned directory.
19// This will properly remove the returned directory and any other temporary
20// files created.
21//
22// If an error is returned, the Closer does not need to be called (and will
23// be nil).
24func Dir(dir, prefix string) (string, io.Closer, error) {
25 // Create the temporary directory
26 td, err := ioutil.TempDir(dir, prefix)
27 if err != nil {
28 return "", nil, err
29 }
30
31 return filepath.Join(td, "temp"), pathCloser(td), nil
32}
33
34// pathCloser implements io.Closer to remove the given path on Close.
35type pathCloser string
36
37// Close deletes this path.
38func (p pathCloser) Close() error {
39 return os.RemoveAll(string(p))
40}
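
A minimal usage sketch of the Dir helper added above; the empty dir argument and the "example" prefix are illustrative choices, not part of this change:

```go
package main

import (
	"fmt"
	"log"
	"os"

	safetemp "github.com/hashicorp/go-safetemp"
)

func main() {
	// An empty dir argument lets ioutil.TempDir fall back to the system
	// temp directory; "example" is an illustrative prefix.
	dir, closer, err := safetemp.Dir("", "example")
	if err != nil {
		log.Fatal(err)
	}
	// Close removes the parent temporary directory and anything under it.
	defer closer.Close()

	// The returned path does not exist yet, so it can be handed to calls
	// that require a non-existent destination directory.
	if err := os.MkdirAll(dir, 0755); err != nil {
		log.Fatal(err)
	}
	fmt.Println("working in", dir)
}
```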
diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml
new file mode 100644
index 0000000..7698490
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml
@@ -0,0 +1,12 @@
1language: go
2
3sudo: false
4
5go:
6 - 1.4
7 - 1.5
8 - 1.6
9 - tip
10
11script:
12 - go test -bench . -benchmem -v ./...
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
index 21fdda4..fbde8b9 100644
--- a/vendor/github.com/hashicorp/go-uuid/README.md
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -1,6 +1,6 @@
1# uuid 1# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
2 2
3Generates UUID-format strings using purely high quality random bytes. 3Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.
4 4
5Documentation 5Documentation
6============= 6=============
diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod
new file mode 100644
index 0000000..dd57f9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/go.mod
@@ -0,0 +1 @@
module github.com/hashicorp/go-uuid
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
index 322b522..ff9364c 100644
--- a/vendor/github.com/hashicorp/go-uuid/uuid.go
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -6,13 +6,21 @@ import (
6 "fmt" 6 "fmt"
7) 7)
8 8
9// GenerateUUID is used to generate a random UUID 9// GenerateRandomBytes is used to generate random bytes of given size.
10func GenerateUUID() (string, error) { 10func GenerateRandomBytes(size int) ([]byte, error) {
11 buf := make([]byte, 16) 11 buf := make([]byte, size)
12 if _, err := rand.Read(buf); err != nil { 12 if _, err := rand.Read(buf); err != nil {
13 return "", fmt.Errorf("failed to read random bytes: %v", err) 13 return nil, fmt.Errorf("failed to read random bytes: %v", err)
14 } 14 }
15 return buf, nil
16}
15 17
18// GenerateUUID is used to generate a random UUID
19func GenerateUUID() (string, error) {
20 buf, err := GenerateRandomBytes(16)
21 if err != nil {
22 return "", err
23 }
16 return FormatUUID(buf) 24 return FormatUUID(buf)
17} 25}
18 26
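
A short sketch of the refactored go-uuid API above; both functions appear in the diff, and the 32-byte size is only an example:

```go
package main

import (
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// GenerateUUID now delegates to GenerateRandomBytes(16) and formats
	// the result via FormatUUID.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)

	// GenerateRandomBytes is also usable on its own, e.g. for a
	// 32-byte random token.
	token, err := uuid.GenerateRandomBytes(32)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", token)
}
```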
diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml
index 9f30eec..542ca8b 100644
--- a/vendor/github.com/hashicorp/go-version/.travis.yml
+++ b/vendor/github.com/hashicorp/go-version/.travis.yml
@@ -6,6 +6,8 @@ go:
6 - 1.2 6 - 1.2
7 - 1.3 7 - 1.3
8 - 1.4 8 - 1.4
9 - 1.9
10 - "1.10"
9 11
10script: 12script:
11 - go test 13 - go test
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
index 8c73df0..d055759 100644
--- a/vendor/github.com/hashicorp/go-version/constraint.go
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -2,6 +2,7 @@ package version
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "reflect"
5 "regexp" 6 "regexp"
6 "strings" 7 "strings"
7) 8)
@@ -113,6 +114,26 @@ func parseSingle(v string) (*Constraint, error) {
113 }, nil 114 }, nil
114} 115}
115 116
117func prereleaseCheck(v, c *Version) bool {
118 switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
119 case cPre && vPre:
120 // A constraint with a pre-release can only match a pre-release version
121 // with the same base segments.
122 return reflect.DeepEqual(c.Segments64(), v.Segments64())
123
124 case !cPre && vPre:
125 // A constraint without a pre-release can only match a version without a
126 // pre-release.
127 return false
128
129 case cPre && !vPre:
130 // OK, except with the pessimistic operator
131 case !cPre && !vPre:
132 // OK
133 }
134 return true
135}
136
116//------------------------------------------------------------------- 137//-------------------------------------------------------------------
117// Constraint functions 138// Constraint functions
118//------------------------------------------------------------------- 139//-------------------------------------------------------------------
@@ -126,22 +147,27 @@ func constraintNotEqual(v, c *Version) bool {
126} 147}
127 148
128func constraintGreaterThan(v, c *Version) bool { 149func constraintGreaterThan(v, c *Version) bool {
129 return v.Compare(c) == 1 150 return prereleaseCheck(v, c) && v.Compare(c) == 1
130} 151}
131 152
132func constraintLessThan(v, c *Version) bool { 153func constraintLessThan(v, c *Version) bool {
133 return v.Compare(c) == -1 154 return prereleaseCheck(v, c) && v.Compare(c) == -1
134} 155}
135 156
136func constraintGreaterThanEqual(v, c *Version) bool { 157func constraintGreaterThanEqual(v, c *Version) bool {
137 return v.Compare(c) >= 0 158 return prereleaseCheck(v, c) && v.Compare(c) >= 0
138} 159}
139 160
140func constraintLessThanEqual(v, c *Version) bool { 161func constraintLessThanEqual(v, c *Version) bool {
141 return v.Compare(c) <= 0 162 return prereleaseCheck(v, c) && v.Compare(c) <= 0
142} 163}
143 164
144func constraintPessimistic(v, c *Version) bool { 165func constraintPessimistic(v, c *Version) bool {
 166	// Using a pessimistic constraint with a pre-release restricts versions to pre-releases
167 if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
168 return false
169 }
170
145 // If the version being checked is naturally less than the constraint, then there 171 // If the version being checked is naturally less than the constraint, then there
146 // is no way for the version to be valid against the constraint 172 // is no way for the version to be valid against the constraint
147 if v.LessThan(c) { 173 if v.LessThan(c) {
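
To illustrate the effect of the pre-release handling added above, a brief sketch using go-version's existing NewConstraint/NewVersion/Check APIs (the version strings are illustrative):

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	// With the prereleaseCheck change above, a constraint that has no
	// pre-release no longer matches pre-release versions.
	c, err := version.NewConstraint(">= 1.0")
	if err != nil {
		log.Fatal(err)
	}

	stable, _ := version.NewVersion("1.2.0")
	beta, _ := version.NewVersion("1.2.0-beta1")

	fmt.Println(c.Check(stable)) // true
	fmt.Println(c.Check(beta))   // false: pre-releases are filtered out
}
```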
diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod
new file mode 100644
index 0000000..f528555
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/go.mod
@@ -0,0 +1 @@
module github.com/hashicorp/go-version
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
index ae2f6b6..4d1e6e2 100644
--- a/vendor/github.com/hashicorp/go-version/version.go
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -15,8 +15,8 @@ var versionRegexp *regexp.Regexp
15// The raw regular expression string used for testing the validity 15// The raw regular expression string used for testing the validity
16// of a version. 16// of a version.
17const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + 17const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
18 `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + 18 `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
19 `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + 19 `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
20 `?` 20 `?`
21 21
22// Version represents a single version. 22// Version represents a single version.
@@ -25,6 +25,7 @@ type Version struct {
25 pre string 25 pre string
26 segments []int64 26 segments []int64
27 si int 27 si int
28 original string
28} 29}
29 30
30func init() { 31func init() {
@@ -59,11 +60,17 @@ func NewVersion(v string) (*Version, error) {
59 segments = append(segments, 0) 60 segments = append(segments, 0)
60 } 61 }
61 62
63 pre := matches[7]
64 if pre == "" {
65 pre = matches[4]
66 }
67
62 return &Version{ 68 return &Version{
63 metadata: matches[7], 69 metadata: matches[10],
64 pre: matches[4], 70 pre: pre,
65 segments: segments, 71 segments: segments,
66 si: si, 72 si: si,
73 original: v,
67 }, nil 74 }, nil
68} 75}
69 76
@@ -166,24 +173,42 @@ func comparePart(preSelf string, preOther string) int {
166 return 0 173 return 0
167 } 174 }
168 175
176 var selfInt int64
177 selfNumeric := true
178 selfInt, err := strconv.ParseInt(preSelf, 10, 64)
179 if err != nil {
180 selfNumeric = false
181 }
182
183 var otherInt int64
184 otherNumeric := true
185 otherInt, err = strconv.ParseInt(preOther, 10, 64)
186 if err != nil {
187 otherNumeric = false
188 }
189
169 // if a part is empty, we use the other to decide 190 // if a part is empty, we use the other to decide
170 if preSelf == "" { 191 if preSelf == "" {
171 _, notIsNumeric := strconv.ParseInt(preOther, 10, 64) 192 if otherNumeric {
172 if notIsNumeric == nil {
173 return -1 193 return -1
174 } 194 }
175 return 1 195 return 1
176 } 196 }
177 197
178 if preOther == "" { 198 if preOther == "" {
179 _, notIsNumeric := strconv.ParseInt(preSelf, 10, 64) 199 if selfNumeric {
180 if notIsNumeric == nil {
181 return 1 200 return 1
182 } 201 }
183 return -1 202 return -1
184 } 203 }
185 204
186 if preSelf > preOther { 205 if selfNumeric && !otherNumeric {
206 return -1
207 } else if !selfNumeric && otherNumeric {
208 return 1
209 } else if !selfNumeric && !otherNumeric && preSelf > preOther {
210 return 1
211 } else if selfInt > otherInt {
187 return 1 212 return 1
188 } 213 }
189 214
@@ -283,11 +308,19 @@ func (v *Version) Segments() []int {
283// for a version "1.2.3-beta", segments will return a slice of 308// for a version "1.2.3-beta", segments will return a slice of
284// 1, 2, 3. 309// 1, 2, 3.
285func (v *Version) Segments64() []int64 { 310func (v *Version) Segments64() []int64 {
286 return v.segments 311 result := make([]int64, len(v.segments))
312 copy(result, v.segments)
313 return result
287} 314}
288 315
289// String returns the full version string included pre-release 316// String returns the full version string included pre-release
290// and metadata information. 317// and metadata information.
318//
319// This value is rebuilt according to the parsed segments and other
320// information. Therefore, ambiguities in the version string such as
321// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
322// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
323// as shown in the parenthesized examples.
291func (v *Version) String() string { 324func (v *Version) String() string {
292 var buf bytes.Buffer 325 var buf bytes.Buffer
293 fmtParts := make([]string, len(v.segments)) 326 fmtParts := make([]string, len(v.segments))
@@ -306,3 +339,9 @@ func (v *Version) String() string {
306 339
307 return buf.String() 340 return buf.String()
308} 341}
342
343// Original returns the original parsed version as-is, including any
344// potential whitespace, `v` prefix, etc.
345func (v *Version) Original() string {
346 return v.original
347}
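
A small sketch of the distinction introduced above between the canonicalized String() output and the new Original() accessor; the input string is illustrative:

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	v, err := version.NewVersion("v1.04.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.String())   // "1.4.0": canonicalized, per the doc comment above
	fmt.Println(v.Original()) // "v1.04.0": the input preserved as-is
}
```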
diff --git a/vendor/github.com/hashicorp/hcl2/LICENSE b/vendor/github.com/hashicorp/hcl2/LICENSE
new file mode 100644
index 0000000..82b4de9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/LICENSE
@@ -0,0 +1,353 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go
new file mode 100644
index 0000000..3a149a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go
@@ -0,0 +1,304 @@
1package gohcl
2
3import (
4 "fmt"
5 "reflect"
6
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/zclconf/go-cty/cty/convert"
11 "github.com/zclconf/go-cty/cty/gocty"
12)
13
14// DecodeBody extracts the configuration within the given body into the given
15// value. This value must be a non-nil pointer to either a struct or
16// a map, where in the former case the configuration will be decoded using
17// struct tags and in the latter case only attributes are allowed and their
18// values are decoded into the map.
19//
20// The given EvalContext is used to resolve any variables or functions in
21// expressions encountered while decoding. This may be nil to require only
22// constant values, for simple applications that do not support variables or
23// functions.
24//
25// The returned diagnostics should be inspected with its HasErrors method to
26// determine if the populated value is valid and complete. If error diagnostics
27// are returned then the given value may have been partially-populated but
28// may still be accessed by a careful caller for static analysis and editor
29// integration use-cases.
30func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
31 rv := reflect.ValueOf(val)
32 if rv.Kind() != reflect.Ptr {
33 panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
34 }
35
36 return decodeBodyToValue(body, ctx, rv.Elem())
37}
38
39func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
40 et := val.Type()
41 switch et.Kind() {
42 case reflect.Struct:
43 return decodeBodyToStruct(body, ctx, val)
44 case reflect.Map:
45 return decodeBodyToMap(body, ctx, val)
46 default:
47 panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
48 }
49}
50
51func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
52 schema, partial := ImpliedBodySchema(val.Interface())
53
54 var content *hcl.BodyContent
55 var leftovers hcl.Body
56 var diags hcl.Diagnostics
57 if partial {
58 content, leftovers, diags = body.PartialContent(schema)
59 } else {
60 content, diags = body.Content(schema)
61 }
62 if content == nil {
63 return diags
64 }
65
66 tags := getFieldTags(val.Type())
67
68 if tags.Remain != nil {
69 fieldIdx := *tags.Remain
70 field := val.Type().Field(fieldIdx)
71 fieldV := val.Field(fieldIdx)
72 switch {
73 case bodyType.AssignableTo(field.Type):
74 fieldV.Set(reflect.ValueOf(leftovers))
75 case attrsType.AssignableTo(field.Type):
76 attrs, attrsDiags := leftovers.JustAttributes()
77 if len(attrsDiags) > 0 {
78 diags = append(diags, attrsDiags...)
79 }
80 fieldV.Set(reflect.ValueOf(attrs))
81 default:
82 diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
83 }
84 }
85
86 for name, fieldIdx := range tags.Attributes {
87 attr := content.Attributes[name]
88 field := val.Type().Field(fieldIdx)
89 fieldV := val.Field(fieldIdx)
90
91 if attr == nil {
92 if !exprType.AssignableTo(field.Type) {
93 continue
94 }
95
96 // As a special case, if the target is of type hcl.Expression then
 97			// we'll assign an actual expression that evaluates to a cty null,
98 // so the caller can deal with it within the cty realm rather
99 // than within the Go realm.
100 synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
101 fieldV.Set(reflect.ValueOf(synthExpr))
102 continue
103 }
104
105 switch {
106 case attrType.AssignableTo(field.Type):
107 fieldV.Set(reflect.ValueOf(attr))
108 case exprType.AssignableTo(field.Type):
109 fieldV.Set(reflect.ValueOf(attr.Expr))
110 default:
111 diags = append(diags, DecodeExpression(
112 attr.Expr, ctx, fieldV.Addr().Interface(),
113 )...)
114 }
115 }
116
117 blocksByType := content.Blocks.ByType()
118
119 for typeName, fieldIdx := range tags.Blocks {
120 blocks := blocksByType[typeName]
121 field := val.Type().Field(fieldIdx)
122
123 ty := field.Type
124 isSlice := false
125 isPtr := false
126 if ty.Kind() == reflect.Slice {
127 isSlice = true
128 ty = ty.Elem()
129 }
130 if ty.Kind() == reflect.Ptr {
131 isPtr = true
132 ty = ty.Elem()
133 }
134
135 if len(blocks) > 1 && !isSlice {
136 diags = append(diags, &hcl.Diagnostic{
137 Severity: hcl.DiagError,
138 Summary: fmt.Sprintf("Duplicate %s block", typeName),
139 Detail: fmt.Sprintf(
140 "Only one %s block is allowed. Another was defined at %s.",
141 typeName, blocks[0].DefRange.String(),
142 ),
143 Subject: &blocks[1].DefRange,
144 })
145 continue
146 }
147
148 if len(blocks) == 0 {
149 if isSlice || isPtr {
150 val.Field(fieldIdx).Set(reflect.Zero(field.Type))
151 } else {
152 diags = append(diags, &hcl.Diagnostic{
153 Severity: hcl.DiagError,
154 Summary: fmt.Sprintf("Missing %s block", typeName),
155 Detail: fmt.Sprintf("A %s block is required.", typeName),
156 Subject: body.MissingItemRange().Ptr(),
157 })
158 }
159 continue
160 }
161
162 switch {
163
164 case isSlice:
165 elemType := ty
166 if isPtr {
167 elemType = reflect.PtrTo(ty)
168 }
169 sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
170
171 for i, block := range blocks {
172 if isPtr {
173 v := reflect.New(ty)
174 diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
175 sli.Index(i).Set(v)
176 } else {
177 diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...)
178 }
179 }
180
181 val.Field(fieldIdx).Set(sli)
182
183 default:
184 block := blocks[0]
185 if isPtr {
186 v := reflect.New(ty)
187 diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
188 val.Field(fieldIdx).Set(v)
189 } else {
190 diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
191 }
192
193 }
194
195 }
196
197 return diags
198}
199
200func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
201 attrs, diags := body.JustAttributes()
202 if attrs == nil {
203 return diags
204 }
205
206 mv := reflect.MakeMap(v.Type())
207
208 for k, attr := range attrs {
209 switch {
210 case attrType.AssignableTo(v.Type().Elem()):
211 mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
212 case exprType.AssignableTo(v.Type().Elem()):
213 mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
214 default:
215 ev := reflect.New(v.Type().Elem())
216 diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...)
217 mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
218 }
219 }
220
221 v.Set(mv)
222
223 return diags
224}
225
226func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
227 var diags hcl.Diagnostics
228
229 ty := v.Type()
230
231 switch {
232 case blockType.AssignableTo(ty):
233 v.Elem().Set(reflect.ValueOf(block))
234 case bodyType.AssignableTo(ty):
235 v.Elem().Set(reflect.ValueOf(block.Body))
236 case attrsType.AssignableTo(ty):
237 attrs, attrsDiags := block.Body.JustAttributes()
238 if len(attrsDiags) > 0 {
239 diags = append(diags, attrsDiags...)
240 }
241 v.Elem().Set(reflect.ValueOf(attrs))
242 default:
243 diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...)
244
245 if len(block.Labels) > 0 {
246 blockTags := getFieldTags(ty)
247 for li, lv := range block.Labels {
248 lfieldIdx := blockTags.Labels[li].FieldIndex
249 v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
250 }
251 }
252
253 }
254
255 return diags
256}
257
258// DecodeExpression extracts the value of the given expression into the given
259// value. This value must be something that gocty is able to decode into,
260// since the final decoding is delegated to that package.
261//
262// The given EvalContext is used to resolve any variables or functions in
263// expressions encountered while decoding. This may be nil to require only
264// constant values, for simple applications that do not support variables or
265// functions.
266//
267// The returned diagnostics should be inspected with its HasErrors method to
268// determine if the populated value is valid and complete. If error diagnostics
269// are returned then the given value may have been partially-populated but
270// may still be accessed by a careful caller for static analysis and editor
271// integration use-cases.
272func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
273 srcVal, diags := expr.Value(ctx)
274
275 convTy, err := gocty.ImpliedType(val)
276 if err != nil {
277 panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
278 }
279
280 srcVal, err = convert.Convert(srcVal, convTy)
281 if err != nil {
282 diags = append(diags, &hcl.Diagnostic{
283 Severity: hcl.DiagError,
284 Summary: "Unsuitable value type",
285 Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
286 Subject: expr.StartRange().Ptr(),
287 Context: expr.Range().Ptr(),
288 })
289 return diags
290 }
291
292 err = gocty.FromCtyValue(srcVal, val)
293 if err != nil {
294 diags = append(diags, &hcl.Diagnostic{
295 Severity: hcl.DiagError,
296 Summary: "Unsuitable value type",
297 Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
298 Subject: expr.StartRange().Ptr(),
299 Context: expr.Range().Ptr(),
300 })
301 }
302
303 return diags
304}
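
To show how DecodeBody above consumes struct tags end to end, a minimal sketch; it assumes the hclparse package from the same hcl2 module (not part of this diff) for parsing, and all type, field, and attribute names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/gohcl"
	"github.com/hashicorp/hcl2/hclparse"
)

// Config and Service mirror the struct-tag scheme consumed by DecodeBody:
// "io_mode" is an attribute, and each "service" block carries two labels
// plus its own attributes.
type Config struct {
	IOMode   string    `hcl:"io_mode,attr"`
	Services []Service `hcl:"service,block"`
}

type Service struct {
	Type       string `hcl:"type,label"`
	Name       string `hcl:"name,label"`
	ListenAddr string `hcl:"listen_addr,attr"`
}

func main() {
	src := []byte(`
io_mode = "async"

service "http" "web_proxy" {
  listen_addr = "127.0.0.1:8080"
}
`)

	parser := hclparse.NewParser()
	file, diags := parser.ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	var cfg Config
	// A nil EvalContext permits only constant expressions.
	diags = gohcl.DecodeBody(file.Body, nil, &cfg)
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Printf("%+v\n", cfg)
}
```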
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
new file mode 100644
index 0000000..8500214
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
@@ -0,0 +1,49 @@
1// Package gohcl allows decoding HCL configurations into Go data structures.
2//
3// It provides a convenient and concise way of describing the schema for
4// configuration and then accessing the resulting data via native Go
5// types.
6//
7// A struct field tag scheme is used, similar to other decoding and
8// unmarshalling libraries. The tags are formatted as in the following example:
9//
10// ThingType string `hcl:"thing_type,attr"`
11//
12// Within each tag there are two comma-separated tokens. The first is the
13// name of the corresponding construct in configuration, while the second
14// is a keyword giving the kind of construct expected. The following
15// kind keywords are supported:
16//
17// attr (the default) indicates that the value is to be populated from an attribute
18//     block indicates that the value is to be populated from a block
19//     label indicates that the value is to be populated from a block label
20// remain indicates that the value is to be populated from the remaining body after populating other fields
21//
22// "attr" fields may either be of type *hcl.Expression, in which case the raw
23// expression is assigned, or of any type accepted by gocty, in which case
24// gocty will be used to assign the value to a native Go type.
25//
26// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
27// corresponding raw value is assigned, or may be a struct that recursively
28// uses the same tags. Block fields may also be slices of any of these types,
29// in which case multiple blocks of the corresponding type are decoded into
30// the slice.
31//
32// "label" fields are considered only in a struct used as the type of a field
33// marked as "block", and are used sequentially to capture the labels of
34// the blocks being decoded. In this case, the name token is used only as
35// an identifier for the label in diagnostic messages.
36//
37// "remain" can be placed on a single field that may be either of type
38// hcl.Body or hcl.Attributes, in which case any remaining body content is
39// placed into this field for delayed processing. If no "remain" field is
40// present then any attributes or blocks not matched by another valid tag
41// will cause an error diagnostic.
42//
43// Broadly-speaking this package deals with two types of error. The first is
44// errors in the configuration itself, which are returned as diagnostics
45// written with the configuration author as the target audience. The second
46// is bugs in the calling program, such as invalid struct tags, which are
47// surfaced via panics since there can be no useful runtime handling of such
48// errors and they should certainly not be returned to the user as diagnostics.
49package gohcl
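
As a concrete illustration of the tag kinds described in this doc comment, a sketch struct (names are illustrative) that combines "label", default "attr", and "remain" fields:

```go
package example

import "github.com/hashicorp/hcl2/hcl"

// ServiceBlock combines the tag kinds described above: two "label"
// fields, a default "attr" field, and a "remain" catch-all.
type ServiceBlock struct {
	Type string `hcl:"type,label"`
	Name string `hcl:"name,label"`

	// With no kind keyword, "attr" is assumed and the value is decoded
	// via gocty into the Go type.
	ListenAddr string `hcl:"listen_addr"`

	// Body content not matched by any other tag is kept here for delayed
	// processing instead of producing an error diagnostic.
	Remain hcl.Body `hcl:",remain"`
}
```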
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go
new file mode 100644
index 0000000..88164cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go
@@ -0,0 +1,174 @@
1package gohcl
2
3import (
4 "fmt"
5 "reflect"
6 "sort"
7 "strings"
8
9 "github.com/hashicorp/hcl2/hcl"
10)
11
12// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
13// given value, which must be a struct value or a pointer to one. If an
14// inappropriate value is passed, this function will panic.
15//
16// The second return argument indicates whether the given struct includes
17// a "remain" field, and thus the returned schema is non-exhaustive.
18//
19// This uses the tags on the fields of the struct to discover how each
20// field's value should be expressed within configuration. If an invalid
21// mapping is attempted, this function will panic.
22func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
23 ty := reflect.TypeOf(val)
24
25 if ty.Kind() == reflect.Ptr {
26 ty = ty.Elem()
27 }
28
29 if ty.Kind() != reflect.Struct {
30 panic(fmt.Sprintf("given value must be struct, not %T", val))
31 }
32
33 var attrSchemas []hcl.AttributeSchema
34 var blockSchemas []hcl.BlockHeaderSchema
35
36 tags := getFieldTags(ty)
37
38 attrNames := make([]string, 0, len(tags.Attributes))
39 for n := range tags.Attributes {
40 attrNames = append(attrNames, n)
41 }
42 sort.Strings(attrNames)
43 for _, n := range attrNames {
44 idx := tags.Attributes[n]
45 optional := tags.Optional[n]
46 field := ty.Field(idx)
47
48 var required bool
49
50 switch {
51 case field.Type.AssignableTo(exprType):
 52		// If we're decoding to hcl.Expression then absence can be
53 // indicated via a null value, so we don't specify that
54 // the field is required during decoding.
55 required = false
56 case field.Type.Kind() != reflect.Ptr && !optional:
57 required = true
58 default:
59 required = false
60 }
61
62 attrSchemas = append(attrSchemas, hcl.AttributeSchema{
63 Name: n,
64 Required: required,
65 })
66 }
67
68 blockNames := make([]string, 0, len(tags.Blocks))
69 for n := range tags.Blocks {
70 blockNames = append(blockNames, n)
71 }
72 sort.Strings(blockNames)
73 for _, n := range blockNames {
74 idx := tags.Blocks[n]
75 field := ty.Field(idx)
76 fty := field.Type
77 if fty.Kind() == reflect.Slice {
78 fty = fty.Elem()
79 }
80 if fty.Kind() == reflect.Ptr {
81 fty = fty.Elem()
82 }
83 if fty.Kind() != reflect.Struct {
84 panic(fmt.Sprintf(
85 "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
86 ))
87 }
88 ftags := getFieldTags(fty)
89 var labelNames []string
90 if len(ftags.Labels) > 0 {
91 labelNames = make([]string, len(ftags.Labels))
92 for i, l := range ftags.Labels {
93 labelNames[i] = l.Name
94 }
95 }
96
97 blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
98 Type: n,
99 LabelNames: labelNames,
100 })
101 }
102
103 partial = tags.Remain != nil
104 schema = &hcl.BodySchema{
105 Attributes: attrSchemas,
106 Blocks: blockSchemas,
107 }
108 return schema, partial
109}
110
111type fieldTags struct {
112 Attributes map[string]int
113 Blocks map[string]int
114 Labels []labelField
115 Remain *int
116 Optional map[string]bool
117}
118
119type labelField struct {
120 FieldIndex int
121 Name string
122}
123
124func getFieldTags(ty reflect.Type) *fieldTags {
125 ret := &fieldTags{
126 Attributes: map[string]int{},
127 Blocks: map[string]int{},
128 Optional: map[string]bool{},
129 }
130
131 ct := ty.NumField()
132 for i := 0; i < ct; i++ {
133 field := ty.Field(i)
134 tag := field.Tag.Get("hcl")
135 if tag == "" {
136 continue
137 }
138
139 comma := strings.Index(tag, ",")
140 var name, kind string
141 if comma != -1 {
142 name = tag[:comma]
143 kind = tag[comma+1:]
144 } else {
145 name = tag
146 kind = "attr"
147 }
148
149 switch kind {
150 case "attr":
151 ret.Attributes[name] = i
152 case "block":
153 ret.Blocks[name] = i
154 case "label":
155 ret.Labels = append(ret.Labels, labelField{
156 FieldIndex: i,
157 Name: name,
158 })
159 case "remain":
160 if ret.Remain != nil {
161 panic("only one 'remain' tag is permitted")
162 }
163 idx := i // copy, because this loop will continue assigning to i
164 ret.Remain = &idx
165 case "optional":
166 ret.Attributes[name] = i
167 ret.Optional[name] = true
168 default:
169 panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
170 }
171 }
172
173 return ret
174}
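The struct tags handled above map directly onto schema entries. As a hedged illustration (the struct and field names below are invented for the example and are not part of this package), a struct using the attr, optional, label and block tag kinds yields a schema like the following:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/gohcl"
)

// ServiceConfig is a hypothetical configuration struct used only to
// illustrate the tag kinds recognized by getFieldTags.
type ServiceConfig struct {
	Name      string     `hcl:"name,attr"`        // non-pointer attr: required
	Port      *int       `hcl:"port,attr"`        // pointer attr: optional
	Comment   string     `hcl:"comment,optional"` // optional via the "optional" tag kind
	Listeners []Listener `hcl:"listener,block"`   // repeatable nested block
}

type Listener struct {
	Protocol string `hcl:"protocol,label"` // block label
	Address  string `hcl:"address,attr"`
}

func main() {
	schema, partial := gohcl.ImpliedBodySchema(&ServiceConfig{})
	fmt.Println("partial:", partial) // false: no "remain" field present
	for _, a := range schema.Attributes {
		fmt.Printf("attribute %q required=%v\n", a.Name, a.Required)
	}
	for _, b := range schema.Blocks {
		fmt.Printf("block %q labels=%v\n", b.Type, b.LabelNames)
	}
}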
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/types.go b/vendor/github.com/hashicorp/hcl2/gohcl/types.go
new file mode 100644
index 0000000..a94f275
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/types.go
@@ -0,0 +1,16 @@
1package gohcl
2
3import (
4 "reflect"
5
6 "github.com/hashicorp/hcl2/hcl"
7)
8
9var victimExpr hcl.Expression
10var victimBody hcl.Body
11
12var exprType = reflect.TypeOf(&victimExpr).Elem()
13var bodyType = reflect.TypeOf(&victimBody).Elem()
14var blockType = reflect.TypeOf((*hcl.Block)(nil))
15var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
16var attrsType = reflect.TypeOf(hcl.Attributes(nil))
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go
new file mode 100644
index 0000000..6ecf744
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go
@@ -0,0 +1,103 @@
1package hcl
2
3import (
4 "fmt"
5)
6
7// DiagnosticSeverity represents the severity of a diagnostic.
8type DiagnosticSeverity int
9
10const (
11 // DiagInvalid is the invalid zero value of DiagnosticSeverity
12 DiagInvalid DiagnosticSeverity = iota
13
14 // DiagError indicates that the problem reported by a diagnostic prevents
15 // further progress in parsing and/or evaluating the subject.
16 DiagError
17
18 // DiagWarning indicates that the problem reported by a diagnostic warrants
19 // user attention but does not prevent further progress. It is most
20 // commonly used for showing deprecation notices.
21 DiagWarning
22)
23
24// Diagnostic represents information to be presented to a user about an
25// error or anomaly in parsing or evaluating configuration.
26type Diagnostic struct {
27 Severity DiagnosticSeverity
28
29 // Summary and detail contain the English-language description of the
30 // problem. Summary is a terse description of the general problem and
31 // detail is a more elaborate, often-multi-sentence description of
32// the problem and what might be done to solve it.
33 Summary string
34 Detail string
35 Subject *Range
36 Context *Range
37}
38
39// Diagnostics is a list of Diagnostic instances.
40type Diagnostics []*Diagnostic
41
42// error implementation, so that diagnostics can be returned via APIs
43// that normally deal in vanilla Go errors.
44//
45// This presents only minimal context about the error, for compatibility
46// with usual expectations about how errors will present as strings.
47func (d *Diagnostic) Error() string {
48 return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
49}
50
51// error implementation, so that sets of diagnostics can be returned via
52// APIs that normally deal in vanilla Go errors.
53func (d Diagnostics) Error() string {
54 count := len(d)
55 switch {
56 case count == 0:
57 return "no diagnostics"
58 case count == 1:
59 return d[0].Error()
60 default:
61 return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
62 }
63}
64
65// Append appends a new error to a Diagnostics and returns the whole Diagnostics.
66//
67// This is provided as a convenience for returning from a function that
68// collects and then returns a set of diagnostics:
69//
70// return nil, diags.Append(&hcl.Diagnostic{ ... })
71//
72// Note that this modifies the array underlying the diagnostics slice, so
73// must be used carefully within a single codepath. It is incorrect (and rude)
74// to extend a diagnostics created by a different subsystem.
75func (d Diagnostics) Append(diag *Diagnostic) Diagnostics {
76 return append(d, diag)
77}
78
79// Extend concatenates the given Diagnostics with the receiver and returns
80// the whole new Diagnostics.
81//
82// This is similar to Append but accepts multiple diagnostics to add. It has
83// all the same caveats and constraints.
84func (d Diagnostics) Extend(diags Diagnostics) Diagnostics {
85 return append(d, diags...)
86}
87
88// HasErrors returns true if the receiver contains any diagnostics of
89// severity DiagError.
90func (d Diagnostics) HasErrors() bool {
91 for _, diag := range d {
92 if diag.Severity == DiagError {
93 return true
94 }
95 }
96 return false
97}
98
99// A DiagnosticWriter emits diagnostics somehow.
100type DiagnosticWriter interface {
101 WriteDiagnostic(*Diagnostic) error
102 WriteDiagnostics(Diagnostics) error
103}
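As a brief sketch of how these types compose in a calling program (the port check and source range below are invented purely for illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
)

// validatePort is a hypothetical check that reports problems as diagnostics
// rather than plain Go errors.
func validatePort(port int, subject *hcl.Range) hcl.Diagnostics {
	var diags hcl.Diagnostics
	if port < 1 || port > 65535 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid port number",
			Detail:   fmt.Sprintf("Port %d is outside the range 1-65535.", port),
			Subject:  subject,
		})
	}
	return diags
}

func main() {
	rng := hcl.Range{
		Filename: "config.hcl",
		Start:    hcl.Pos{Line: 3, Column: 1},
		End:      hcl.Pos{Line: 3, Column: 13},
	}
	diags := validatePort(70000, &rng)
	if diags.HasErrors() {
		// Diagnostics implements error, so it can also cross error-shaped APIs.
		fmt.Println(diags.Error())
	}
}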
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go
new file mode 100644
index 0000000..dfa473a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go
@@ -0,0 +1,168 @@
1package hcl
2
3import (
4 "bufio"
5 "errors"
6 "fmt"
7 "io"
8
9 wordwrap "github.com/mitchellh/go-wordwrap"
10)
11
12type diagnosticTextWriter struct {
13 files map[string]*File
14 wr io.Writer
15 width uint
16 color bool
17}
18
19// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics
20// to the given writer as formatted text.
21//
22// It is designed to produce text appropriate to print in a monospaced font
23// in a terminal of a particular width, or optionally with no width limit.
24//
25// The given width may be zero to disable word-wrapping of the detail text
26// and truncation of source code snippets.
27//
28// If color is set to true, the output will include VT100 escape sequences to
29// color-code the severity indicators. It is suggested to turn this off if
30// the target writer is not a terminal.
31func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter {
32 return &diagnosticTextWriter{
33 files: files,
34 wr: wr,
35 width: width,
36 color: color,
37 }
38}
39
40func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
41 if diag == nil {
42 return errors.New("nil diagnostic")
43 }
44
45 var colorCode, highlightCode, resetCode string
46 if w.color {
47 switch diag.Severity {
48 case DiagError:
49 colorCode = "\x1b[31m"
50 case DiagWarning:
51 colorCode = "\x1b[33m"
52 }
53 resetCode = "\x1b[0m"
54 highlightCode = "\x1b[1;4m"
55 }
56
57 var severityStr string
58 switch diag.Severity {
59 case DiagError:
60 severityStr = "Error"
61 case DiagWarning:
62 severityStr = "Warning"
63 default:
64 // should never happen
65 severityStr = "???????"
66 }
67
68 fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary)
69
70 if diag.Subject != nil {
71 snipRange := *diag.Subject
72 highlightRange := snipRange
73 if diag.Context != nil {
74 // Show enough of the source code to include both the subject
75 // and context ranges, which overlap in all reasonable
76 // situations.
77 snipRange = RangeOver(snipRange, *diag.Context)
78 }
79 // We can't illustrate an empty range, so we'll turn such ranges into
80 // single-character ranges, which might not be totally valid (may point
81 // off the end of a line, or off the end of the file) but are good
82 // enough for the bounds checks we do below.
83 if snipRange.Empty() {
84 snipRange.End.Byte++
85 snipRange.End.Column++
86 }
87 if highlightRange.Empty() {
88 highlightRange.End.Byte++
89 highlightRange.End.Column++
90 }
91
92 file := w.files[diag.Subject.Filename]
93 if file == nil || file.Bytes == nil {
94 fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line)
95 } else {
96
97 var contextLine string
98 if diag.Subject != nil {
99 contextLine = contextString(file, diag.Subject.Start.Byte)
100 if contextLine != "" {
101 contextLine = ", in " + contextLine
102 }
103 }
104
105 fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine)
106
107 src := file.Bytes
108 sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines)
109
110 for sc.Scan() {
111 lineRange := sc.Range()
112 if !lineRange.Overlaps(snipRange) {
113 continue
114 }
115
116 beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
117 if highlightedRange.Empty() {
118 fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes())
119 } else {
120 before := beforeRange.SliceBytes(src)
121 highlighted := highlightedRange.SliceBytes(src)
122 after := afterRange.SliceBytes(src)
123 fmt.Fprintf(
124 w.wr, "%4d: %s%s%s%s%s\n",
125 lineRange.Start.Line,
126 before,
127 highlightCode, highlighted, resetCode,
128 after,
129 )
130 }
131
132 }
133
134 w.wr.Write([]byte{'\n'})
135 }
136 }
137
138 if diag.Detail != "" {
139 detail := diag.Detail
140 if w.width != 0 {
141 detail = wordwrap.WrapString(detail, w.width)
142 }
143 fmt.Fprintf(w.wr, "%s\n\n", detail)
144 }
145
146 return nil
147}
148
149func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error {
150 for _, diag := range diags {
151 err := w.WriteDiagnostic(diag)
152 if err != nil {
153 return err
154 }
155 }
156 return nil
157}
158
159func contextString(file *File, offset int) string {
160 type contextStringer interface {
161 ContextString(offset int) string
162 }
163
164 if cser, ok := file.Nav.(contextStringer); ok {
165 return cser.ContextString(offset)
166 }
167 return ""
168}
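A hedged usage sketch, assuming the companion hclparse package's NewParser, ParseHCL and Files entry points (the malformed source below is contrived just to provoke a diagnostic):

package main

import (
	"os"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	src := []byte("name = \n") // deliberately incomplete expression
	parser := hclparse.NewParser()
	_, diags := parser.ParseHCL(src, "example.hcl")

	// The parser's file map supplies the source bytes used for code snippets.
	wr := hcl.NewDiagnosticTextWriter(os.Stderr, parser.Files(), 78, false)
	if diags.HasErrors() {
		wr.WriteDiagnostics(diags)
	}
}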
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go
new file mode 100644
index 0000000..c128334
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go
@@ -0,0 +1,24 @@
1package hcl
2
3import (
4 "github.com/agext/levenshtein"
5)
6
7// nameSuggestion tries to find a name from the given slice of suggested names
8// that is close to the given name and returns it if found. If no suggestion
9// is close enough, returns the empty string.
10//
11// The suggestions are tried in order, so earlier suggestions take precedence
12// if the given string is similar to two or more suggestions.
13//
14// This function is intended to be used with a relatively-small number of
15// suggestions. It's not optimized for hundreds or thousands of them.
16func nameSuggestion(given string, suggestions []string) string {
17 for _, suggestion := range suggestions {
18 dist := levenshtein.Distance(given, suggestion, nil)
19 if dist < 3 { // threshold determined experimentally
20 return suggestion
21 }
22 }
23 return ""
24}
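nameSuggestion is unexported, so it can only be exercised from within this package; the sketch below merely illustrates the underlying distance call and the experimentally chosen threshold of 3 (the misspellings are invented):

package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// "lenght" is within edit distance 3 of "length", so it would be suggested.
	fmt.Println(levenshtein.Distance("lenght", "length", nil)) // 2
	// "xyz" is too far away to be offered as a suggestion.
	fmt.Println(levenshtein.Distance("xyz", "length", nil)) // 6
}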
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/doc.go
new file mode 100644
index 0000000..01318c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/doc.go
@@ -0,0 +1 @@
package hcl
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go
new file mode 100644
index 0000000..915910a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go
@@ -0,0 +1,25 @@
1package hcl
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 "github.com/zclconf/go-cty/cty/function"
6)
7
8// An EvalContext provides the variables and functions that should be used
9// to evaluate an expression.
10type EvalContext struct {
11 Variables map[string]cty.Value
12 Functions map[string]function.Function
13 parent *EvalContext
14}
15
16// NewChild returns a new EvalContext that is a child of the receiver.
17func (ctx *EvalContext) NewChild() *EvalContext {
18 return &EvalContext{parent: ctx}
19}
20
21// Parent returns the parent of the receiver, or nil if the receiver has
22// no parent.
23func (ctx *EvalContext) Parent() *EvalContext {
24 return ctx.parent
25}
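A short sketch of populating a root context and deriving a child scope; the variable and function names are illustrative, and stdlib.UpperFunc is assumed from the go-cty standard library:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	root := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"env": cty.StringVal("production"),
		},
		Functions: map[string]function.Function{
			"upper": stdlib.UpperFunc,
		},
	}

	// A child context adds scope-local variables without mutating the root;
	// lookups that miss in the child are expected to fall back to the parent.
	child := root.NewChild()
	child.Variables = map[string]cty.Value{
		"count": cty.NumberIntVal(3),
	}
	fmt.Println(child.Parent() == root) // true
}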
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go
new file mode 100644
index 0000000..6963fba
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go
@@ -0,0 +1,46 @@
1package hcl
2
3// ExprCall tests if the given expression is a function call and,
4// if so, extracts the function name and the expressions that represent
5// the arguments. If the given expression is not statically a function call,
6// error diagnostics are returned.
7//
8// A particular Expression implementation can support this function by
9// offering a method called ExprCall that takes no arguments and returns
10// *StaticCall. This method should return nil if a static call cannot
11// be extracted. Alternatively, an implementation can support
12// UnwrapExpression to delegate handling of this function to a wrapped
13// Expression object.
14func ExprCall(expr Expression) (*StaticCall, Diagnostics) {
15 type exprCall interface {
16 ExprCall() *StaticCall
17 }
18
19 physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
20 _, supported := expr.(exprCall)
21 return supported
22 })
23
24 if exC, supported := physExpr.(exprCall); supported {
25 if call := exC.ExprCall(); call != nil {
26 return call, nil
27 }
28 }
29 return nil, Diagnostics{
30 &Diagnostic{
31 Severity: DiagError,
32 Summary: "Invalid expression",
33 Detail: "A static function call is required.",
34 Subject: expr.StartRange().Ptr(),
35 },
36 }
37}
38
39// StaticCall represents a function call that was extracted statically from
40// an expression using ExprCall.
41type StaticCall struct {
42 Name string
43 NameRange Range
44 Arguments []Expression
45 ArgsRange Range
46}
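A sketch of extracting a static call from a parsed expression, assuming the hclsyntax package's ParseExpression entry point:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`upper("hello")`), "example.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	call, diags := hcl.ExprCall(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(call.Name, len(call.Arguments)) // upper 1
}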
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go
new file mode 100644
index 0000000..d05cca0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go
@@ -0,0 +1,37 @@
1package hcl
2
3// ExprList tests if the given expression is a static list construct and,
4// if so, extracts the expressions that represent the list elements.
5// If the given expression is not a static list, error diagnostics are
6// returned.
7//
8// A particular Expression implementation can support this function by
9// offering a method called ExprList that takes no arguments and returns
10// []Expression. This method should return nil if a static list cannot
11// be extracted. Alternatively, an implementation can support
12// UnwrapExpression to delegate handling of this function to a wrapped
13// Expression object.
14func ExprList(expr Expression) ([]Expression, Diagnostics) {
15 type exprList interface {
16 ExprList() []Expression
17 }
18
19 physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
20 _, supported := expr.(exprList)
21 return supported
22 })
23
24 if exL, supported := physExpr.(exprList); supported {
25 if list := exL.ExprList(); list != nil {
26 return list, nil
27 }
28 }
29 return nil, Diagnostics{
30 &Diagnostic{
31 Severity: DiagError,
32 Summary: "Invalid expression",
33 Detail: "A static list expression is required.",
34 Subject: expr.StartRange().Ptr(),
35 },
36 }
37}
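The same pattern applies to static lists; a minimal sketch, again assuming hclsyntax.ParseExpression:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, _ := hclsyntax.ParseExpression(
		[]byte(`["a", "b", "c"]`), "example.hcl", hcl.Pos{Line: 1, Column: 1},
	)

	elems, diags := hcl.ExprList(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Each element is itself an hcl.Expression that can be inspected or
	// evaluated separately.
	fmt.Println(len(elems)) // 3
}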
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go
new file mode 100644
index 0000000..96d1ce4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go
@@ -0,0 +1,44 @@
1package hcl
2
3// ExprMap tests if the given expression is a static map construct and,
4// if so, extracts the expressions that represent the map elements.
5// If the given expression is not a static map, error diagnostics are
6// returned.
7//
8// A particular Expression implementation can support this function by
9// offering a method called ExprMap that takes no arguments and returns
10// []KeyValuePair. This method should return nil if a static map cannot
11// be extracted. Alternatively, an implementation can support
12// UnwrapExpression to delegate handling of this function to a wrapped
13// Expression object.
14func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) {
15 type exprMap interface {
16 ExprMap() []KeyValuePair
17 }
18
19 physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
20 _, supported := expr.(exprMap)
21 return supported
22 })
23
24 if exM, supported := physExpr.(exprMap); supported {
25 if pairs := exM.ExprMap(); pairs != nil {
26 return pairs, nil
27 }
28 }
29 return nil, Diagnostics{
30 &Diagnostic{
31 Severity: DiagError,
32 Summary: "Invalid expression",
33 Detail: "A static map expression is required.",
34 Subject: expr.StartRange().Ptr(),
35 },
36 }
37}
38
39// KeyValuePair represents a pair of expressions that serve as a single item
40// within a map or object definition construct.
41type KeyValuePair struct {
42 Key Expression
43 Value Expression
44}
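And for static maps, a corresponding sketch under the same assumptions:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, _ := hclsyntax.ParseExpression(
		[]byte(`{ a = 1, b = 2 }`), "example.hcl", hcl.Pos{Line: 1, Column: 1},
	)

	pairs, diags := hcl.ExprMap(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Each KeyValuePair holds two unevaluated expressions.
	fmt.Println(len(pairs)) // 2
}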
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go
new file mode 100644
index 0000000..6d5d205
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go
@@ -0,0 +1,68 @@
1package hcl
2
3type unwrapExpression interface {
4 UnwrapExpression() Expression
5}
6
7// UnwrapExpression removes any "wrapper" expressions from the given expression,
8// to recover the representation of the physical expression given in source
9// code.
10//
11// Sometimes wrapping expressions are used to modify expression behavior, e.g.
12// in extensions that need to make some local variables available to certain
13// sub-trees of the configuration. This can make it difficult to reliably
14// type-assert on the physical AST types used by the underlying syntax.
15//
16// Unwrapping an expression may modify its behavior by stripping away any
17// additional constraints or capabilities being applied to the Value and
18// Variables methods, so this function should generally only be used prior
19// to operations that concern themselves with the static syntax of the input
20// configuration, and not with the effective value of the expression.
21//
22// Wrapper expression types must support unwrapping by implementing a method
23// called UnwrapExpression that takes no arguments and returns the embedded
24// Expression. Implementations of this method should peel away only one level
25// of wrapping, if multiple are present. This method may return nil to
26// indicate _dynamically_ that no wrapped expression is available, for
27// expression types that might only behave as wrappers in certain cases.
28func UnwrapExpression(expr Expression) Expression {
29 for {
30 unwrap, wrapped := expr.(unwrapExpression)
31 if !wrapped {
32 return expr
33 }
34 innerExpr := unwrap.UnwrapExpression()
35 if innerExpr == nil {
36 return expr
37 }
38 expr = innerExpr
39 }
40}
41
42// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the
43// caller an opportunity to test each level of unwrapping to see whether a
44// particular expression is accepted.
45//
46// This could be used, for example, to unwrap until a particular other
47// interface is satisfied, regardless of which wrapping level it is satisfied
48// at.
49//
50// The given callback function must return false to continue unwrapping, or
51// true to accept and return the proposed expression given. If the callback
52// function rejects even the final, physical expression then the result of
53// this function is nil.
54func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression {
55 for {
56 if until(expr) {
57 return expr
58 }
59 unwrap, wrapped := expr.(unwrapExpression)
60 if !wrapped {
61 return nil
62 }
63 expr = unwrap.UnwrapExpression()
64 if expr == nil {
65 return nil
66 }
67 }
68}
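A hedged sketch of a wrapper type that participates in unwrapping; annotatedExpr is invented for the example, and embedding hcl.Expression keeps the full interface satisfied:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// annotatedExpr is a hypothetical wrapper that carries a note but otherwise
// delegates everything to the wrapped expression.
type annotatedExpr struct {
	hcl.Expression
	note string
}

// UnwrapExpression peels away exactly one level of wrapping.
func (e *annotatedExpr) UnwrapExpression() hcl.Expression {
	return e.Expression
}

func main() {
	inner, _ := hclsyntax.ParseExpression(
		[]byte(`1 + 1`), "example.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	wrapped := &annotatedExpr{Expression: inner, note: "added by an extension"}

	physical := hcl.UnwrapExpression(wrapped)
	fmt.Println(physical == hcl.Expression(inner)) // true: the wrapper is peeled away
}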
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
new file mode 100644
index 0000000..ccc1c0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
@@ -0,0 +1,24 @@
1package hclsyntax
2
3import (
4 "github.com/agext/levenshtein"
5)
6
7// nameSuggestion tries to find a name from the given slice of suggested names
8// that is close to the given name and returns it if found. If no suggestion
9// is close enough, returns the empty string.
10//
11// The suggestions are tried in order, so earlier suggestions take precedence
12// if the given string is similar to two or more suggestions.
13//
14// This function is intended to be used with a relatively-small number of
15// suggestions. It's not optimized for hundreds or thousands of them.
16func nameSuggestion(given string, suggestions []string) string {
17 for _, suggestion := range suggestions {
18 dist := levenshtein.Distance(given, suggestion, nil)
19 if dist < 3 { // threshold determined experimentally
20 return suggestion
21 }
22 }
23 return ""
24}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
new file mode 100644
index 0000000..617bc29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
@@ -0,0 +1,7 @@
1// Package hclsyntax contains the parser, AST, etc for HCL's native language,
2// as opposed to the JSON variant.
3//
4// In normal use applications should rarely depend on this package directly,
5// instead preferring the higher-level interface of the main hcl package and
6// its companion package hclparse.
7package hclsyntax
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
new file mode 100644
index 0000000..cfc7cd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
@@ -0,0 +1,1275 @@
1package hclsyntax
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9 "github.com/zclconf/go-cty/cty/function"
10)
11
12// Expression is the abstract type for nodes that behave as HCL expressions.
13type Expression interface {
14 Node
15
16 // The hcl.Expression methods are duplicated here, rather than simply
17 // embedded, because both Node and hcl.Expression have a Range method
18 // and so they conflict.
19
20 Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
21 Variables() []hcl.Traversal
22 StartRange() hcl.Range
23}
24
25// Assert that Expression implements hcl.Expression
26var assertExprImplExpr hcl.Expression = Expression(nil)
27
28// LiteralValueExpr is an expression that just always returns a given value.
29type LiteralValueExpr struct {
30 Val cty.Value
31 SrcRange hcl.Range
32}
33
34func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) {
35 // Literal values have no child nodes
36}
37
38func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
39 return e.Val, nil
40}
41
42func (e *LiteralValueExpr) Range() hcl.Range {
43 return e.SrcRange
44}
45
46func (e *LiteralValueExpr) StartRange() hcl.Range {
47 return e.SrcRange
48}
49
50// Implementation for hcl.AbsTraversalForExpr.
51func (e *LiteralValueExpr) AsTraversal() hcl.Traversal {
52 // This one's a little weird: the contract for AsTraversal is to interpret
53 // an expression as if it were traversal syntax, and traversal syntax
54 // doesn't have the special keywords "null", "true", and "false" so these
55 // are expected to be treated like variables in that case.
56 // Since our parser already turned them into LiteralValueExpr by the time
57 // we get here, we need to undo this and infer the name that would've
58 // originally led to our value.
59 // We don't do anything for any other values, since they don't overlap
60 // with traversal roots.
61
62 if e.Val.IsNull() {
63 // In practice the parser only generates null values of the dynamic
64 // pseudo-type for literals, so we can safely assume that any null
65 // was originally the keyword "null".
66 return hcl.Traversal{
67 hcl.TraverseRoot{
68 Name: "null",
69 SrcRange: e.SrcRange,
70 },
71 }
72 }
73
74 switch e.Val {
75 case cty.True:
76 return hcl.Traversal{
77 hcl.TraverseRoot{
78 Name: "true",
79 SrcRange: e.SrcRange,
80 },
81 }
82 case cty.False:
83 return hcl.Traversal{
84 hcl.TraverseRoot{
85 Name: "false",
86 SrcRange: e.SrcRange,
87 },
88 }
89 default:
90 // No traversal is possible for any other value.
91 return nil
92 }
93}
94
95// ScopeTraversalExpr is an Expression that retrieves a value from the scope
96// using a traversal.
97type ScopeTraversalExpr struct {
98 Traversal hcl.Traversal
99 SrcRange hcl.Range
100}
101
102func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) {
103 // Scope traversals have no child nodes
104}
105
106func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
107 return e.Traversal.TraverseAbs(ctx)
108}
109
110func (e *ScopeTraversalExpr) Range() hcl.Range {
111 return e.SrcRange
112}
113
114func (e *ScopeTraversalExpr) StartRange() hcl.Range {
115 return e.SrcRange
116}
117
118// Implementation for hcl.AbsTraversalForExpr.
119func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal {
120 return e.Traversal
121}
122
123// RelativeTraversalExpr is an Expression that retrieves a value from another
124// value using a _relative_ traversal.
125type RelativeTraversalExpr struct {
126 Source Expression
127 Traversal hcl.Traversal
128 SrcRange hcl.Range
129}
130
131func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) {
132 // Relative traversals have no child nodes
133}
134
135func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
136 src, diags := e.Source.Value(ctx)
137 ret, travDiags := e.Traversal.TraverseRel(src)
138 diags = append(diags, travDiags...)
139 return ret, diags
140}
141
142func (e *RelativeTraversalExpr) Range() hcl.Range {
143 return e.SrcRange
144}
145
146func (e *RelativeTraversalExpr) StartRange() hcl.Range {
147 return e.SrcRange
148}
149
150// Implementation for hcl.AbsTraversalForExpr.
151func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal {
152 // We can produce a traversal only if our source can.
153 st, diags := hcl.AbsTraversalForExpr(e.Source)
154 if diags.HasErrors() {
155 return nil
156 }
157
158 ret := make(hcl.Traversal, len(st)+len(e.Traversal))
159 copy(ret, st)
160 copy(ret[len(st):], e.Traversal)
161 return ret
162}
163
164// FunctionCallExpr is an Expression that calls a function from the EvalContext
165// and returns its result.
166type FunctionCallExpr struct {
167 Name string
168 Args []Expression
169
170 // If true, the final argument should be a tuple, list or set which will
171 // expand to be one argument per element.
172 ExpandFinal bool
173
174 NameRange hcl.Range
175 OpenParenRange hcl.Range
176 CloseParenRange hcl.Range
177}
178
179func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) {
180 for i, arg := range e.Args {
181 e.Args[i] = w(arg).(Expression)
182 }
183}
184
185func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
186 var diags hcl.Diagnostics
187
188 var f function.Function
189 exists := false
190 hasNonNilMap := false
191 thisCtx := ctx
192 for thisCtx != nil {
193 if thisCtx.Functions == nil {
194 thisCtx = thisCtx.Parent()
195 continue
196 }
197 hasNonNilMap = true
198 f, exists = thisCtx.Functions[e.Name]
199 if exists {
200 break
201 }
202 thisCtx = thisCtx.Parent()
203 }
204
205 if !exists {
206 if !hasNonNilMap {
207 return cty.DynamicVal, hcl.Diagnostics{
208 {
209 Severity: hcl.DiagError,
210 Summary: "Function calls not allowed",
211 Detail: "Functions may not be called here.",
212 Subject: e.Range().Ptr(),
213 },
214 }
215 }
216
217 avail := make([]string, 0, len(ctx.Functions))
218 for name := range ctx.Functions {
219 avail = append(avail, name)
220 }
221 suggestion := nameSuggestion(e.Name, avail)
222 if suggestion != "" {
223 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
224 }
225
226 return cty.DynamicVal, hcl.Diagnostics{
227 {
228 Severity: hcl.DiagError,
229 Summary: "Call to unknown function",
230 Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion),
231 Subject: &e.NameRange,
232 Context: e.Range().Ptr(),
233 },
234 }
235 }
236
237 params := f.Params()
238 varParam := f.VarParam()
239
240 args := e.Args
241 if e.ExpandFinal {
242 if len(args) < 1 {
243 // should never happen if the parser is behaving
244 panic("ExpandFinal set on function call with no arguments")
245 }
246 expandExpr := args[len(args)-1]
247 expandVal, expandDiags := expandExpr.Value(ctx)
248 diags = append(diags, expandDiags...)
249 if expandDiags.HasErrors() {
250 return cty.DynamicVal, diags
251 }
252
253 switch {
254 case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType():
255 if expandVal.IsNull() {
256 diags = append(diags, &hcl.Diagnostic{
257 Severity: hcl.DiagError,
258 Summary: "Invalid expanding argument value",
259 Detail: "The expanding argument (indicated by ...) must not be null.",
260 Context: expandExpr.Range().Ptr(),
261 Subject: e.Range().Ptr(),
262 })
263 return cty.DynamicVal, diags
264 }
265 if !expandVal.IsKnown() {
266 return cty.DynamicVal, diags
267 }
268
269 newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt())
270 newArgs = append(newArgs, args[:len(args)-1]...)
271 it := expandVal.ElementIterator()
272 for it.Next() {
273 _, val := it.Element()
274 newArgs = append(newArgs, &LiteralValueExpr{
275 Val: val,
276 SrcRange: expandExpr.Range(),
277 })
278 }
279 args = newArgs
280 default:
281 diags = append(diags, &hcl.Diagnostic{
282 Severity: hcl.DiagError,
283 Summary: "Invalid expanding argument value",
284 Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.",
285 Context: expandExpr.Range().Ptr(),
286 Subject: e.Range().Ptr(),
287 })
288 return cty.DynamicVal, diags
289 }
290 }
291
292 if len(args) < len(params) {
293 missing := params[len(args)]
294 qual := ""
295 if varParam != nil {
296 qual = " at least"
297 }
298 return cty.DynamicVal, hcl.Diagnostics{
299 {
300 Severity: hcl.DiagError,
301 Summary: "Not enough function arguments",
302 Detail: fmt.Sprintf(
303 "Function %q expects%s %d argument(s). Missing value for %q.",
304 e.Name, qual, len(params), missing.Name,
305 ),
306 Subject: &e.CloseParenRange,
307 Context: e.Range().Ptr(),
308 },
309 }
310 }
311
312 if varParam == nil && len(args) > len(params) {
313 return cty.DynamicVal, hcl.Diagnostics{
314 {
315 Severity: hcl.DiagError,
316 Summary: "Too many function arguments",
317 Detail: fmt.Sprintf(
318 "Function %q expects only %d argument(s).",
319 e.Name, len(params),
320 ),
321 Subject: args[len(params)].StartRange().Ptr(),
322 Context: e.Range().Ptr(),
323 },
324 }
325 }
326
327 argVals := make([]cty.Value, len(args))
328
329 for i, argExpr := range args {
330 var param *function.Parameter
331 if i < len(params) {
332 param = &params[i]
333 } else {
334 param = varParam
335 }
336
337 val, argDiags := argExpr.Value(ctx)
338 if len(argDiags) > 0 {
339 diags = append(diags, argDiags...)
340 }
341
342 // Try to convert our value to the parameter type
343 val, err := convert.Convert(val, param.Type)
344 if err != nil {
345 diags = append(diags, &hcl.Diagnostic{
346 Severity: hcl.DiagError,
347 Summary: "Invalid function argument",
348 Detail: fmt.Sprintf(
349 "Invalid value for %q parameter: %s.",
350 param.Name, err,
351 ),
352 Subject: argExpr.StartRange().Ptr(),
353 Context: e.Range().Ptr(),
354 })
355 }
356
357 argVals[i] = val
358 }
359
360 if diags.HasErrors() {
361 // Don't try to execute the function if we already have errors with
362 // the arguments, because the result will probably be a confusing
363 // error message.
364 return cty.DynamicVal, diags
365 }
366
367 resultVal, err := f.Call(argVals)
368 if err != nil {
369 switch terr := err.(type) {
370 case function.ArgError:
371 i := terr.Index
372 var param *function.Parameter
373 if i < len(params) {
374 param = &params[i]
375 } else {
376 param = varParam
377 }
378 argExpr := e.Args[i]
379
380 // TODO: we should also unpick a PathError here and show the
381 // path to the deep value where the error was detected.
382 diags = append(diags, &hcl.Diagnostic{
383 Severity: hcl.DiagError,
384 Summary: "Invalid function argument",
385 Detail: fmt.Sprintf(
386 "Invalid value for %q parameter: %s.",
387 param.Name, err,
388 ),
389 Subject: argExpr.StartRange().Ptr(),
390 Context: e.Range().Ptr(),
391 })
392
393 default:
394 diags = append(diags, &hcl.Diagnostic{
395 Severity: hcl.DiagError,
396 Summary: "Error in function call",
397 Detail: fmt.Sprintf(
398 "Call to function %q failed: %s.",
399 e.Name, err,
400 ),
401 Subject: e.StartRange().Ptr(),
402 Context: e.Range().Ptr(),
403 })
404 }
405
406 return cty.DynamicVal, diags
407 }
408
409 return resultVal, diags
410}
411
412func (e *FunctionCallExpr) Range() hcl.Range {
413 return hcl.RangeBetween(e.NameRange, e.CloseParenRange)
414}
415
416func (e *FunctionCallExpr) StartRange() hcl.Range {
417 return hcl.RangeBetween(e.NameRange, e.OpenParenRange)
418}
419
420// Implementation for hcl.ExprCall.
421func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall {
422 ret := &hcl.StaticCall{
423 Name: e.Name,
424 NameRange: e.NameRange,
425 Arguments: make([]hcl.Expression, len(e.Args)),
426 ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange),
427 }
428 // Need to convert our own Expression objects into hcl.Expression.
429 for i, arg := range e.Args {
430 ret.Arguments[i] = arg
431 }
432 return ret
433}
434
435type ConditionalExpr struct {
436 Condition Expression
437 TrueResult Expression
438 FalseResult Expression
439
440 SrcRange hcl.Range
441}
442
443func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) {
444 e.Condition = w(e.Condition).(Expression)
445 e.TrueResult = w(e.TrueResult).(Expression)
446 e.FalseResult = w(e.FalseResult).(Expression)
447}
448
449func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
450 trueResult, trueDiags := e.TrueResult.Value(ctx)
451 falseResult, falseDiags := e.FalseResult.Value(ctx)
452 var diags hcl.Diagnostics
453
454 // Try to find a type that both results can be converted to.
455 resultType, convs := convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()})
456 if resultType == cty.NilType {
457 return cty.DynamicVal, hcl.Diagnostics{
458 {
459 Severity: hcl.DiagError,
460 Summary: "Inconsistent conditional result types",
461 Detail: fmt.Sprintf(
462 // FIXME: Need a helper function for showing natural-language type diffs,
463 // since this will generate some useless messages in some cases, like
464 // "These expressions are object and object respectively" if the
465 // object types don't exactly match.
466 "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.",
467 trueResult.Type(), falseResult.Type(),
468 ),
469 Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(),
470 Context: &e.SrcRange,
471 },
472 }
473 }
474
475 condResult, condDiags := e.Condition.Value(ctx)
476 diags = append(diags, condDiags...)
477 if condResult.IsNull() {
478 diags = append(diags, &hcl.Diagnostic{
479 Severity: hcl.DiagError,
480 Summary: "Null condition",
481 Detail: "The condition value is null. Conditions must either be true or false.",
482 Subject: e.Condition.Range().Ptr(),
483 Context: &e.SrcRange,
484 })
485 return cty.UnknownVal(resultType), diags
486 }
487 if !condResult.IsKnown() {
488 return cty.UnknownVal(resultType), diags
489 }
490 condResult, err := convert.Convert(condResult, cty.Bool)
491 if err != nil {
492 diags = append(diags, &hcl.Diagnostic{
493 Severity: hcl.DiagError,
494 Summary: "Incorrect condition type",
495 Detail: fmt.Sprintf("The condition expression must be of type bool."),
496 Subject: e.Condition.Range().Ptr(),
497 Context: &e.SrcRange,
498 })
499 return cty.UnknownVal(resultType), diags
500 }
501
502 if condResult.True() {
503 diags = append(diags, trueDiags...)
504 if convs[0] != nil {
505 var err error
506 trueResult, err = convs[0](trueResult)
507 if err != nil {
508 // Unsafe conversion failed with the concrete result value
509 diags = append(diags, &hcl.Diagnostic{
510 Severity: hcl.DiagError,
511 Summary: "Inconsistent conditional result types",
512 Detail: fmt.Sprintf(
513 "The true result value has the wrong type: %s.",
514 err.Error(),
515 ),
516 Subject: e.TrueResult.Range().Ptr(),
517 Context: &e.SrcRange,
518 })
519 trueResult = cty.UnknownVal(resultType)
520 }
521 }
522 return trueResult, diags
523 } else {
524 diags = append(diags, falseDiags...)
525 if convs[1] != nil {
526 var err error
527 falseResult, err = convs[1](falseResult)
528 if err != nil {
529 // Unsafe conversion failed with the concrete result value
530 diags = append(diags, &hcl.Diagnostic{
531 Severity: hcl.DiagError,
532 Summary: "Inconsistent conditional result types",
533 Detail: fmt.Sprintf(
534 "The false result value has the wrong type: %s.",
535 err.Error(),
536 ),
537 Subject: e.FalseResult.Range().Ptr(),
538 Context: &e.SrcRange,
539 })
540 falseResult = cty.UnknownVal(resultType)
541 }
542 }
543 return falseResult, diags
544 }
545}
546
547func (e *ConditionalExpr) Range() hcl.Range {
548 return e.SrcRange
549}
550
551func (e *ConditionalExpr) StartRange() hcl.Range {
552 return e.Condition.StartRange()
553}
554
555type IndexExpr struct {
556 Collection Expression
557 Key Expression
558
559 SrcRange hcl.Range
560 OpenRange hcl.Range
561}
562
563func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
564 e.Collection = w(e.Collection).(Expression)
565 e.Key = w(e.Key).(Expression)
566}
567
568func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
569 var diags hcl.Diagnostics
570 coll, collDiags := e.Collection.Value(ctx)
571 key, keyDiags := e.Key.Value(ctx)
572 diags = append(diags, collDiags...)
573 diags = append(diags, keyDiags...)
574
575 return hcl.Index(coll, key, &e.SrcRange)
576}
577
578func (e *IndexExpr) Range() hcl.Range {
579 return e.SrcRange
580}
581
582func (e *IndexExpr) StartRange() hcl.Range {
583 return e.OpenRange
584}
585
586type TupleConsExpr struct {
587 Exprs []Expression
588
589 SrcRange hcl.Range
590 OpenRange hcl.Range
591}
592
593func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
594 for i, expr := range e.Exprs {
595 e.Exprs[i] = w(expr).(Expression)
596 }
597}
598
599func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
600 var vals []cty.Value
601 var diags hcl.Diagnostics
602
603 vals = make([]cty.Value, len(e.Exprs))
604 for i, expr := range e.Exprs {
605 val, valDiags := expr.Value(ctx)
606 vals[i] = val
607 diags = append(diags, valDiags...)
608 }
609
610 return cty.TupleVal(vals), diags
611}
612
613func (e *TupleConsExpr) Range() hcl.Range {
614 return e.SrcRange
615}
616
617func (e *TupleConsExpr) StartRange() hcl.Range {
618 return e.OpenRange
619}
620
621// Implementation for hcl.ExprList
622func (e *TupleConsExpr) ExprList() []hcl.Expression {
623 ret := make([]hcl.Expression, len(e.Exprs))
624 for i, expr := range e.Exprs {
625 ret[i] = expr
626 }
627 return ret
628}
629
630type ObjectConsExpr struct {
631 Items []ObjectConsItem
632
633 SrcRange hcl.Range
634 OpenRange hcl.Range
635}
636
637type ObjectConsItem struct {
638 KeyExpr Expression
639 ValueExpr Expression
640}
641
642func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
643 for i, item := range e.Items {
644 e.Items[i].KeyExpr = w(item.KeyExpr).(Expression)
645 e.Items[i].ValueExpr = w(item.ValueExpr).(Expression)
646 }
647}
648
649func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
650 var vals map[string]cty.Value
651 var diags hcl.Diagnostics
652
653 // This will get set to true if we fail to produce any of our keys,
654 // either because they are actually unknown or if the evaluation produces
655 // errors. In all of these cases we must return DynamicPseudoType because
656 // we're unable to know the full set of keys our object has, and thus
657 // we can't produce a complete value of the intended type.
658 //
659 // We still evaluate all of the item keys and values to make sure that we
660 // get as complete as possible a set of diagnostics.
661 known := true
662
663 vals = make(map[string]cty.Value, len(e.Items))
664 for _, item := range e.Items {
665 key, keyDiags := item.KeyExpr.Value(ctx)
666 diags = append(diags, keyDiags...)
667
668 val, valDiags := item.ValueExpr.Value(ctx)
669 diags = append(diags, valDiags...)
670
671 if keyDiags.HasErrors() {
672 known = false
673 continue
674 }
675
676 if key.IsNull() {
677 diags = append(diags, &hcl.Diagnostic{
678 Severity: hcl.DiagError,
679 Summary: "Null value as key",
680 Detail: "Can't use a null value as a key.",
681 Subject: item.ValueExpr.Range().Ptr(),
682 })
683 known = false
684 continue
685 }
686
687 var err error
688 key, err = convert.Convert(key, cty.String)
689 if err != nil {
690 diags = append(diags, &hcl.Diagnostic{
691 Severity: hcl.DiagError,
692 Summary: "Incorrect key type",
693 Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()),
694 Subject: item.ValueExpr.Range().Ptr(),
695 })
696 known = false
697 continue
698 }
699
700 if !key.IsKnown() {
701 known = false
702 continue
703 }
704
705 keyStr := key.AsString()
706
707 vals[keyStr] = val
708 }
709
710 if !known {
711 return cty.DynamicVal, diags
712 }
713
714 return cty.ObjectVal(vals), diags
715}
716
717func (e *ObjectConsExpr) Range() hcl.Range {
718 return e.SrcRange
719}
720
721func (e *ObjectConsExpr) StartRange() hcl.Range {
722 return e.OpenRange
723}
724
725// Implementation for hcl.ExprMap
726func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair {
727 ret := make([]hcl.KeyValuePair, len(e.Items))
728 for i, item := range e.Items {
729 ret[i] = hcl.KeyValuePair{
730 Key: item.KeyExpr,
731 Value: item.ValueExpr,
732 }
733 }
734 return ret
735}
736
737// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys,
738// which deals with the special case that a naked identifier in that position
739// must be interpreted as a literal string rather than evaluated directly.
740type ObjectConsKeyExpr struct {
741 Wrapped Expression
742}
743
744func (e *ObjectConsKeyExpr) literalName() string {
745 // This is our logic for deciding whether to behave like a literal string.
746 // We lean on our AbsTraversalForExpr implementation here, which already
747 // deals with some awkward cases like the expression being the result
748 // of the keywords "null", "true" and "false" which we'd want to interpret
749 // as keys here too.
750 return hcl.ExprAsKeyword(e.Wrapped)
751}
752
753func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) {
754 // We only treat our wrapped expression as a real expression if we're
755 // not going to interpret it as a literal.
756 if e.literalName() == "" {
757 e.Wrapped = w(e.Wrapped).(Expression)
758 }
759}
760
761func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
762 if ln := e.literalName(); ln != "" {
763 return cty.StringVal(ln), nil
764 }
765 return e.Wrapped.Value(ctx)
766}
767
768func (e *ObjectConsKeyExpr) Range() hcl.Range {
769 return e.Wrapped.Range()
770}
771
772func (e *ObjectConsKeyExpr) StartRange() hcl.Range {
773 return e.Wrapped.StartRange()
774}
775
776// Implementation for hcl.AbsTraversalForExpr.
777func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal {
778 // We can produce a traversal only if our wrappee can.
779 st, diags := hcl.AbsTraversalForExpr(e.Wrapped)
780 if diags.HasErrors() {
781 return nil
782 }
783
784 return st
785}
786
787func (e *ObjectConsKeyExpr) UnwrapExpression() Expression {
788 return e.Wrapped
789}
790
791// ForExpr represents iteration constructs:
792//
793// tuple = [for i, v in list: upper(v) if i > 2]
794// object = {for k, v in map: k => upper(v)}
795// object_of_tuples = {for v in list: v.key: v...}
796type ForExpr struct {
797 KeyVar string // empty if ignoring the key
798 ValVar string
799
800 CollExpr Expression
801
802 KeyExpr Expression // nil when producing a tuple
803 ValExpr Expression
804 CondExpr Expression // null if no "if" clause is present
805
806 Group bool // set if the ellipsis is used on the value in an object for
807
808 SrcRange hcl.Range
809 OpenRange hcl.Range
810 CloseRange hcl.Range
811}
812
813func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
814 var diags hcl.Diagnostics
815
816 collVal, collDiags := e.CollExpr.Value(ctx)
817 diags = append(diags, collDiags...)
818
819 if collVal.IsNull() {
820 diags = append(diags, &hcl.Diagnostic{
821 Severity: hcl.DiagError,
822 Summary: "Iteration over null value",
823 Detail: "A null value cannot be used as the collection in a 'for' expression.",
824 Subject: e.CollExpr.Range().Ptr(),
825 Context: &e.SrcRange,
826 })
827 return cty.DynamicVal, diags
828 }
829 if collVal.Type() == cty.DynamicPseudoType {
830 return cty.DynamicVal, diags
831 }
832 if !collVal.CanIterateElements() {
833 diags = append(diags, &hcl.Diagnostic{
834 Severity: hcl.DiagError,
835 Summary: "Iteration over non-iterable value",
836 Detail: fmt.Sprintf(
837 "A value of type %s cannot be used as the collection in a 'for' expression.",
838 collVal.Type().FriendlyName(),
839 ),
840 Subject: e.CollExpr.Range().Ptr(),
841 Context: &e.SrcRange,
842 })
843 return cty.DynamicVal, diags
844 }
845 if !collVal.IsKnown() {
846 return cty.DynamicVal, diags
847 }
848
849 childCtx := ctx.NewChild()
850 childCtx.Variables = map[string]cty.Value{}
851
852 // Before we start we'll do an early check to see if any CondExpr we've
853 // been given is of the wrong type. This isn't 100% reliable (it may
854 // be DynamicVal until real values are given) but it should catch some
855 // straightforward cases and prevent a barrage of repeated errors.
856 if e.CondExpr != nil {
857 if e.KeyVar != "" {
858 childCtx.Variables[e.KeyVar] = cty.DynamicVal
859 }
860 childCtx.Variables[e.ValVar] = cty.DynamicVal
861
862 result, condDiags := e.CondExpr.Value(childCtx)
863 diags = append(diags, condDiags...)
864 if result.IsNull() {
865 diags = append(diags, &hcl.Diagnostic{
866 Severity: hcl.DiagError,
867 Summary: "Condition is null",
868 Detail: "The value of the 'if' clause must not be null.",
869 Subject: e.CondExpr.Range().Ptr(),
870 Context: &e.SrcRange,
871 })
872 return cty.DynamicVal, diags
873 }
874 _, err := convert.Convert(result, cty.Bool)
875 if err != nil {
876 diags = append(diags, &hcl.Diagnostic{
877 Severity: hcl.DiagError,
878 Summary: "Invalid 'for' condition",
879 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
880 Subject: e.CondExpr.Range().Ptr(),
881 Context: &e.SrcRange,
882 })
883 return cty.DynamicVal, diags
884 }
885 if condDiags.HasErrors() {
886 return cty.DynamicVal, diags
887 }
888 }
889
890 if e.KeyExpr != nil {
891 // Producing an object
892 var vals map[string]cty.Value
893 var groupVals map[string][]cty.Value
894 if e.Group {
895 groupVals = map[string][]cty.Value{}
896 } else {
897 vals = map[string]cty.Value{}
898 }
899
900 it := collVal.ElementIterator()
901
902 known := true
903 for it.Next() {
904 k, v := it.Element()
905 if e.KeyVar != "" {
906 childCtx.Variables[e.KeyVar] = k
907 }
908 childCtx.Variables[e.ValVar] = v
909
910 if e.CondExpr != nil {
911 includeRaw, condDiags := e.CondExpr.Value(childCtx)
912 diags = append(diags, condDiags...)
913 if includeRaw.IsNull() {
914 if known {
915 diags = append(diags, &hcl.Diagnostic{
916 Severity: hcl.DiagError,
917 Summary: "Condition is null",
918 Detail: "The value of the 'if' clause must not be null.",
919 Subject: e.CondExpr.Range().Ptr(),
920 Context: &e.SrcRange,
921 })
922 }
923 known = false
924 continue
925 }
926 include, err := convert.Convert(includeRaw, cty.Bool)
927 if err != nil {
928 if known {
929 diags = append(diags, &hcl.Diagnostic{
930 Severity: hcl.DiagError,
931 Summary: "Invalid 'for' condition",
932 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
933 Subject: e.CondExpr.Range().Ptr(),
934 Context: &e.SrcRange,
935 })
936 }
937 known = false
938 continue
939 }
940 if !include.IsKnown() {
941 known = false
942 continue
943 }
944
945 if include.False() {
946 // Skip this element
947 continue
948 }
949 }
950
951 keyRaw, keyDiags := e.KeyExpr.Value(childCtx)
952 diags = append(diags, keyDiags...)
953 if keyRaw.IsNull() {
954 if known {
955 diags = append(diags, &hcl.Diagnostic{
956 Severity: hcl.DiagError,
957 Summary: "Invalid object key",
958 Detail: "Key expression in 'for' expression must not produce a null value.",
959 Subject: e.KeyExpr.Range().Ptr(),
960 Context: &e.SrcRange,
961 })
962 }
963 known = false
964 continue
965 }
966 if !keyRaw.IsKnown() {
967 known = false
968 continue
969 }
970
971 key, err := convert.Convert(keyRaw, cty.String)
972 if err != nil {
973 if known {
974 diags = append(diags, &hcl.Diagnostic{
975 Severity: hcl.DiagError,
976 Summary: "Invalid object key",
977 Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()),
978 Subject: e.KeyExpr.Range().Ptr(),
979 Context: &e.SrcRange,
980 })
981 }
982 known = false
983 continue
984 }
985
986 val, valDiags := e.ValExpr.Value(childCtx)
987 diags = append(diags, valDiags...)
988
989 if e.Group {
990 k := key.AsString()
991 groupVals[k] = append(groupVals[k], val)
992 } else {
993 k := key.AsString()
994 if _, exists := vals[k]; exists {
995 diags = append(diags, &hcl.Diagnostic{
996 Severity: hcl.DiagError,
997 Summary: "Duplicate object key",
998 Detail: fmt.Sprintf(
999 "Two different items produced the key %q in this for expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.",
1000 k,
1001 ),
1002 Subject: e.KeyExpr.Range().Ptr(),
1003 Context: &e.SrcRange,
1004 })
1005 } else {
1006 vals[key.AsString()] = val
1007 }
1008 }
1009 }
1010
1011 if !known {
1012 return cty.DynamicVal, diags
1013 }
1014
1015 if e.Group {
1016 vals = map[string]cty.Value{}
1017 for k, gvs := range groupVals {
1018 vals[k] = cty.TupleVal(gvs)
1019 }
1020 }
1021
1022 return cty.ObjectVal(vals), diags
1023
1024 } else {
1025 // Producing a tuple
1026 vals := []cty.Value{}
1027
1028 it := collVal.ElementIterator()
1029
1030 known := true
1031 for it.Next() {
1032 k, v := it.Element()
1033 if e.KeyVar != "" {
1034 childCtx.Variables[e.KeyVar] = k
1035 }
1036 childCtx.Variables[e.ValVar] = v
1037
1038 if e.CondExpr != nil {
1039 includeRaw, condDiags := e.CondExpr.Value(childCtx)
1040 diags = append(diags, condDiags...)
1041 if includeRaw.IsNull() {
1042 if known {
1043 diags = append(diags, &hcl.Diagnostic{
1044 Severity: hcl.DiagError,
1045 Summary: "Condition is null",
1046 Detail: "The value of the 'if' clause must not be null.",
1047 Subject: e.CondExpr.Range().Ptr(),
1048 Context: &e.SrcRange,
1049 })
1050 }
1051 known = false
1052 continue
1053 }
1054 if !includeRaw.IsKnown() {
1055 // We will eventually return DynamicVal, but we'll continue
1056 // iterating in case there are other diagnostics to gather
1057 // for later elements.
1058 known = false
1059 continue
1060 }
1061
1062 include, err := convert.Convert(includeRaw, cty.Bool)
1063 if err != nil {
1064 if known {
1065 diags = append(diags, &hcl.Diagnostic{
1066 Severity: hcl.DiagError,
1067 Summary: "Invalid 'for' condition",
1068 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
1069 Subject: e.CondExpr.Range().Ptr(),
1070 Context: &e.SrcRange,
1071 })
1072 }
1073 known = false
1074 continue
1075 }
1076
1077 if include.False() {
1078 // Skip this element
1079 continue
1080 }
1081 }
1082
1083 val, valDiags := e.ValExpr.Value(childCtx)
1084 diags = append(diags, valDiags...)
1085 vals = append(vals, val)
1086 }
1087
1088 if !known {
1089 return cty.DynamicVal, diags
1090 }
1091
1092 return cty.TupleVal(vals), diags
1093 }
1094}
1095
1096func (e *ForExpr) walkChildNodes(w internalWalkFunc) {
1097 e.CollExpr = w(e.CollExpr).(Expression)
1098
1099 scopeNames := map[string]struct{}{}
1100 if e.KeyVar != "" {
1101 scopeNames[e.KeyVar] = struct{}{}
1102 }
1103 if e.ValVar != "" {
1104 scopeNames[e.ValVar] = struct{}{}
1105 }
1106
1107 if e.KeyExpr != nil {
1108 w(ChildScope{
1109 LocalNames: scopeNames,
1110 Expr: &e.KeyExpr,
1111 })
1112 }
1113 w(ChildScope{
1114 LocalNames: scopeNames,
1115 Expr: &e.ValExpr,
1116 })
1117 if e.CondExpr != nil {
1118 w(ChildScope{
1119 LocalNames: scopeNames,
1120 Expr: &e.CondExpr,
1121 })
1122 }
1123}
1124
1125func (e *ForExpr) Range() hcl.Range {
1126 return e.SrcRange
1127}
1128
1129func (e *ForExpr) StartRange() hcl.Range {
1130 return e.OpenRange
1131}
1132
1133type SplatExpr struct {
1134 Source Expression
1135 Each Expression
1136 Item *AnonSymbolExpr
1137
1138 SrcRange hcl.Range
1139 MarkerRange hcl.Range
1140}
1141
1142func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1143 sourceVal, diags := e.Source.Value(ctx)
1144 if diags.HasErrors() {
1145 // We'll evaluate our "Each" expression here just to see if it
1146 // produces any more diagnostics we can report. Since we're not
1147 // assigning a value to our AnonSymbolExpr here it will return
1148 // DynamicVal, which should short-circuit any use of it.
1149 _, itemDiags := e.Item.Value(ctx)
1150 diags = append(diags, itemDiags...)
1151 return cty.DynamicVal, diags
1152 }
1153
1154 if sourceVal.IsNull() {
1155 diags = append(diags, &hcl.Diagnostic{
1156 Severity: hcl.DiagError,
1157 Summary: "Splat of null value",
1158 Detail: "Splat expressions (with the * symbol) cannot be applied to null values.",
1159 Subject: e.Source.Range().Ptr(),
1160 Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(),
1161 })
1162 return cty.DynamicVal, diags
1163 }
1164 if !sourceVal.IsKnown() {
1165 return cty.DynamicVal, diags
1166 }
1167
1168 // A "special power" of splat expressions is that they can be applied
1169 // both to tuples/lists and to other values, and in the latter case
1170 // the value will be treated as an implicit single-value list. We'll
1171 // deal with that here first.
1172 if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType()) {
1173 sourceVal = cty.ListVal([]cty.Value{sourceVal})
1174 }
1175
1176 vals := make([]cty.Value, 0, sourceVal.LengthInt())
1177 it := sourceVal.ElementIterator()
1178 if ctx == nil {
1179 // we need a context to use our AnonSymbolExpr, so we'll just
1180 // make an empty one here to use as a placeholder.
1181 ctx = ctx.NewChild()
1182 }
1183 isKnown := true
1184 for it.Next() {
1185 _, sourceItem := it.Element()
1186 e.Item.setValue(ctx, sourceItem)
1187 newItem, itemDiags := e.Each.Value(ctx)
1188 diags = append(diags, itemDiags...)
1189 if itemDiags.HasErrors() {
1190 isKnown = false
1191 }
1192 vals = append(vals, newItem)
1193 }
1194 e.Item.clearValue(ctx) // clean up our temporary value
1195
1196 if !isKnown {
1197 return cty.DynamicVal, diags
1198 }
1199
1200 return cty.TupleVal(vals), diags
1201}
1202
1203func (e *SplatExpr) walkChildNodes(w internalWalkFunc) {
1204 e.Source = w(e.Source).(Expression)
1205 e.Each = w(e.Each).(Expression)
1206}
1207
1208func (e *SplatExpr) Range() hcl.Range {
1209 return e.SrcRange
1210}
1211
1212func (e *SplatExpr) StartRange() hcl.Range {
1213 return e.MarkerRange
1214}
1215
1216// AnonSymbolExpr is used as a placeholder for a value in an expression that
1217// can be applied dynamically to any value at runtime.
1218//
1219// This is a rather odd, synthetic expression. It is used as part of the
1220// representation of splat expressions as a placeholder for the current item
1221// being visited in the splat evaluation.
1222//
1223// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called
1224// directly then cty.DynamicVal will be returned. Instead, it is evaluated
1225// in terms of another node (i.e. a splat expression) which temporarily
1226// assigns it a value.
1227type AnonSymbolExpr struct {
1228 SrcRange hcl.Range
1229 values map[*hcl.EvalContext]cty.Value
1230}
1231
1232func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1233 if ctx == nil {
1234 return cty.DynamicVal, nil
1235 }
1236 val, exists := e.values[ctx]
1237 if !exists {
1238 return cty.DynamicVal, nil
1239 }
1240 return val, nil
1241}
1242
1243// setValue sets a temporary local value for the expression when evaluated
1244// in the given context, which must be non-nil.
1245func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) {
1246 if e.values == nil {
1247 e.values = make(map[*hcl.EvalContext]cty.Value)
1248 }
1249 if ctx == nil {
1250 panic("can't setValue for a nil EvalContext")
1251 }
1252 e.values[ctx] = val
1253}
1254
1255func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) {
1256 if e.values == nil {
1257 return
1258 }
1259 if ctx == nil {
1260 panic("can't clearValue for a nil EvalContext")
1261 }
1262 delete(e.values, ctx)
1263}
1264
1265func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) {
1266 // AnonSymbolExpr is a leaf node in the tree
1267}
1268
1269func (e *AnonSymbolExpr) Range() hcl.Range {
1270 return e.SrcRange
1271}
1272
1273func (e *AnonSymbolExpr) StartRange() hcl.Range {
1274 return e.SrcRange
1275}
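The SplatExpr logic above treats a source value that is neither a tuple nor a list as an implicit single-element list. A minimal sketch (not part of the vendored source) of how a caller outside this package might observe that, assuming the package's public ParseExpression entry point, defined elsewhere in the package, and hypothetical file and variable names:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// "foo.*.bar" is an attribute-only splat. foo below is a single object, not a
	// list, so SplatExpr.Value should wrap it as a one-element list and return a
	// one-element tuple.
	expr, diags := hclsyntax.ParseExpression([]byte("foo.*.bar"), "splat.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"foo": cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")}),
		},
	}
	val, valDiags := expr.Value(ctx)
	if valDiags.HasErrors() {
		panic(valDiags.Error())
	}
	fmt.Println(val.LengthInt(), val.Index(cty.NumberIntVal(0)).AsString()) // expected: 1 baz
}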
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go
new file mode 100644
index 0000000..9a5da04
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go
@@ -0,0 +1,258 @@
1package hclsyntax
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9 "github.com/zclconf/go-cty/cty/function"
10 "github.com/zclconf/go-cty/cty/function/stdlib"
11)
12
13type Operation struct {
14 Impl function.Function
15 Type cty.Type
16}
17
18var (
19 OpLogicalOr = &Operation{
20 Impl: stdlib.OrFunc,
21 Type: cty.Bool,
22 }
23 OpLogicalAnd = &Operation{
24 Impl: stdlib.AndFunc,
25 Type: cty.Bool,
26 }
27 OpLogicalNot = &Operation{
28 Impl: stdlib.NotFunc,
29 Type: cty.Bool,
30 }
31
32 OpEqual = &Operation{
33 Impl: stdlib.EqualFunc,
34 Type: cty.Bool,
35 }
36 OpNotEqual = &Operation{
37 Impl: stdlib.NotEqualFunc,
38 Type: cty.Bool,
39 }
40
41 OpGreaterThan = &Operation{
42 Impl: stdlib.GreaterThanFunc,
43 Type: cty.Bool,
44 }
45 OpGreaterThanOrEqual = &Operation{
46 Impl: stdlib.GreaterThanOrEqualToFunc,
47 Type: cty.Bool,
48 }
49 OpLessThan = &Operation{
50 Impl: stdlib.LessThanFunc,
51 Type: cty.Bool,
52 }
53 OpLessThanOrEqual = &Operation{
54 Impl: stdlib.LessThanOrEqualToFunc,
55 Type: cty.Bool,
56 }
57
58 OpAdd = &Operation{
59 Impl: stdlib.AddFunc,
60 Type: cty.Number,
61 }
62 OpSubtract = &Operation{
63 Impl: stdlib.SubtractFunc,
64 Type: cty.Number,
65 }
66 OpMultiply = &Operation{
67 Impl: stdlib.MultiplyFunc,
68 Type: cty.Number,
69 }
70 OpDivide = &Operation{
71 Impl: stdlib.DivideFunc,
72 Type: cty.Number,
73 }
74 OpModulo = &Operation{
75 Impl: stdlib.ModuloFunc,
76 Type: cty.Number,
77 }
78 OpNegate = &Operation{
79 Impl: stdlib.NegateFunc,
80 Type: cty.Number,
81 }
82)
83
84var binaryOps []map[TokenType]*Operation
85
86func init() {
87 // This operation table maps from the operator's token type
88 // to the AST operation type. All expressions produced from
89 // binary operators are BinaryOp nodes.
90 //
91 // Binary operator groups are listed in order of precedence, with
92 // the *lowest* precedence first. Operators within the same group
93 // have left-to-right associativity.
94 binaryOps = []map[TokenType]*Operation{
95 {
96 TokenOr: OpLogicalOr,
97 },
98 {
99 TokenAnd: OpLogicalAnd,
100 },
101 {
102 TokenEqualOp: OpEqual,
103 TokenNotEqual: OpNotEqual,
104 },
105 {
106 TokenGreaterThan: OpGreaterThan,
107 TokenGreaterThanEq: OpGreaterThanOrEqual,
108 TokenLessThan: OpLessThan,
109 TokenLessThanEq: OpLessThanOrEqual,
110 },
111 {
112 TokenPlus: OpAdd,
113 TokenMinus: OpSubtract,
114 },
115 {
116 TokenStar: OpMultiply,
117 TokenSlash: OpDivide,
118 TokenPercent: OpModulo,
119 },
120 }
121}
122
123type BinaryOpExpr struct {
124 LHS Expression
125 Op *Operation
126 RHS Expression
127
128 SrcRange hcl.Range
129}
130
131func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
132 e.LHS = w(e.LHS).(Expression)
133 e.RHS = w(e.RHS).(Expression)
134}
135
136func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
137 impl := e.Op.Impl // assumed to be a function taking exactly two arguments
138 params := impl.Params()
139 lhsParam := params[0]
140 rhsParam := params[1]
141
142 var diags hcl.Diagnostics
143
144 givenLHSVal, lhsDiags := e.LHS.Value(ctx)
145 givenRHSVal, rhsDiags := e.RHS.Value(ctx)
146 diags = append(diags, lhsDiags...)
147 diags = append(diags, rhsDiags...)
148
149 lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type)
150 if err != nil {
151 diags = append(diags, &hcl.Diagnostic{
152 Severity: hcl.DiagError,
153 Summary: "Invalid operand",
154 Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err),
155 Subject: e.LHS.Range().Ptr(),
156 Context: &e.SrcRange,
157 })
158 }
159 rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type)
160 if err != nil {
161 diags = append(diags, &hcl.Diagnostic{
162 Severity: hcl.DiagError,
163 Summary: "Invalid operand",
164 Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err),
165 Subject: e.RHS.Range().Ptr(),
166 Context: &e.SrcRange,
167 })
168 }
169
170 if diags.HasErrors() {
171		// Don't actually try the call if we have errors already, since
172 // this will probably just produce a confusing duplicative diagnostic.
173 return cty.UnknownVal(e.Op.Type), diags
174 }
175
176 args := []cty.Value{lhsVal, rhsVal}
177 result, err := impl.Call(args)
178 if err != nil {
179 diags = append(diags, &hcl.Diagnostic{
180 // FIXME: This diagnostic is useless.
181 Severity: hcl.DiagError,
182 Summary: "Operation failed",
183 Detail: fmt.Sprintf("Error during operation: %s.", err),
184 Subject: &e.SrcRange,
185 })
186 return cty.UnknownVal(e.Op.Type), diags
187 }
188
189 return result, diags
190}
191
192func (e *BinaryOpExpr) Range() hcl.Range {
193 return e.SrcRange
194}
195
196func (e *BinaryOpExpr) StartRange() hcl.Range {
197 return e.LHS.StartRange()
198}
199
200type UnaryOpExpr struct {
201 Op *Operation
202 Val Expression
203
204 SrcRange hcl.Range
205 SymbolRange hcl.Range
206}
207
208func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
209 e.Val = w(e.Val).(Expression)
210}
211
212func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
213 impl := e.Op.Impl // assumed to be a function taking exactly one argument
214 params := impl.Params()
215 param := params[0]
216
217 givenVal, diags := e.Val.Value(ctx)
218
219 val, err := convert.Convert(givenVal, param.Type)
220 if err != nil {
221 diags = append(diags, &hcl.Diagnostic{
222 Severity: hcl.DiagError,
223 Summary: "Invalid operand",
224 Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err),
225 Subject: e.Val.Range().Ptr(),
226 Context: &e.SrcRange,
227 })
228 }
229
230 if diags.HasErrors() {
231		// Don't actually try the call if we have errors already, since
232 // this will probably just produce a confusing duplicative diagnostic.
233 return cty.UnknownVal(e.Op.Type), diags
234 }
235
236 args := []cty.Value{val}
237 result, err := impl.Call(args)
238 if err != nil {
239 diags = append(diags, &hcl.Diagnostic{
240 // FIXME: This diagnostic is useless.
241 Severity: hcl.DiagError,
242 Summary: "Operation failed",
243 Detail: fmt.Sprintf("Error during operation: %s.", err),
244 Subject: &e.SrcRange,
245 })
246 return cty.UnknownVal(e.Op.Type), diags
247 }
248
249 return result, diags
250}
251
252func (e *UnaryOpExpr) Range() hcl.Range {
253 return e.SrcRange
254}
255
256func (e *UnaryOpExpr) StartRange() hcl.Range {
257 return e.SymbolRange
258}
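The operator table above is what determines precedence: groups are listed from lowest to highest precedence, and parseBinaryOps (in parser.go, later in this diff) consumes them in that order. A hedged sketch (not part of the vendored source) of the observable effect, using the package's public ParseExpression entry point on a hypothetical input; literal operands need no EvalContext, so a nil context suffices:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte("1 + 2 * 3"), "ops.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(nil) // literals only, so no variables or functions are needed
	n, _ := val.AsBigFloat().Int64()
	fmt.Println(n) // expected: 7, because "*" sits in a later (tighter-binding) group than "+"
}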
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
new file mode 100644
index 0000000..a1c4727
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
@@ -0,0 +1,192 @@
1package hclsyntax
2
3import (
4 "bytes"
5 "fmt"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10)
11
12type TemplateExpr struct {
13 Parts []Expression
14
15 SrcRange hcl.Range
16}
17
18func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
19 for i, part := range e.Parts {
20 e.Parts[i] = w(part).(Expression)
21 }
22}
23
24func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
25 buf := &bytes.Buffer{}
26 var diags hcl.Diagnostics
27 isKnown := true
28
29 for _, part := range e.Parts {
30 partVal, partDiags := part.Value(ctx)
31 diags = append(diags, partDiags...)
32
33 if partVal.IsNull() {
34 diags = append(diags, &hcl.Diagnostic{
35 Severity: hcl.DiagError,
36 Summary: "Invalid template interpolation value",
37 Detail: fmt.Sprintf(
38 "The expression result is null. Cannot include a null value in a string template.",
39 ),
40 Subject: part.Range().Ptr(),
41 Context: &e.SrcRange,
42 })
43 continue
44 }
45
46 if !partVal.IsKnown() {
47 // If any part is unknown then the result as a whole must be
48 // unknown too. We'll keep on processing the rest of the parts
49 // anyway, because we want to still emit any diagnostics resulting
50 // from evaluating those.
51 isKnown = false
52 continue
53 }
54
55 strVal, err := convert.Convert(partVal, cty.String)
56 if err != nil {
57 diags = append(diags, &hcl.Diagnostic{
58 Severity: hcl.DiagError,
59 Summary: "Invalid template interpolation value",
60 Detail: fmt.Sprintf(
61 "Cannot include the given value in a string template: %s.",
62 err.Error(),
63 ),
64 Subject: part.Range().Ptr(),
65 Context: &e.SrcRange,
66 })
67 continue
68 }
69
70 buf.WriteString(strVal.AsString())
71 }
72
73 if !isKnown {
74 return cty.UnknownVal(cty.String), diags
75 }
76
77 return cty.StringVal(buf.String()), diags
78}
79
80func (e *TemplateExpr) Range() hcl.Range {
81 return e.SrcRange
82}
83
84func (e *TemplateExpr) StartRange() hcl.Range {
85 return e.Parts[0].StartRange()
86}
87
88// TemplateJoinExpr is used to convert tuples of strings produced by template
89// constructs (i.e. for loops) into flat strings, by converting the values
90// to strings and joining them. This AST node is not used directly; it's
91// produced as part of the AST of a "for" loop in a template.
92type TemplateJoinExpr struct {
93 Tuple Expression
94}
95
96func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
97 e.Tuple = w(e.Tuple).(Expression)
98}
99
100func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
101 tuple, diags := e.Tuple.Value(ctx)
102
103 if tuple.IsNull() {
104 // This indicates a bug in the code that constructed the AST.
105 panic("TemplateJoinExpr got null tuple")
106 }
107 if tuple.Type() == cty.DynamicPseudoType {
108 return cty.UnknownVal(cty.String), diags
109 }
110 if !tuple.Type().IsTupleType() {
111 // This indicates a bug in the code that constructed the AST.
112 panic("TemplateJoinExpr got non-tuple tuple")
113 }
114 if !tuple.IsKnown() {
115 return cty.UnknownVal(cty.String), diags
116 }
117
118 buf := &bytes.Buffer{}
119 it := tuple.ElementIterator()
120 for it.Next() {
121 _, val := it.Element()
122
123 if val.IsNull() {
124 diags = append(diags, &hcl.Diagnostic{
125 Severity: hcl.DiagError,
126 Summary: "Invalid template interpolation value",
127 Detail: fmt.Sprintf(
128 "An iteration result is null. Cannot include a null value in a string template.",
129 ),
130 Subject: e.Range().Ptr(),
131 })
132 continue
133 }
134 if val.Type() == cty.DynamicPseudoType {
135 return cty.UnknownVal(cty.String), diags
136 }
137 strVal, err := convert.Convert(val, cty.String)
138 if err != nil {
139 diags = append(diags, &hcl.Diagnostic{
140 Severity: hcl.DiagError,
141 Summary: "Invalid template interpolation value",
142 Detail: fmt.Sprintf(
143 "Cannot include one of the interpolation results into the string template: %s.",
144 err.Error(),
145 ),
146 Subject: e.Range().Ptr(),
147 })
148 continue
149 }
150 if !val.IsKnown() {
151 return cty.UnknownVal(cty.String), diags
152 }
153
154 buf.WriteString(strVal.AsString())
155 }
156
157 return cty.StringVal(buf.String()), diags
158}
159
160func (e *TemplateJoinExpr) Range() hcl.Range {
161 return e.Tuple.Range()
162}
163
164func (e *TemplateJoinExpr) StartRange() hcl.Range {
165 return e.Tuple.StartRange()
166}
167
168// TemplateWrapExpr is used instead of a TemplateExpr when a template
169// consists _only_ of a single interpolation sequence. In that case, the
170// template's result is the single interpolation's result, verbatim with
171// no type conversions.
172type TemplateWrapExpr struct {
173 Wrapped Expression
174
175 SrcRange hcl.Range
176}
177
178func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) {
179 e.Wrapped = w(e.Wrapped).(Expression)
180}
181
182func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
183 return e.Wrapped.Value(ctx)
184}
185
186func (e *TemplateWrapExpr) Range() hcl.Range {
187 return e.SrcRange
188}
189
190func (e *TemplateWrapExpr) StartRange() hcl.Range {
191 return e.SrcRange
192}
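TemplateWrapExpr above exists so that a template which is exactly one interpolation passes its value through verbatim, while any surrounding literal text goes through TemplateExpr and is converted to a string. A small sketch (not part of the vendored source) of the difference, again via the public ParseExpression entry point on hypothetical inputs:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	show := func(src string) {
		expr, diags := hclsyntax.ParseExpression([]byte(src), "tmpl.hcl", hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			panic(diags.Error())
		}
		val, valDiags := expr.Value(nil)
		if valDiags.HasErrors() {
			panic(valDiags.Error())
		}
		fmt.Printf("%s -> %s\n", src, val.Type().FriendlyName())
	}
	show(`"${5}"`)      // single interpolation: TemplateWrapExpr, expected type "number"
	show(`"n is ${5}"`) // literal text plus interpolation: TemplateExpr, expected type "string"
}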
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go
new file mode 100644
index 0000000..9177092
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go
@@ -0,0 +1,76 @@
1package hclsyntax
2
3// Generated by expression_vars_gen.go. DO NOT EDIT.
4// Run 'go generate' on this package to update the set of functions here.
5
6import (
7 "github.com/hashicorp/hcl2/hcl"
8)
9
10func (e *AnonSymbolExpr) Variables() []hcl.Traversal {
11 return Variables(e)
12}
13
14func (e *BinaryOpExpr) Variables() []hcl.Traversal {
15 return Variables(e)
16}
17
18func (e *ConditionalExpr) Variables() []hcl.Traversal {
19 return Variables(e)
20}
21
22func (e *ForExpr) Variables() []hcl.Traversal {
23 return Variables(e)
24}
25
26func (e *FunctionCallExpr) Variables() []hcl.Traversal {
27 return Variables(e)
28}
29
30func (e *IndexExpr) Variables() []hcl.Traversal {
31 return Variables(e)
32}
33
34func (e *LiteralValueExpr) Variables() []hcl.Traversal {
35 return Variables(e)
36}
37
38func (e *ObjectConsExpr) Variables() []hcl.Traversal {
39 return Variables(e)
40}
41
42func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal {
43 return Variables(e)
44}
45
46func (e *RelativeTraversalExpr) Variables() []hcl.Traversal {
47 return Variables(e)
48}
49
50func (e *ScopeTraversalExpr) Variables() []hcl.Traversal {
51 return Variables(e)
52}
53
54func (e *SplatExpr) Variables() []hcl.Traversal {
55 return Variables(e)
56}
57
58func (e *TemplateExpr) Variables() []hcl.Traversal {
59 return Variables(e)
60}
61
62func (e *TemplateJoinExpr) Variables() []hcl.Traversal {
63 return Variables(e)
64}
65
66func (e *TemplateWrapExpr) Variables() []hcl.Traversal {
67 return Variables(e)
68}
69
70func (e *TupleConsExpr) Variables() []hcl.Traversal {
71 return Variables(e)
72}
73
74func (e *UnaryOpExpr) Variables() []hcl.Traversal {
75 return Variables(e)
76}
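Each of these generated methods simply delegates to the package-level Variables function, which walks the expression tree and collects every root traversal it finds. A quick sketch (not part of the vendored source) of what that yields for a compound expression, using the public ParseExpression entry point on a hypothetical input:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte("a + b[0] + a"), "vars.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// No evaluation context is needed just to ask which variables are referenced.
	for _, traversal := range expr.Variables() {
		fmt.Println(traversal.RootName())
	}
	// Expected output: "a", "b", "a"; one entry per reference, in source order,
	// since the walk does not deduplicate.
}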
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go
new file mode 100644
index 0000000..88f1980
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go
@@ -0,0 +1,99 @@
1// This is a 'go generate'-oriented program for producing the "Variables"
2// method on every Expression implementation found within this package.
3// All expressions share the same implementation for this method, which
4// just wraps the package-level function "Variables" and uses an AST walk
5// to do its work.
6
7// +build ignore
8
9package main
10
11import (
12 "fmt"
13 "go/ast"
14 "go/parser"
15 "go/token"
16 "os"
17 "sort"
18)
19
20func main() {
21 fs := token.NewFileSet()
22 pkgs, err := parser.ParseDir(fs, ".", nil, 0)
23 if err != nil {
24 fmt.Fprintf(os.Stderr, "error while parsing: %s\n", err)
25 os.Exit(1)
26 }
27 pkg := pkgs["hclsyntax"]
28
29 // Walk all the files and collect the receivers of any "Value" methods
30 // that look like they are trying to implement Expression.
31 var recvs []string
32 for _, f := range pkg.Files {
33 for _, decl := range f.Decls {
34 fd, ok := decl.(*ast.FuncDecl)
35 if !ok {
36 continue
37 }
38 if fd.Name.Name != "Value" {
39 continue
40 }
41 results := fd.Type.Results.List
42 if len(results) != 2 {
43 continue
44 }
45 valResult := fd.Type.Results.List[0].Type.(*ast.SelectorExpr).X.(*ast.Ident)
46 diagsResult := fd.Type.Results.List[1].Type.(*ast.SelectorExpr).X.(*ast.Ident)
47
48 if valResult.Name != "cty" && diagsResult.Name != "hcl" {
49 continue
50 }
51
52 // If we have a method called Value and it returns something in
53 // "cty" followed by something in "hcl" then that's specific enough
54 // for now, even though this is not 100% exact as a correct
55 // implementation of Value.
56
57 recvTy := fd.Recv.List[0].Type
58
59 switch rtt := recvTy.(type) {
60 case *ast.StarExpr:
61 name := rtt.X.(*ast.Ident).Name
62 recvs = append(recvs, fmt.Sprintf("*%s", name))
63 default:
64 fmt.Fprintf(os.Stderr, "don't know what to do with a %T receiver\n", recvTy)
65 }
66
67 }
68 }
69
70 sort.Strings(recvs)
71
72 of, err := os.OpenFile("expression_vars.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
73 if err != nil {
74 fmt.Fprintf(os.Stderr, "failed to open output file: %s\n", err)
75 os.Exit(1)
76 }
77
78 fmt.Fprint(of, outputPreamble)
79 for _, recv := range recvs {
80 fmt.Fprintf(of, outputMethodFmt, recv)
81 }
82 fmt.Fprint(of, "\n")
83
84}
85
86const outputPreamble = `package hclsyntax
87
88// Generated by expression_vars_gen.go. DO NOT EDIT.
89// Run 'go generate' on this package to update the set of functions here.
90
91import (
92 "github.com/hashicorp/hcl2/hcl"
93)`
94
95const outputMethodFmt = `
96
97func (e %s) Variables() []hcl.Traversal {
98 return Variables(e)
99}`
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go
new file mode 100644
index 0000000..490c025
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go
@@ -0,0 +1,20 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// File is the top-level object resulting from parsing a configuration file.
8type File struct {
9 Body *Body
10 Bytes []byte
11}
12
13func (f *File) AsHCLFile() *hcl.File {
14 return &hcl.File{
15 Body: f.Body,
16 Bytes: f.Bytes,
17
18 // TODO: The Nav object, once we have an implementation of it
19 }
20}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go
new file mode 100644
index 0000000..841656a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go
@@ -0,0 +1,9 @@
1package hclsyntax
2
3//go:generate go run expression_vars_gen.go
4//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl
5//go:generate ragel -Z scan_tokens.rl
6//go:generate gofmt -w scan_tokens.go
7//go:generate ragel -Z scan_string_lit.rl
8//go:generate gofmt -w scan_string_lit.go
9//go:generate stringer -type TokenType -output token_type_string.go
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go
new file mode 100644
index 0000000..eef8b96
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go
@@ -0,0 +1,21 @@
1package hclsyntax
2
3import (
4 "bytes"
5)
6
7type Keyword []byte
8
9var forKeyword = Keyword([]byte{'f', 'o', 'r'})
10var inKeyword = Keyword([]byte{'i', 'n'})
11var ifKeyword = Keyword([]byte{'i', 'f'})
12var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'})
13var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'})
14var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'})
15
16func (kw Keyword) TokenMatches(token Token) bool {
17 if token.Type != TokenIdent {
18 return false
19 }
20 return bytes.Equal([]byte(kw), token.Bytes)
21}
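Keyword.TokenMatches requires both the TokenIdent token type and an exact byte-for-byte match, which is why quoted strings and longer identifiers never match. The keyword values above are unexported, so the following hypothetical test sketch (not part of the vendored source) only makes sense inside the hclsyntax package itself:

// keywords_sketch_test.go (hypothetical, in-package)
package hclsyntax

import "testing"

func TestKeywordTokenMatches(t *testing.T) {
	ident := Token{Type: TokenIdent, Bytes: []byte("for")}
	if !forKeyword.TokenMatches(ident) {
		t.Error("a 'for' identifier token should match forKeyword")
	}
	quoted := Token{Type: TokenOQuote, Bytes: []byte("for")}
	if forKeyword.TokenMatches(quoted) {
		t.Error("non-identifier tokens must never match, whatever their bytes")
	}
	longer := Token{Type: TokenIdent, Bytes: []byte("force")}
	if forKeyword.TokenMatches(longer) {
		t.Error("bytes.Equal is an exact match, so 'force' must not match 'for'")
	}
}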
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
new file mode 100644
index 0000000..4d41b6b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
@@ -0,0 +1,41 @@
1package hclsyntax
2
3import (
4 "bytes"
5 "fmt"
6)
7
8type navigation struct {
9 root *Body
10}
11
12// Implementation of hcled.ContextString
13func (n navigation) ContextString(offset int) string {
14 // We will walk our top-level blocks until we find one that contains
15 // the given offset, and then construct a representation of the header
16 // of the block.
17
18 var block *Block
19 for _, candidate := range n.root.Blocks {
20 if candidate.Range().ContainsOffset(offset) {
21 block = candidate
22 break
23 }
24 }
25
26 if block == nil {
27 return ""
28 }
29
30 if len(block.Labels) == 0 {
31 // Easy case!
32 return block.Type
33 }
34
35 buf := &bytes.Buffer{}
36 buf.WriteString(block.Type)
37 for _, label := range block.Labels {
38 fmt.Fprintf(buf, " %q", label)
39 }
40 return buf.String()
41}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go
new file mode 100644
index 0000000..fd426d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go
@@ -0,0 +1,22 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// Node is the abstract type that every AST node implements.
8//
9// This is a closed interface, so it cannot be implemented from outside of
10// this package.
11type Node interface {
12 // This is the mechanism by which the public-facing walk functions
13 // are implemented. Implementations should call the given function
14 // for each child node and then replace that node with its return value.
15 // The return value might just be the same node, for non-transforming
16 // walks.
17 walkChildNodes(w internalWalkFunc)
18
19 Range() hcl.Range
20}
21
22type internalWalkFunc func(Node) Node
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
new file mode 100644
index 0000000..002858f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
@@ -0,0 +1,1836 @@
1package hclsyntax
2
3import (
4 "bytes"
5 "fmt"
6 "strconv"
7 "unicode/utf8"
8
9 "github.com/apparentlymart/go-textseg/textseg"
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/zclconf/go-cty/cty"
12 "github.com/zclconf/go-cty/cty/convert"
13)
14
15type parser struct {
16 *peeker
17
18 // set to true if any recovery is attempted. The parser can use this
19 // to attempt to reduce error noise by suppressing "bad token" errors
20 // in recovery mode, assuming that the recovery heuristics have failed
21	// in this case and left the peeker in the wrong place.
22 recovery bool
23}
24
25func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) {
26 attrs := Attributes{}
27 blocks := Blocks{}
28 var diags hcl.Diagnostics
29
30 startRange := p.PrevRange()
31 var endRange hcl.Range
32
33Token:
34 for {
35 next := p.Peek()
36 if next.Type == end {
37 endRange = p.NextRange()
38 p.Read()
39 break Token
40 }
41
42 switch next.Type {
43 case TokenNewline:
44 p.Read()
45 continue
46 case TokenIdent:
47 item, itemDiags := p.ParseBodyItem()
48 diags = append(diags, itemDiags...)
49 switch titem := item.(type) {
50 case *Block:
51 blocks = append(blocks, titem)
52 case *Attribute:
53 if existing, exists := attrs[titem.Name]; exists {
54 diags = append(diags, &hcl.Diagnostic{
55 Severity: hcl.DiagError,
56 Summary: "Attribute redefined",
57 Detail: fmt.Sprintf(
58 "The attribute %q was already defined at %s. Each attribute may be defined only once.",
59 titem.Name, existing.NameRange.String(),
60 ),
61 Subject: &titem.NameRange,
62 })
63 } else {
64 attrs[titem.Name] = titem
65 }
66 default:
67 // This should never happen for valid input, but may if a
68 // syntax error was detected in ParseBodyItem that prevented
69 // it from even producing a partially-broken item. In that
70 // case, it would've left at least one error in the diagnostics
71 // slice we already dealt with above.
72 //
73 // We'll assume ParseBodyItem attempted recovery to leave
74 // us in a reasonable position to try parsing the next item.
75 continue
76 }
77 default:
78 bad := p.Read()
79 if !p.recovery {
80 if bad.Type == TokenOQuote {
81 diags = append(diags, &hcl.Diagnostic{
82 Severity: hcl.DiagError,
83 Summary: "Invalid attribute name",
84 Detail: "Attribute names must not be quoted.",
85 Subject: &bad.Range,
86 })
87 } else {
88 diags = append(diags, &hcl.Diagnostic{
89 Severity: hcl.DiagError,
90 Summary: "Attribute or block definition required",
91 Detail: "An attribute or block definition is required here.",
92 Subject: &bad.Range,
93 })
94 }
95 }
96 endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics
97
98 p.recover(end) // attempt to recover to the token after the end of this body
99 break Token
100 }
101 }
102
103 return &Body{
104 Attributes: attrs,
105 Blocks: blocks,
106
107 SrcRange: hcl.RangeBetween(startRange, endRange),
108 EndRange: hcl.Range{
109 Filename: endRange.Filename,
110 Start: endRange.End,
111 End: endRange.End,
112 },
113 }, diags
114}
115
116func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
117 ident := p.Read()
118 if ident.Type != TokenIdent {
119 p.recoverAfterBodyItem()
120 return nil, hcl.Diagnostics{
121 {
122 Severity: hcl.DiagError,
123 Summary: "Attribute or block definition required",
124 Detail: "An attribute or block definition is required here.",
125 Subject: &ident.Range,
126 },
127 }
128 }
129
130 next := p.Peek()
131
132 switch next.Type {
133 case TokenEqual:
134 return p.finishParsingBodyAttribute(ident)
135 case TokenOQuote, TokenOBrace, TokenIdent:
136 return p.finishParsingBodyBlock(ident)
137 default:
138 p.recoverAfterBodyItem()
139 return nil, hcl.Diagnostics{
140 {
141 Severity: hcl.DiagError,
142 Summary: "Attribute or block definition required",
143 Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.",
144 Subject: &ident.Range,
145 },
146 }
147 }
148
149 return nil, nil
150}
151
152func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) {
153 eqTok := p.Read() // eat equals token
154 if eqTok.Type != TokenEqual {
155 // should never happen if caller behaves
156 panic("finishParsingBodyAttribute called with next not equals")
157 }
158
159 var endRange hcl.Range
160
161 expr, diags := p.ParseExpression()
162 if p.recovery && diags.HasErrors() {
163 // recovery within expressions tends to be tricky, so we've probably
164 // landed somewhere weird. We'll try to reset to the start of a body
165 // item so parsing can continue.
166 endRange = p.PrevRange()
167 p.recoverAfterBodyItem()
168 } else {
169 end := p.Peek()
170 if end.Type != TokenNewline && end.Type != TokenEOF {
171 if !p.recovery {
172 diags = append(diags, &hcl.Diagnostic{
173 Severity: hcl.DiagError,
174 Summary: "Missing newline after attribute definition",
175 Detail: "An attribute definition must end with a newline.",
176 Subject: &end.Range,
177 Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
178 })
179 }
180 endRange = p.PrevRange()
181 p.recoverAfterBodyItem()
182 } else {
183 endRange = p.PrevRange()
184 p.Read() // eat newline
185 }
186 }
187
188 return &Attribute{
189 Name: string(ident.Bytes),
190 Expr: expr,
191
192 SrcRange: hcl.RangeBetween(ident.Range, endRange),
193 NameRange: ident.Range,
194 EqualsRange: eqTok.Range,
195 }, diags
196}
197
198func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) {
199 var blockType = string(ident.Bytes)
200 var diags hcl.Diagnostics
201 var labels []string
202 var labelRanges []hcl.Range
203
204 var oBrace Token
205
206Token:
207 for {
208 tok := p.Peek()
209
210 switch tok.Type {
211
212 case TokenOBrace:
213 oBrace = p.Read()
214 break Token
215
216 case TokenOQuote:
217 label, labelRange, labelDiags := p.parseQuotedStringLiteral()
218 diags = append(diags, labelDiags...)
219 labels = append(labels, label)
220 labelRanges = append(labelRanges, labelRange)
221 if labelDiags.HasErrors() {
222 p.recoverAfterBodyItem()
223 return &Block{
224 Type: blockType,
225 Labels: labels,
226 Body: nil,
227
228 TypeRange: ident.Range,
229 LabelRanges: labelRanges,
230 OpenBraceRange: ident.Range, // placeholder
231 CloseBraceRange: ident.Range, // placeholder
232 }, diags
233 }
234
235 case TokenIdent:
236 tok = p.Read() // eat token
237 label, labelRange := string(tok.Bytes), tok.Range
238 labels = append(labels, label)
239 labelRanges = append(labelRanges, labelRange)
240
241 default:
242 switch tok.Type {
243 case TokenEqual:
244 diags = append(diags, &hcl.Diagnostic{
245 Severity: hcl.DiagError,
246 Summary: "Invalid block definition",
247 Detail: "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.",
248 Subject: &tok.Range,
249 Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
250 })
251 case TokenNewline:
252 diags = append(diags, &hcl.Diagnostic{
253 Severity: hcl.DiagError,
254 Summary: "Invalid block definition",
255 Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.",
256 Subject: &tok.Range,
257 Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
258 })
259 default:
260 if !p.recovery {
261 diags = append(diags, &hcl.Diagnostic{
262 Severity: hcl.DiagError,
263 Summary: "Invalid block definition",
264 Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.",
265 Subject: &tok.Range,
266 Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
267 })
268 }
269 }
270
271 p.recoverAfterBodyItem()
272
273 return &Block{
274 Type: blockType,
275 Labels: labels,
276 Body: nil,
277
278 TypeRange: ident.Range,
279 LabelRanges: labelRanges,
280 OpenBraceRange: ident.Range, // placeholder
281 CloseBraceRange: ident.Range, // placeholder
282 }, diags
283 }
284 }
285
286 // Once we fall out here, the peeker is pointed just after our opening
287 // brace, so we can begin our nested body parsing.
288 body, bodyDiags := p.ParseBody(TokenCBrace)
289 diags = append(diags, bodyDiags...)
290 cBraceRange := p.PrevRange()
291
292 eol := p.Peek()
293 if eol.Type == TokenNewline || eol.Type == TokenEOF {
294 p.Read() // eat newline
295 } else {
296 if !p.recovery {
297 diags = append(diags, &hcl.Diagnostic{
298 Severity: hcl.DiagError,
299 Summary: "Missing newline after block definition",
300 Detail: "A block definition must end with a newline.",
301 Subject: &eol.Range,
302 Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(),
303 })
304 }
305 p.recoverAfterBodyItem()
306 }
307
308 return &Block{
309 Type: blockType,
310 Labels: labels,
311 Body: body,
312
313 TypeRange: ident.Range,
314 LabelRanges: labelRanges,
315 OpenBraceRange: oBrace.Range,
316 CloseBraceRange: cBraceRange,
317 }, diags
318}
319
320func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) {
321 return p.parseTernaryConditional()
322}
323
324func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) {
325 // The ternary conditional operator (.. ? .. : ..) behaves somewhat
326 // like a binary operator except that the "symbol" is itself
327 // an expression enclosed in two punctuation characters.
328 // The middle expression is parsed as if the ? and : symbols
329 // were parentheses. The "rhs" (the "false expression") is then
330 // treated right-associatively so it behaves similarly to the
331 // middle in terms of precedence.
332
333 startRange := p.NextRange()
334 var condExpr, trueExpr, falseExpr Expression
335 var diags hcl.Diagnostics
336
337 condExpr, condDiags := p.parseBinaryOps(binaryOps)
338 diags = append(diags, condDiags...)
339 if p.recovery && condDiags.HasErrors() {
340 return condExpr, diags
341 }
342
343 questionMark := p.Peek()
344 if questionMark.Type != TokenQuestion {
345 return condExpr, diags
346 }
347
348 p.Read() // eat question mark
349
350 trueExpr, trueDiags := p.ParseExpression()
351 diags = append(diags, trueDiags...)
352 if p.recovery && trueDiags.HasErrors() {
353 return condExpr, diags
354 }
355
356 colon := p.Peek()
357 if colon.Type != TokenColon {
358 diags = append(diags, &hcl.Diagnostic{
359 Severity: hcl.DiagError,
360 Summary: "Missing false expression in conditional",
361 Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.",
362 Subject: &colon.Range,
363 Context: hcl.RangeBetween(startRange, colon.Range).Ptr(),
364 })
365 return condExpr, diags
366 }
367
368 p.Read() // eat colon
369
370 falseExpr, falseDiags := p.ParseExpression()
371 diags = append(diags, falseDiags...)
372 if p.recovery && falseDiags.HasErrors() {
373 return condExpr, diags
374 }
375
376 return &ConditionalExpr{
377 Condition: condExpr,
378 TrueResult: trueExpr,
379 FalseResult: falseExpr,
380
381 SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()),
382 }, diags
383}
384
385// parseBinaryOps calls itself recursively to work through all of the
386// operator precedence groups, and then eventually calls parseExpressionTerm
387// for each operand.
388func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) {
389 if len(ops) == 0 {
390 // We've run out of operators, so now we'll just try to parse a term.
391 return p.parseExpressionWithTraversals()
392 }
393
394 thisLevel := ops[0]
395 remaining := ops[1:]
396
397 var lhs, rhs Expression
398 var operation *Operation
399 var diags hcl.Diagnostics
400
401 // Parse a term that might be the first operand of a binary
402 // operation or it might just be a standalone term.
403 // We won't know until we've parsed it and can look ahead
404 // to see if there's an operator token for this level.
405 lhs, lhsDiags := p.parseBinaryOps(remaining)
406 diags = append(diags, lhsDiags...)
407 if p.recovery && lhsDiags.HasErrors() {
408 return lhs, diags
409 }
410
411 // We'll keep eating up operators until we run out, so that operators
412 // with the same precedence will combine in a left-associative manner:
413 // a+b+c => (a+b)+c, not a+(b+c)
414 //
415 // Should we later want to have right-associative operators, a way
416 // to achieve that would be to call back up to ParseExpression here
417 // instead of iteratively parsing only the remaining operators.
418 for {
419 next := p.Peek()
420 var newOp *Operation
421 var ok bool
422 if newOp, ok = thisLevel[next.Type]; !ok {
423 break
424 }
425
426 // Are we extending an expression started on the previous iteration?
427 if operation != nil {
428 lhs = &BinaryOpExpr{
429 LHS: lhs,
430 Op: operation,
431 RHS: rhs,
432
433 SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),
434 }
435 }
436
437 operation = newOp
438 p.Read() // eat operator token
439 var rhsDiags hcl.Diagnostics
440 rhs, rhsDiags = p.parseBinaryOps(remaining)
441 diags = append(diags, rhsDiags...)
442 if p.recovery && rhsDiags.HasErrors() {
443 return lhs, diags
444 }
445 }
446
447 if operation == nil {
448 return lhs, diags
449 }
450
451 return &BinaryOpExpr{
452 LHS: lhs,
453 Op: operation,
454 RHS: rhs,
455
456 SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),
457 }, diags
458}
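The loop above is what gives same-precedence operators left-to-right associativity, which is only observable with non-commutative operators. A short sketch (not part of the vendored source) of the difference it makes, on a hypothetical input evaluated with a nil context since the operands are literals:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte("10 - 4 - 3"), "assoc.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(nil)
	n, _ := val.AsBigFloat().Int64()
	fmt.Println(n) // expected: 3, i.e. (10 - 4) - 3; right associativity would give 9
}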
459
460func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) {
461 term, diags := p.parseExpressionTerm()
462 ret := term
463
464Traversal:
465 for {
466 next := p.Peek()
467
468 switch next.Type {
469 case TokenDot:
470 // Attribute access or splat
471 dot := p.Read()
472 attrTok := p.Peek()
473
474 switch attrTok.Type {
475 case TokenIdent:
476 attrTok = p.Read() // eat token
477 name := string(attrTok.Bytes)
478 rng := hcl.RangeBetween(dot.Range, attrTok.Range)
479 step := hcl.TraverseAttr{
480 Name: name,
481 SrcRange: rng,
482 }
483
484 ret = makeRelativeTraversal(ret, step, rng)
485
486 case TokenNumberLit:
487 // This is a weird form we inherited from HIL, allowing numbers
488 // to be used as attributes as a weird way of writing [n].
489 // This was never actually a first-class thing in HIL, but
490 // HIL tolerated sequences like .0. in its variable names and
491 // calling applications like Terraform exploited that to
492 // introduce indexing syntax where none existed.
493 numTok := p.Read() // eat token
494 attrTok = numTok
495
496 // This syntax is ambiguous if multiple indices are used in
497 // succession, like foo.0.1.baz: that actually parses as
498 // a fractional number 0.1. Since we're only supporting this
499 // syntax for compatibility with legacy Terraform
500 // configurations, and Terraform does not tend to have lists
501 // of lists, we'll choose to reject that here with a helpful
502 // error message, rather than failing later because the index
503 // isn't a whole number.
504 if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 {
505 first := numTok.Bytes[:dotIdx]
506 second := numTok.Bytes[dotIdx+1:]
507 diags = append(diags, &hcl.Diagnostic{
508 Severity: hcl.DiagError,
509 Summary: "Invalid legacy index syntax",
510 Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second),
511 Subject: &attrTok.Range,
512 })
513 rng := hcl.RangeBetween(dot.Range, numTok.Range)
514 step := hcl.TraverseIndex{
515 Key: cty.DynamicVal,
516 SrcRange: rng,
517 }
518 ret = makeRelativeTraversal(ret, step, rng)
519 break
520 }
521
522 numVal, numDiags := p.numberLitValue(numTok)
523 diags = append(diags, numDiags...)
524
525 rng := hcl.RangeBetween(dot.Range, numTok.Range)
526 step := hcl.TraverseIndex{
527 Key: numVal,
528 SrcRange: rng,
529 }
530
531 ret = makeRelativeTraversal(ret, step, rng)
532
533 case TokenStar:
534 // "Attribute-only" splat expression.
535 // (This is a kinda weird construct inherited from HIL, which
536 // behaves a bit like a [*] splat except that it is only able
537 // to do attribute traversals into each of its elements,
538			// whereas foo[*] can support _any_ traversal.)
539 marker := p.Read() // eat star
540 trav := make(hcl.Traversal, 0, 1)
541 var firstRange, lastRange hcl.Range
542 firstRange = p.NextRange()
543 for p.Peek().Type == TokenDot {
544 dot := p.Read()
545
546 if p.Peek().Type == TokenNumberLit {
547 // Continuing the "weird stuff inherited from HIL"
548 // theme, we also allow numbers as attribute names
549 // inside splats and interpret them as indexing
550 // into a list, for expressions like:
551 // foo.bar.*.baz.0.foo
552 numTok := p.Read()
553
554 // Weird special case if the user writes something
555 // like foo.bar.*.baz.0.0.foo, where 0.0 parses
556 // as a number.
557 if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 {
558 first := numTok.Bytes[:dotIdx]
559 second := numTok.Bytes[dotIdx+1:]
560 diags = append(diags, &hcl.Diagnostic{
561 Severity: hcl.DiagError,
562 Summary: "Invalid legacy index syntax",
563 Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second),
564 Subject: &attrTok.Range,
565 })
566 trav = append(trav, hcl.TraverseIndex{
567 Key: cty.DynamicVal,
568 SrcRange: hcl.RangeBetween(dot.Range, numTok.Range),
569 })
570 lastRange = numTok.Range
571 continue
572 }
573
574 numVal, numDiags := p.numberLitValue(numTok)
575 diags = append(diags, numDiags...)
576 trav = append(trav, hcl.TraverseIndex{
577 Key: numVal,
578 SrcRange: hcl.RangeBetween(dot.Range, numTok.Range),
579 })
580 lastRange = numTok.Range
581 continue
582 }
583
584 if p.Peek().Type != TokenIdent {
585 if !p.recovery {
586 if p.Peek().Type == TokenStar {
587 diags = append(diags, &hcl.Diagnostic{
588 Severity: hcl.DiagError,
589 Summary: "Nested splat expression not allowed",
590 Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.",
591 Subject: p.Peek().Range.Ptr(),
592 })
593 } else {
594 diags = append(diags, &hcl.Diagnostic{
595 Severity: hcl.DiagError,
596 Summary: "Invalid attribute name",
597 Detail: "An attribute name is required after a dot.",
598 Subject: &attrTok.Range,
599 })
600 }
601 }
602 p.setRecovery()
603 continue Traversal
604 }
605
606 attrTok := p.Read()
607 trav = append(trav, hcl.TraverseAttr{
608 Name: string(attrTok.Bytes),
609 SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range),
610 })
611 lastRange = attrTok.Range
612 }
613
614 itemExpr := &AnonSymbolExpr{
615 SrcRange: hcl.RangeBetween(dot.Range, marker.Range),
616 }
617 var travExpr Expression
618 if len(trav) == 0 {
619 travExpr = itemExpr
620 } else {
621 travExpr = &RelativeTraversalExpr{
622 Source: itemExpr,
623 Traversal: trav,
624 SrcRange: hcl.RangeBetween(firstRange, lastRange),
625 }
626 }
627
628 ret = &SplatExpr{
629 Source: ret,
630 Each: travExpr,
631 Item: itemExpr,
632
633 SrcRange: hcl.RangeBetween(dot.Range, lastRange),
634 MarkerRange: hcl.RangeBetween(dot.Range, marker.Range),
635 }
636
637 default:
638 diags = append(diags, &hcl.Diagnostic{
639 Severity: hcl.DiagError,
640 Summary: "Invalid attribute name",
641 Detail: "An attribute name is required after a dot.",
642 Subject: &attrTok.Range,
643 })
644 // This leaves the peeker in a bad place, so following items
645 // will probably be misparsed until we hit something that
646 // allows us to re-sync.
647 //
648 // We will probably need to do something better here eventually
649 // in order to support autocomplete triggered by typing a
650 // period.
651 p.setRecovery()
652 }
653
654 case TokenOBrack:
655 // Indexing of a collection.
656 // This may or may not be a hcl.Traverser, depending on whether
657 // the key value is something constant.
658
659 open := p.Read()
660 // TODO: If we have a TokenStar inside our brackets, parse as
661 // a Splat expression: foo[*].baz[0].
662 var close Token
663 p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
664 keyExpr, keyDiags := p.ParseExpression()
665 diags = append(diags, keyDiags...)
666 if p.recovery && keyDiags.HasErrors() {
667 close = p.recover(TokenCBrack)
668 } else {
669 close = p.Read()
670 if close.Type != TokenCBrack && !p.recovery {
671 diags = append(diags, &hcl.Diagnostic{
672 Severity: hcl.DiagError,
673 Summary: "Missing close bracket on index",
674 Detail: "The index operator must end with a closing bracket (\"]\").",
675 Subject: &close.Range,
676 })
677 close = p.recover(TokenCBrack)
678 }
679 }
680 p.PopIncludeNewlines()
681
682 if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
683 litKey, _ := lit.Value(nil)
684 rng := hcl.RangeBetween(open.Range, close.Range)
685 step := hcl.TraverseIndex{
686 Key: litKey,
687 SrcRange: rng,
688 }
689 ret = makeRelativeTraversal(ret, step, rng)
690 } else {
691 rng := hcl.RangeBetween(open.Range, close.Range)
692 ret = &IndexExpr{
693 Collection: ret,
694 Key: keyExpr,
695
696 SrcRange: rng,
697 OpenRange: open.Range,
698 }
699 }
700
701 default:
702 break Traversal
703 }
704 }
705
706 return ret, diags
707}
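The legacy HIL-style index handling above means that foo.0 builds the same TraverseIndex step that foo[0] would. A tiny sketch (not part of the vendored source) with a hypothetical input and variable:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte("foo.0"), "legacy.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"foo": cty.TupleVal([]cty.Value{cty.StringVal("first"), cty.StringVal("second")}),
		},
	}
	val, valDiags := expr.Value(ctx)
	if valDiags.HasErrors() {
		panic(valDiags.Error())
	}
	fmt.Println(val.AsString()) // expected: "first", exactly what foo[0] evaluates to
}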
708
709// makeRelativeTraversal takes an expression and a traverser and returns
710// a traversal expression that combines the two. If the given expression
711// is already a traversal, it is extended in place (mutating it) and
712// returned. If it isn't, a new RelativeTraversalExpr is created and returned.
713func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression {
714 switch texpr := expr.(type) {
715 case *ScopeTraversalExpr:
716 texpr.Traversal = append(texpr.Traversal, next)
717 texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng)
718 return texpr
719 case *RelativeTraversalExpr:
720 texpr.Traversal = append(texpr.Traversal, next)
721 texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng)
722 return texpr
723 default:
724 return &RelativeTraversalExpr{
725 Source: expr,
726 Traversal: hcl.Traversal{next},
727 SrcRange: rng,
728 }
729 }
730}
731
732func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
733 start := p.Peek()
734
735 switch start.Type {
736 case TokenOParen:
737 p.Read() // eat open paren
738
739 p.PushIncludeNewlines(false)
740
741 expr, diags := p.ParseExpression()
742 if diags.HasErrors() {
743 // attempt to place the peeker after our closing paren
744 // before we return, so that the next parser has some
745 // chance of finding a valid expression.
746 p.recover(TokenCParen)
747 p.PopIncludeNewlines()
748 return expr, diags
749 }
750
751 close := p.Peek()
752 if close.Type != TokenCParen {
753 diags = append(diags, &hcl.Diagnostic{
754 Severity: hcl.DiagError,
755 Summary: "Unbalanced parentheses",
756 Detail: "Expected a closing parenthesis to terminate the expression.",
757 Subject: &close.Range,
758 Context: hcl.RangeBetween(start.Range, close.Range).Ptr(),
759 })
760 p.setRecovery()
761 }
762
763 p.Read() // eat closing paren
764 p.PopIncludeNewlines()
765
766 return expr, diags
767
768 case TokenNumberLit:
769 tok := p.Read() // eat number token
770
771 numVal, diags := p.numberLitValue(tok)
772 return &LiteralValueExpr{
773 Val: numVal,
774 SrcRange: tok.Range,
775 }, diags
776
777 case TokenIdent:
778 tok := p.Read() // eat identifier token
779
780 if p.Peek().Type == TokenOParen {
781 return p.finishParsingFunctionCall(tok)
782 }
783
784 name := string(tok.Bytes)
785 switch name {
786 case "true":
787 return &LiteralValueExpr{
788 Val: cty.True,
789 SrcRange: tok.Range,
790 }, nil
791 case "false":
792 return &LiteralValueExpr{
793 Val: cty.False,
794 SrcRange: tok.Range,
795 }, nil
796 case "null":
797 return &LiteralValueExpr{
798 Val: cty.NullVal(cty.DynamicPseudoType),
799 SrcRange: tok.Range,
800 }, nil
801 default:
802 return &ScopeTraversalExpr{
803 Traversal: hcl.Traversal{
804 hcl.TraverseRoot{
805 Name: name,
806 SrcRange: tok.Range,
807 },
808 },
809 SrcRange: tok.Range,
810 }, nil
811 }
812
813 case TokenOQuote, TokenOHeredoc:
814 open := p.Read() // eat opening marker
815 closer := p.oppositeBracket(open.Type)
816 exprs, passthru, _, diags := p.parseTemplateInner(closer)
817
818 closeRange := p.PrevRange()
819
820 if passthru {
821 if len(exprs) != 1 {
822 panic("passthru set with len(exprs) != 1")
823 }
824 return &TemplateWrapExpr{
825 Wrapped: exprs[0],
826 SrcRange: hcl.RangeBetween(open.Range, closeRange),
827 }, diags
828 }
829
830 return &TemplateExpr{
831 Parts: exprs,
832 SrcRange: hcl.RangeBetween(open.Range, closeRange),
833 }, diags
834
835 case TokenMinus:
836 tok := p.Read() // eat minus token
837
838 // Important to use parseExpressionWithTraversals rather than parseExpression
839 // here, otherwise we can capture a following binary expression into
840 // our negation.
841 // e.g. -46+5 should parse as (-46)+5, not -(46+5)
842 operand, diags := p.parseExpressionWithTraversals()
843 return &UnaryOpExpr{
844 Op: OpNegate,
845 Val: operand,
846
847 SrcRange: hcl.RangeBetween(tok.Range, operand.Range()),
848 SymbolRange: tok.Range,
849 }, diags
850
851 case TokenBang:
852 tok := p.Read() // eat bang token
853
854 // Important to use parseExpressionWithTraversals rather than parseExpression
855 // here, otherwise we can capture a following binary expression into
856 // our negation.
857 operand, diags := p.parseExpressionWithTraversals()
858 return &UnaryOpExpr{
859 Op: OpLogicalNot,
860 Val: operand,
861
862 SrcRange: hcl.RangeBetween(tok.Range, operand.Range()),
863 SymbolRange: tok.Range,
864 }, diags
865
866 case TokenOBrack:
867 return p.parseTupleCons()
868
869 case TokenOBrace:
870 return p.parseObjectCons()
871
872 default:
873 var diags hcl.Diagnostics
874 if !p.recovery {
875 diags = append(diags, &hcl.Diagnostic{
876 Severity: hcl.DiagError,
877 Summary: "Invalid expression",
878 Detail: "Expected the start of an expression, but found an invalid expression token.",
879 Subject: &start.Range,
880 })
881 }
882 p.setRecovery()
883
884 // Return a placeholder so that the AST is still structurally sound
885 // even in the presence of parse errors.
886 return &LiteralValueExpr{
887 Val: cty.DynamicVal,
888 SrcRange: start.Range,
889 }, diags
890 }
891}
892
893func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) {
894 // We'll lean on the cty converter to do the conversion, to ensure that
895 // the behavior is the same as what would happen if converting a
896 // non-literal string to a number.
897 numStrVal := cty.StringVal(string(tok.Bytes))
898 numVal, err := convert.Convert(numStrVal, cty.Number)
899 if err != nil {
900 ret := cty.UnknownVal(cty.Number)
901 return ret, hcl.Diagnostics{
902 {
903 Severity: hcl.DiagError,
904 Summary: "Invalid number literal",
905 // FIXME: not a very good error message, but convert only
906 // gives us "a number is required", so not much help either.
907 Detail: "Failed to recognize the value of this number literal.",
908 Subject: &tok.Range,
909 },
910 }
911 }
912 return numVal, nil
913}
914
915// finishParsingFunctionCall parses a function call assuming that the function
916// name was already read, and so the peeker should be pointing at the opening
917// parenthesis after the name.
918func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) {
919 openTok := p.Read()
920 if openTok.Type != TokenOParen {
921 // should never happen if callers behave
922 panic("finishParsingFunctionCall called with non-parenthesis as next token")
923 }
924
925 var args []Expression
926 var diags hcl.Diagnostics
927 var expandFinal bool
928 var closeTok Token
929
930 // Arbitrary newlines are allowed inside the function call parentheses.
931 p.PushIncludeNewlines(false)
932
933Token:
934 for {
935 tok := p.Peek()
936
937 if tok.Type == TokenCParen {
938 closeTok = p.Read() // eat closing paren
939 break Token
940 }
941
942 arg, argDiags := p.ParseExpression()
943 args = append(args, arg)
944 diags = append(diags, argDiags...)
945 if p.recovery && argDiags.HasErrors() {
946 // if there was a parse error in the argument then we've
947 // probably been left in a weird place in the token stream,
948 // so we'll bail out with a partial argument list.
949 p.recover(TokenCParen)
950 break Token
951 }
952
953 sep := p.Read()
954 if sep.Type == TokenCParen {
955 closeTok = sep
956 break Token
957 }
958
959 if sep.Type == TokenEllipsis {
960 expandFinal = true
961
962 if p.Peek().Type != TokenCParen {
963 if !p.recovery {
964 diags = append(diags, &hcl.Diagnostic{
965 Severity: hcl.DiagError,
966 Summary: "Missing closing parenthesis",
967 Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.",
968 Subject: &sep.Range,
969 Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(),
970 })
971 }
972 closeTok = p.recover(TokenCParen)
973 } else {
974 closeTok = p.Read() // eat closing paren
975 }
976 break Token
977 }
978
979 if sep.Type != TokenComma {
980 diags = append(diags, &hcl.Diagnostic{
981 Severity: hcl.DiagError,
982 Summary: "Missing argument separator",
983 Detail: "A comma is required to separate each function argument from the next.",
984 Subject: &sep.Range,
985 Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(),
986 })
987 closeTok = p.recover(TokenCParen)
988 break Token
989 }
990
991 if p.Peek().Type == TokenCParen {
992 // A trailing comma after the last argument gets us in here.
993 closeTok = p.Read() // eat closing paren
994 break Token
995 }
996
997 }
998
999 p.PopIncludeNewlines()
1000
1001 return &FunctionCallExpr{
1002 Name: string(name.Bytes),
1003 Args: args,
1004
1005 ExpandFinal: expandFinal,
1006
1007 NameRange: name.Range,
1008 OpenParenRange: openTok.Range,
1009 CloseParenRange: closeTok.Range,
1010 }, diags
1011}
1012
1013func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) {
1014 open := p.Read()
1015 if open.Type != TokenOBrack {
1016 // Should never happen if callers are behaving
1017 panic("parseTupleCons called without peeker pointing to open bracket")
1018 }
1019
1020 p.PushIncludeNewlines(false)
1021 defer p.PopIncludeNewlines()
1022
1023 if forKeyword.TokenMatches(p.Peek()) {
1024 return p.finishParsingForExpr(open)
1025 }
1026
1027 var close Token
1028
1029 var diags hcl.Diagnostics
1030 var exprs []Expression
1031
1032 for {
1033 next := p.Peek()
1034 if next.Type == TokenCBrack {
1035 close = p.Read() // eat closer
1036 break
1037 }
1038
1039 expr, exprDiags := p.ParseExpression()
1040 exprs = append(exprs, expr)
1041 diags = append(diags, exprDiags...)
1042
1043 if p.recovery && exprDiags.HasErrors() {
1044 // If expression parsing failed then we are probably in a strange
1045 // place in the token stream, so we'll bail out and try to reset
1046 // to after our closing bracket to allow parsing to continue.
1047 close = p.recover(TokenCBrack)
1048 break
1049 }
1050
1051 next = p.Peek()
1052 if next.Type == TokenCBrack {
1053 close = p.Read() // eat closer
1054 break
1055 }
1056
1057 if next.Type != TokenComma {
1058 if !p.recovery {
1059 diags = append(diags, &hcl.Diagnostic{
1060 Severity: hcl.DiagError,
1061 Summary: "Missing item separator",
1062 Detail: "Expected a comma to mark the beginning of the next item.",
1063 Subject: &next.Range,
1064 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1065 })
1066 }
1067 close = p.recover(TokenCBrack)
1068 break
1069 }
1070
1071 p.Read() // eat comma
1072
1073 }
1074
1075 return &TupleConsExpr{
1076 Exprs: exprs,
1077
1078 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1079 OpenRange: open.Range,
1080 }, diags
1081}
1082
1083func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
1084 open := p.Read()
1085 if open.Type != TokenOBrace {
1086 // Should never happen if callers are behaving
1087 panic("parseObjectCons called without peeker pointing to open brace")
1088 }
1089
1090 p.PushIncludeNewlines(true)
1091 defer p.PopIncludeNewlines()
1092
1093 if forKeyword.TokenMatches(p.Peek()) {
1094 return p.finishParsingForExpr(open)
1095 }
1096
1097 var close Token
1098
1099 var diags hcl.Diagnostics
1100 var items []ObjectConsItem
1101
1102 for {
1103 next := p.Peek()
1104 if next.Type == TokenNewline {
1105 p.Read() // eat newline
1106 continue
1107 }
1108
1109 if next.Type == TokenCBrace {
1110 close = p.Read() // eat closer
1111 break
1112 }
1113
1114 var key Expression
1115 var keyDiags hcl.Diagnostics
1116 key, keyDiags = p.ParseExpression()
1117 diags = append(diags, keyDiags...)
1118
1119 if p.recovery && keyDiags.HasErrors() {
1120 // If expression parsing failed then we are probably in a strange
1121 // place in the token stream, so we'll bail out and try to reset
1122 // to after our closing brace to allow parsing to continue.
1123 close = p.recover(TokenCBrace)
1124 break
1125 }
1126
1127 // We wrap up the key expression in a special wrapper that deals
1128 // with our special case that naked identifiers as object keys
1129 // are interpreted as literal strings.
1130 key = &ObjectConsKeyExpr{Wrapped: key}
1131
1132 next = p.Peek()
1133 if next.Type != TokenEqual && next.Type != TokenColon {
1134 if !p.recovery {
1135 if next.Type == TokenNewline || next.Type == TokenComma {
1136 diags = append(diags, &hcl.Diagnostic{
1137 Severity: hcl.DiagError,
1138 Summary: "Missing item value",
1139 Detail: "Expected an item value, introduced by an equals sign (\"=\").",
1140 Subject: &next.Range,
1141 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1142 })
1143 } else {
1144 diags = append(diags, &hcl.Diagnostic{
1145 Severity: hcl.DiagError,
1146 Summary: "Missing key/value separator",
1147 Detail: "Expected an equals sign (\"=\") to mark the beginning of the item value.",
1148 Subject: &next.Range,
1149 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1150 })
1151 }
1152 }
1153 close = p.recover(TokenCBrace)
1154 break
1155 }
1156
1157 p.Read() // eat equals sign or colon
1158
1159 value, valueDiags := p.ParseExpression()
1160 diags = append(diags, valueDiags...)
1161
1162 if p.recovery && valueDiags.HasErrors() {
1163 // If expression parsing failed then we are probably in a strange
1164 // place in the token stream, so we'll bail out and try to reset
1165 // to after our closing brace to allow parsing to continue.
1166 close = p.recover(TokenCBrace)
1167 break
1168 }
1169
1170 items = append(items, ObjectConsItem{
1171 KeyExpr: key,
1172 ValueExpr: value,
1173 })
1174
1175 next = p.Peek()
1176 if next.Type == TokenCBrace {
1177 close = p.Read() // eat closer
1178 break
1179 }
1180
1181 if next.Type != TokenComma && next.Type != TokenNewline {
1182 if !p.recovery {
1183 diags = append(diags, &hcl.Diagnostic{
1184 Severity: hcl.DiagError,
1185 Summary: "Missing item separator",
1186 Detail: "Expected a newline or comma to mark the beginning of the next item.",
1187 Subject: &next.Range,
1188 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1189 })
1190 }
1191 close = p.recover(TokenCBrace)
1192 break
1193 }
1194
1195 p.Read() // eat comma or newline
1196
1197 }
1198
1199 return &ObjectConsExpr{
1200 Items: items,
1201
1202 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1203 OpenRange: open.Range,
1204 }, diags
1205}
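
For reference, a brief sketch for illustration (not from the vendored source) of the object-constructor syntax accepted above, driven through the package's public ParseExpression from public.go; the file name and attribute names are arbitrary:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// Naked identifier keys are treated as literal strings, either "=" or ":"
	// may separate key from value, and items may be separated by commas (or
	// newlines, since this constructor re-enables newline sensitivity).
	src := []byte(`{ name = "web", port: 8080 }`)
	expr, diags := hclsyntax.ParseExpression(src, "object.hcl", hcl.Pos{Line: 1, Column: 1})
	fmt.Printf("%T errors=%v\n", expr, diags.HasErrors()) // *hclsyntax.ObjectConsExpr errors=false
}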
1206
1207func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) {
1208 introducer := p.Read()
1209 if !forKeyword.TokenMatches(introducer) {
1210 // Should never happen if callers are behaving
1211 panic("finishParsingForExpr called without peeker pointing to 'for' identifier")
1212 }
1213
1214 var makeObj bool
1215 var closeType TokenType
1216 switch open.Type {
1217 case TokenOBrace:
1218 makeObj = true
1219 closeType = TokenCBrace
1220 case TokenOBrack:
1221 makeObj = false // making a tuple
1222 closeType = TokenCBrack
1223 default:
1224 // Should never happen if callers are behaving
1225 panic("finishParsingForExpr called with invalid open token")
1226 }
1227
1228 var diags hcl.Diagnostics
1229 var keyName, valName string
1230
1231 if p.Peek().Type != TokenIdent {
1232 if !p.recovery {
1233 diags = append(diags, &hcl.Diagnostic{
1234 Severity: hcl.DiagError,
1235 Summary: "Invalid 'for' expression",
1236 Detail: "For expression requires variable name after 'for'.",
1237 Subject: p.Peek().Range.Ptr(),
1238 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1239 })
1240 }
1241 close := p.recover(closeType)
1242 return &LiteralValueExpr{
1243 Val: cty.DynamicVal,
1244 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1245 }, diags
1246 }
1247
1248 valName = string(p.Read().Bytes)
1249
1250 if p.Peek().Type == TokenComma {
1251 // What we just read was actually the key, then.
1252 keyName = valName
1253 p.Read() // eat comma
1254
1255 if p.Peek().Type != TokenIdent {
1256 if !p.recovery {
1257 diags = append(diags, &hcl.Diagnostic{
1258 Severity: hcl.DiagError,
1259 Summary: "Invalid 'for' expression",
1260 Detail: "For expression requires value variable name after comma.",
1261 Subject: p.Peek().Range.Ptr(),
1262 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1263 })
1264 }
1265 close := p.recover(closeType)
1266 return &LiteralValueExpr{
1267 Val: cty.DynamicVal,
1268 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1269 }, diags
1270 }
1271
1272 valName = string(p.Read().Bytes)
1273 }
1274
1275 if !inKeyword.TokenMatches(p.Peek()) {
1276 if !p.recovery {
1277 diags = append(diags, &hcl.Diagnostic{
1278 Severity: hcl.DiagError,
1279 Summary: "Invalid 'for' expression",
1280 Detail: "For expression requires 'in' keyword after names.",
1281 Subject: p.Peek().Range.Ptr(),
1282 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1283 })
1284 }
1285 close := p.recover(closeType)
1286 return &LiteralValueExpr{
1287 Val: cty.DynamicVal,
1288 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1289 }, diags
1290 }
1291 p.Read() // eat 'in' keyword
1292
1293 collExpr, collDiags := p.ParseExpression()
1294 diags = append(diags, collDiags...)
1295 if p.recovery && collDiags.HasErrors() {
1296 close := p.recover(closeType)
1297 return &LiteralValueExpr{
1298 Val: cty.DynamicVal,
1299 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1300 }, diags
1301 }
1302
1303 if p.Peek().Type != TokenColon {
1304 if !p.recovery {
1305 diags = append(diags, &hcl.Diagnostic{
1306 Severity: hcl.DiagError,
1307 Summary: "Invalid 'for' expression",
1308 Detail: "For expression requires colon after collection expression.",
1309 Subject: p.Peek().Range.Ptr(),
1310 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1311 })
1312 }
1313 close := p.recover(closeType)
1314 return &LiteralValueExpr{
1315 Val: cty.DynamicVal,
1316 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1317 }, diags
1318 }
1319 p.Read() // eat colon
1320
1321 var keyExpr, valExpr Expression
1322 var keyDiags, valDiags hcl.Diagnostics
1323 valExpr, valDiags = p.ParseExpression()
1324 if p.Peek().Type == TokenFatArrow {
1325 // What we just parsed was actually keyExpr
1326 p.Read() // eat the fat arrow
1327 keyExpr, keyDiags = valExpr, valDiags
1328
1329 valExpr, valDiags = p.ParseExpression()
1330 }
1331 diags = append(diags, keyDiags...)
1332 diags = append(diags, valDiags...)
1333 if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) {
1334 close := p.recover(closeType)
1335 return &LiteralValueExpr{
1336 Val: cty.DynamicVal,
1337 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1338 }, diags
1339 }
1340
1341 group := false
1342 var ellipsis Token
1343 if p.Peek().Type == TokenEllipsis {
1344 ellipsis = p.Read()
1345 group = true
1346 }
1347
1348 var condExpr Expression
1349 var condDiags hcl.Diagnostics
1350 if ifKeyword.TokenMatches(p.Peek()) {
1351 p.Read() // eat "if"
1352 condExpr, condDiags = p.ParseExpression()
1353 diags = append(diags, condDiags...)
1354 if p.recovery && condDiags.HasErrors() {
1355 close := p.recover(p.oppositeBracket(open.Type))
1356 return &LiteralValueExpr{
1357 Val: cty.DynamicVal,
1358 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1359 }, diags
1360 }
1361 }
1362
1363 var close Token
1364 if p.Peek().Type == closeType {
1365 close = p.Read()
1366 } else {
1367 if !p.recovery {
1368 diags = append(diags, &hcl.Diagnostic{
1369 Severity: hcl.DiagError,
1370 Summary: "Invalid 'for' expression",
1371 Detail: "Extra characters after the end of the 'for' expression.",
1372 Subject: p.Peek().Range.Ptr(),
1373 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1374 })
1375 }
1376 close = p.recover(closeType)
1377 }
1378
1379 if !makeObj {
1380 if keyExpr != nil {
1381 diags = append(diags, &hcl.Diagnostic{
1382 Severity: hcl.DiagError,
1383 Summary: "Invalid 'for' expression",
1384 Detail: "Key expression is not valid when building a tuple.",
1385 Subject: keyExpr.Range().Ptr(),
1386 Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
1387 })
1388 }
1389
1390 if group {
1391 diags = append(diags, &hcl.Diagnostic{
1392 Severity: hcl.DiagError,
1393 Summary: "Invalid 'for' expression",
1394 Detail: "Grouping ellipsis (...) cannot be used when building a tuple.",
1395 Subject: &ellipsis.Range,
1396 Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
1397 })
1398 }
1399 } else {
1400 if keyExpr == nil {
1401 diags = append(diags, &hcl.Diagnostic{
1402 Severity: hcl.DiagError,
1403 Summary: "Invalid 'for' expression",
1404 Detail: "Key expression is required when building an object.",
1405 Subject: valExpr.Range().Ptr(),
1406 Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
1407 })
1408 }
1409 }
1410
1411 return &ForExpr{
1412 KeyVar: keyName,
1413 ValVar: valName,
1414 CollExpr: collExpr,
1415 KeyExpr: keyExpr,
1416 ValExpr: valExpr,
1417 CondExpr: condExpr,
1418 Group: group,
1419
1420 SrcRange: hcl.RangeBetween(open.Range, close.Range),
1421 OpenRange: open.Range,
1422 CloseRange: close.Range,
1423 }, diags
1424}
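
A sketch for illustration (not from the vendored source) of the two 'for' expression forms this function builds, exercised via the public ParseExpression; names such as var.list and upper are placeholders and nothing is evaluated here:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	pos := hcl.Pos{Line: 1, Column: 1}

	// Tuple form: brackets, no key expression, optional trailing "if" filter.
	tup, tupDiags := hclsyntax.ParseExpression([]byte(`[for v in var.list : upper(v) if v != ""]`), "tuple.hcl", pos)
	fmt.Printf("%T errors=%v\n", tup, tupDiags.HasErrors()) // *hclsyntax.ForExpr errors=false

	// Object form: braces, "key => value", optionally with a grouping "..." suffix.
	obj, objDiags := hclsyntax.ParseExpression([]byte(`{for k, v in var.map : k => v}`), "object.hcl", pos)
	fmt.Printf("%T errors=%v\n", obj, objDiags.HasErrors()) // *hclsyntax.ForExpr errors=false
}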
1425
1426// parseQuotedStringLiteral is a helper for parsing quoted strings, such as
1427// block labels, that aren't allowed to contain any interpolations.
1428func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) {
1429 oQuote := p.Read()
1430 if oQuote.Type != TokenOQuote {
1431 return "", oQuote.Range, hcl.Diagnostics{
1432 {
1433 Severity: hcl.DiagError,
1434 Summary: "Invalid string literal",
1435 Detail: "A quoted string is required here.",
1436 Subject: &oQuote.Range,
1437 },
1438 }
1439 }
1440
1441 var diags hcl.Diagnostics
1442 ret := &bytes.Buffer{}
1443 var cQuote Token
1444
1445Token:
1446 for {
1447 tok := p.Read()
1448 switch tok.Type {
1449
1450 case TokenCQuote:
1451 cQuote = tok
1452 break Token
1453
1454 case TokenQuotedLit:
1455 s, sDiags := p.decodeStringLit(tok)
1456 diags = append(diags, sDiags...)
1457 ret.WriteString(s)
1458
1459 case TokenTemplateControl, TokenTemplateInterp:
1460 which := "$"
1461 if tok.Type == TokenTemplateControl {
1462				which = "%"
1463 }
1464
1465 diags = append(diags, &hcl.Diagnostic{
1466 Severity: hcl.DiagError,
1467 Summary: "Invalid string literal",
1468 Detail: fmt.Sprintf(
1469 "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.",
1470 which, which, which,
1471 ),
1472 Subject: &tok.Range,
1473 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
1474 })
1475 p.recover(TokenTemplateSeqEnd)
1476
1477 case TokenEOF:
1478 diags = append(diags, &hcl.Diagnostic{
1479 Severity: hcl.DiagError,
1480 Summary: "Unterminated string literal",
1481 Detail: "Unable to find the closing quote mark before the end of the file.",
1482 Subject: &tok.Range,
1483 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
1484 })
1485 break Token
1486
1487 default:
1488 // Should never happen, as long as the scanner is behaving itself
1489 diags = append(diags, &hcl.Diagnostic{
1490 Severity: hcl.DiagError,
1491 Summary: "Invalid string literal",
1492 Detail: "This item is not valid in a string literal.",
1493 Subject: &tok.Range,
1494 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
1495 })
1496 p.recover(TokenOQuote)
1497 break Token
1498
1499 }
1500
1501 }
1502
1503 return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags
1504}
1505
1506// decodeStringLit processes the given token, which must be either a
1507// TokenQuotedLit or a TokenStringLit, returning the string resulting from
1508// resolving any escape sequences.
1509//
1510// If any error diagnostics are returned, the returned string may be incomplete
1511// or otherwise invalid.
1512func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) {
1513 var quoted bool
1514 switch tok.Type {
1515 case TokenQuotedLit:
1516 quoted = true
1517 case TokenStringLit:
1518 quoted = false
1519 default:
1520		panic("decodeStringLit can only be used with TokenStringLit and TokenQuotedLit tokens")
1521 }
1522 var diags hcl.Diagnostics
1523
1524 ret := make([]byte, 0, len(tok.Bytes))
1525 slices := scanStringLit(tok.Bytes, quoted)
1526
1527 // We will mutate rng constantly as we walk through our token slices below.
1528 // Any diagnostics must take a copy of this rng rather than simply pointing
1529 // to it, e.g. by using rng.Ptr() rather than &rng.
1530 rng := tok.Range
1531 rng.End = rng.Start
1532
1533Slices:
1534 for _, slice := range slices {
1535 if len(slice) == 0 {
1536 continue
1537 }
1538
1539 // Advance the start of our range to where the previous token ended
1540 rng.Start = rng.End
1541
1542 // Advance the end of our range to after our token.
1543 b := slice
1544 for len(b) > 0 {
1545 adv, ch, _ := textseg.ScanGraphemeClusters(b, true)
1546 rng.End.Byte += adv
1547 switch ch[0] {
1548 case '\r', '\n':
1549 rng.End.Line++
1550 rng.End.Column = 1
1551 default:
1552 rng.End.Column++
1553 }
1554 b = b[adv:]
1555 }
1556
1557 TokenType:
1558 switch slice[0] {
1559 case '\\':
1560 if !quoted {
1561 // If we're not in quoted mode then just treat this token as
1562 // normal. (Slices can still start with backslash even if we're
1563 // not specifically looking for backslash sequences.)
1564 break TokenType
1565 }
1566 if len(slice) < 2 {
1567 diags = append(diags, &hcl.Diagnostic{
1568 Severity: hcl.DiagError,
1569 Summary: "Invalid escape sequence",
1570 Detail: "Backslash must be followed by an escape sequence selector character.",
1571 Subject: rng.Ptr(),
1572 })
1573 break TokenType
1574 }
1575
1576 switch slice[1] {
1577
1578 case 'n':
1579 ret = append(ret, '\n')
1580 continue Slices
1581 case 'r':
1582 ret = append(ret, '\r')
1583 continue Slices
1584 case 't':
1585 ret = append(ret, '\t')
1586 continue Slices
1587 case '"':
1588 ret = append(ret, '"')
1589 continue Slices
1590 case '\\':
1591 ret = append(ret, '\\')
1592 continue Slices
1593 case 'u', 'U':
1594 if slice[1] == 'u' && len(slice) != 6 {
1595 diags = append(diags, &hcl.Diagnostic{
1596 Severity: hcl.DiagError,
1597 Summary: "Invalid escape sequence",
1598 Detail: "The \\u escape sequence must be followed by four hexadecimal digits.",
1599 Subject: rng.Ptr(),
1600 })
1601 break TokenType
1602 } else if slice[1] == 'U' && len(slice) != 10 {
1603 diags = append(diags, &hcl.Diagnostic{
1604 Severity: hcl.DiagError,
1605 Summary: "Invalid escape sequence",
1606 Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.",
1607 Subject: rng.Ptr(),
1608 })
1609 break TokenType
1610 }
1611
1612 numHex := string(slice[2:])
1613 num, err := strconv.ParseUint(numHex, 16, 32)
1614 if err != nil {
1615 // Should never happen because the scanner won't match
1616 // a sequence of digits that isn't valid.
1617 panic(err)
1618 }
1619
1620 r := rune(num)
1621 l := utf8.RuneLen(r)
1622 if l == -1 {
1623 diags = append(diags, &hcl.Diagnostic{
1624 Severity: hcl.DiagError,
1625 Summary: "Invalid escape sequence",
1626 Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num),
1627 Subject: rng.Ptr(),
1628 })
1629 break TokenType
1630 }
1631 for i := 0; i < l; i++ {
1632 ret = append(ret, 0)
1633 }
1634 rb := ret[len(ret)-l:]
1635 utf8.EncodeRune(rb, r)
1636
1637 continue Slices
1638
1639 default:
1640 diags = append(diags, &hcl.Diagnostic{
1641 Severity: hcl.DiagError,
1642 Summary: "Invalid escape sequence",
1643 Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]),
1644 Subject: rng.Ptr(),
1645 })
1646 ret = append(ret, slice[1:]...)
1647 continue Slices
1648 }
1649
1650 case '$', '%':
1651 if len(slice) != 3 {
1652 // Not long enough to be our escape sequence, so it's literal.
1653 break TokenType
1654 }
1655
1656 if slice[1] == slice[0] && slice[2] == '{' {
1657 ret = append(ret, slice[0])
1658 ret = append(ret, '{')
1659 continue Slices
1660 }
1661
1662 break TokenType
1663 }
1664
1665 // If we fall out here or break out of here from the switch above
1666 // then this slice is just a literal.
1667 ret = append(ret, slice...)
1668 }
1669
1670 return string(ret), diags
1671}
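
A sketch for illustration (not from the vendored source) showing a few decodings implied by the cases above, observed through the public API; a quoted string with no interpolations can be evaluated with a nil EvalContext:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// The HCL source contains \n, \u00e9 and the doubled form $${, which the
	// function above decodes to a newline, "é" and a literal "${" respectively.
	src := []byte(`"line1\nline2 caf\u00e9 $${literal}"`)
	expr, diags := hclsyntax.ParseExpression(src, "string.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(nil)
	fmt.Printf("%q\n", val.AsString()) // "line1\nline2 café ${literal}"
}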
1672
1673// setRecovery turns on recovery mode without actually doing any recovery.
1674// This can be used when a parser knowingly leaves the peeker in a useless
1675// place and wants to suppress errors that might result from that decision.
1676func (p *parser) setRecovery() {
1677 p.recovery = true
1678}
1679
1680// recover seeks forward in the token stream until it finds TokenType "end",
1681// then returns with the peeker pointed at the following token.
1682//
1683// If the given token type is a bracketer, this function will additionally
1684// count nested instances of the brackets to try to leave the peeker at
1685// the end of the _current_ instance of that bracketer, skipping over any
1686// nested instances. This is a best-effort operation and may have
1687// unpredictable results on input with bad bracketer nesting.
1688func (p *parser) recover(end TokenType) Token {
1689 start := p.oppositeBracket(end)
1690 p.recovery = true
1691
1692 nest := 0
1693 for {
1694 tok := p.Read()
1695 ty := tok.Type
1696 if end == TokenTemplateSeqEnd && ty == TokenTemplateControl {
1697 // normalize so that our matching behavior can work, since
1698 // TokenTemplateControl/TokenTemplateInterp are asymmetrical
1699 // with TokenTemplateSeqEnd and thus we need to count both
1700 // openers if that's the closer we're looking for.
1701 ty = TokenTemplateInterp
1702 }
1703
1704 switch ty {
1705 case start:
1706 nest++
1707 case end:
1708 if nest < 1 {
1709 return tok
1710 }
1711
1712 nest--
1713 case TokenEOF:
1714 return tok
1715 }
1716 }
1717}
1718
1719// recoverOver seeks forward in the token stream until it finds a block
1720// starting with TokenType "start", then finds the corresponding end token,
1721// leaving the peeker pointed at the token after that end token.
1722//
1723// The given token type _must_ be a bracketer. For example, if the given
1724// start token is TokenOBrace then the parser will be left at the _end_ of
1725// the next brace-delimited block encountered, or at EOF if no such block
1726// is found or it is unclosed.
1727func (p *parser) recoverOver(start TokenType) {
1728 end := p.oppositeBracket(start)
1729
1730 // find the opening bracket first
1731Token:
1732 for {
1733 tok := p.Read()
1734 switch tok.Type {
1735 case start, TokenEOF:
1736 break Token
1737 }
1738 }
1739
1740 // Now use our existing recover function to locate the _end_ of the
1741 // container we've found.
1742 p.recover(end)
1743}
1744
1745func (p *parser) recoverAfterBodyItem() {
1746 p.recovery = true
1747 var open []TokenType
1748
1749Token:
1750 for {
1751 tok := p.Read()
1752
1753 switch tok.Type {
1754
1755 case TokenNewline:
1756 if len(open) == 0 {
1757 break Token
1758 }
1759
1760 case TokenEOF:
1761 break Token
1762
1763 case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl:
1764 open = append(open, tok.Type)
1765
1766 case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc:
1767 opener := p.oppositeBracket(tok.Type)
1768 for len(open) > 0 && open[len(open)-1] != opener {
1769 open = open[:len(open)-1]
1770 }
1771 if len(open) > 0 {
1772 open = open[:len(open)-1]
1773 }
1774
1775 case TokenTemplateSeqEnd:
1776 for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl {
1777 open = open[:len(open)-1]
1778 }
1779 if len(open) > 0 {
1780 open = open[:len(open)-1]
1781 }
1782
1783 }
1784 }
1785}
1786
1787// oppositeBracket finds the bracket that opposes the given bracketer, or
1788// TokenNil if the given token isn't a bracketer.
1789//
1790// "Bracketer", for the sake of this function, is one end of a matching
1791// open/close set of tokens that establish a bracketing context.
1792func (p *parser) oppositeBracket(ty TokenType) TokenType {
1793 switch ty {
1794
1795 case TokenOBrace:
1796 return TokenCBrace
1797 case TokenOBrack:
1798 return TokenCBrack
1799 case TokenOParen:
1800 return TokenCParen
1801 case TokenOQuote:
1802 return TokenCQuote
1803 case TokenOHeredoc:
1804 return TokenCHeredoc
1805
1806 case TokenCBrace:
1807 return TokenOBrace
1808 case TokenCBrack:
1809 return TokenOBrack
1810 case TokenCParen:
1811 return TokenOParen
1812 case TokenCQuote:
1813 return TokenOQuote
1814 case TokenCHeredoc:
1815 return TokenOHeredoc
1816
1817 case TokenTemplateControl:
1818 return TokenTemplateSeqEnd
1819 case TokenTemplateInterp:
1820 return TokenTemplateSeqEnd
1821 case TokenTemplateSeqEnd:
1822		// This is ambiguous, but we return Interp here because that's
1823 // what's assumed by the "recover" method.
1824 return TokenTemplateInterp
1825
1826 default:
1827 return TokenNil
1828 }
1829}
1830
1831func errPlaceholderExpr(rng hcl.Range) Expression {
1832 return &LiteralValueExpr{
1833 Val: cty.DynamicVal,
1834 SrcRange: rng,
1835 }
1836}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
new file mode 100644
index 0000000..3711067
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
@@ -0,0 +1,728 @@
1package hclsyntax
2
3import (
4 "fmt"
5 "strings"
6 "unicode"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/zclconf/go-cty/cty"
10)
11
12func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
13 return p.parseTemplate(TokenEOF)
14}
15
16func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
17 exprs, passthru, rng, diags := p.parseTemplateInner(end)
18
19 if passthru {
20 if len(exprs) != 1 {
21 panic("passthru set with len(exprs) != 1")
22 }
23 return &TemplateWrapExpr{
24 Wrapped: exprs[0],
25 SrcRange: rng,
26 }, diags
27 }
28
29 return &TemplateExpr{
30 Parts: exprs,
31 SrcRange: rng,
32 }, diags
33}
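
A sketch for illustration (not from the vendored source) of the two result shapes, via the public ParseTemplate declared in public.go:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	pos := hcl.Pos{Line: 1, Column: 1}

	// Exactly one interpolation and nothing else: the inner expression is
	// passed through, preserving its value's type.
	wrap, _ := hclsyntax.ParseTemplate([]byte(`${count}`), "a.tmpl", pos)
	fmt.Printf("%T\n", wrap) // *hclsyntax.TemplateWrapExpr

	// Anything else becomes a string-producing template with multiple parts.
	tmpl, _ := hclsyntax.ParseTemplate([]byte(`count is ${count}`), "b.tmpl", pos)
	fmt.Printf("%T\n", tmpl) // *hclsyntax.TemplateExpr
}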
34
35func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
36 parts, diags := p.parseTemplateParts(end)
37 tp := templateParser{
38 Tokens: parts.Tokens,
39 SrcRange: parts.SrcRange,
40 }
41 exprs, exprsDiags := tp.parseRoot()
42 diags = append(diags, exprsDiags...)
43
44 passthru := false
45 if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token
46 if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp {
47 passthru = true
48 }
49 }
50
51 return exprs, passthru, parts.SrcRange, diags
52}
53
54type templateParser struct {
55 Tokens []templateToken
56 SrcRange hcl.Range
57
58 pos int
59}
60
61func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) {
62 var exprs []Expression
63 var diags hcl.Diagnostics
64
65 for {
66 next := p.Peek()
67 if _, isEnd := next.(*templateEndToken); isEnd {
68 break
69 }
70
71 expr, exprDiags := p.parseExpr()
72 diags = append(diags, exprDiags...)
73 exprs = append(exprs, expr)
74 }
75
76 return exprs, diags
77}
78
79func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) {
80 next := p.Peek()
81 switch tok := next.(type) {
82
83 case *templateLiteralToken:
84 p.Read() // eat literal
85 return &LiteralValueExpr{
86 Val: cty.StringVal(tok.Val),
87 SrcRange: tok.SrcRange,
88 }, nil
89
90 case *templateInterpToken:
91 p.Read() // eat interp
92 return tok.Expr, nil
93
94 case *templateIfToken:
95 return p.parseIf()
96
97 case *templateForToken:
98 return p.parseFor()
99
100 case *templateEndToken:
101 p.Read() // eat erroneous token
102 return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
103 {
104 // This is a particularly unhelpful diagnostic, so callers
105 // should attempt to pre-empt it and produce a more helpful
106 // diagnostic that is context-aware.
107 Severity: hcl.DiagError,
108 Summary: "Unexpected end of template",
109 Detail: "The control directives within this template are unbalanced.",
110 Subject: &tok.SrcRange,
111 },
112 }
113
114 case *templateEndCtrlToken:
115 p.Read() // eat erroneous token
116 return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
117 {
118 Severity: hcl.DiagError,
119 Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()),
120 Detail: "The control directives within this template are unbalanced.",
121 Subject: &tok.SrcRange,
122 },
123 }
124
125 default:
126 // should never happen, because above should be exhaustive
127 panic(fmt.Sprintf("unhandled template token type %T", next))
128 }
129}
130
131func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) {
132 open := p.Read()
133 openIf, isIf := open.(*templateIfToken)
134 if !isIf {
135 // should never happen if caller is behaving
136 panic("parseIf called with peeker not pointing at if token")
137 }
138
139 var ifExprs, elseExprs []Expression
140 var diags hcl.Diagnostics
141 var endifRange hcl.Range
142
143 currentExprs := &ifExprs
144Token:
145 for {
146 next := p.Peek()
147 if end, isEnd := next.(*templateEndToken); isEnd {
148 diags = append(diags, &hcl.Diagnostic{
149 Severity: hcl.DiagError,
150 Summary: "Unexpected end of template",
151 Detail: fmt.Sprintf(
152 "The if directive at %s is missing its corresponding endif directive.",
153 openIf.SrcRange,
154 ),
155 Subject: &end.SrcRange,
156 })
157 return errPlaceholderExpr(end.SrcRange), diags
158 }
159 if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
160 p.Read() // eat end directive
161
162 switch end.Type {
163
164 case templateElse:
165 if currentExprs == &ifExprs {
166 currentExprs = &elseExprs
167 continue Token
168 }
169
170 diags = append(diags, &hcl.Diagnostic{
171 Severity: hcl.DiagError,
172 Summary: "Unexpected else directive",
173 Detail: fmt.Sprintf(
174 "Already in the else clause for the if started at %s.",
175 openIf.SrcRange,
176 ),
177 Subject: &end.SrcRange,
178 })
179
180 case templateEndIf:
181 endifRange = end.SrcRange
182 break Token
183
184 default:
185 diags = append(diags, &hcl.Diagnostic{
186 Severity: hcl.DiagError,
187 Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
188 Detail: fmt.Sprintf(
189 "Expecting an endif directive for the if started at %s.",
190 openIf.SrcRange,
191 ),
192 Subject: &end.SrcRange,
193 })
194 }
195
196 return errPlaceholderExpr(end.SrcRange), diags
197 }
198
199 expr, exprDiags := p.parseExpr()
200 diags = append(diags, exprDiags...)
201 *currentExprs = append(*currentExprs, expr)
202 }
203
204 if len(ifExprs) == 0 {
205 ifExprs = append(ifExprs, &LiteralValueExpr{
206 Val: cty.StringVal(""),
207 SrcRange: hcl.Range{
208 Filename: openIf.SrcRange.Filename,
209 Start: openIf.SrcRange.End,
210 End: openIf.SrcRange.End,
211 },
212 })
213 }
214 if len(elseExprs) == 0 {
215 elseExprs = append(elseExprs, &LiteralValueExpr{
216 Val: cty.StringVal(""),
217 SrcRange: hcl.Range{
218 Filename: endifRange.Filename,
219 Start: endifRange.Start,
220 End: endifRange.Start,
221 },
222 })
223 }
224
225 trueExpr := &TemplateExpr{
226 Parts: ifExprs,
227 SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()),
228 }
229 falseExpr := &TemplateExpr{
230 Parts: elseExprs,
231 SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()),
232 }
233
234 return &ConditionalExpr{
235 Condition: openIf.CondExpr,
236 TrueResult: trueExpr,
237 FalseResult: falseExpr,
238
239 SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange),
240 }, diags
241}
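
A sketch for illustration (not from the vendored source) of the if/else/endif directive handled above, parsed with the public ParseTemplate and evaluated against an assumed hcl.EvalContext; the variable name "enabled" is arbitrary:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`%{ if enabled }on%{ else }off%{ endif }`)
	expr, diags := hclsyntax.ParseTemplate(src, "cond.tmpl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"enabled": cty.True}}
	val, _ := expr.Value(ctx)
	fmt.Println(val.AsString()) // "on"
}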
242
243func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) {
244 open := p.Read()
245 openFor, isFor := open.(*templateForToken)
246 if !isFor {
247 // should never happen if caller is behaving
248 panic("parseFor called with peeker not pointing at for token")
249 }
250
251 var contentExprs []Expression
252 var diags hcl.Diagnostics
253 var endforRange hcl.Range
254
255Token:
256 for {
257 next := p.Peek()
258 if end, isEnd := next.(*templateEndToken); isEnd {
259 diags = append(diags, &hcl.Diagnostic{
260 Severity: hcl.DiagError,
261 Summary: "Unexpected end of template",
262 Detail: fmt.Sprintf(
263 "The for directive at %s is missing its corresponding endfor directive.",
264 openFor.SrcRange,
265 ),
266 Subject: &end.SrcRange,
267 })
268 return errPlaceholderExpr(end.SrcRange), diags
269 }
270 if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
271 p.Read() // eat end directive
272
273 switch end.Type {
274
275 case templateElse:
276 diags = append(diags, &hcl.Diagnostic{
277 Severity: hcl.DiagError,
278 Summary: "Unexpected else directive",
279 Detail: "An else clause is not expected for a for directive.",
280 Subject: &end.SrcRange,
281 })
282
283 case templateEndFor:
284 endforRange = end.SrcRange
285 break Token
286
287 default:
288 diags = append(diags, &hcl.Diagnostic{
289 Severity: hcl.DiagError,
290 Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
291 Detail: fmt.Sprintf(
292 "Expecting an endfor directive corresponding to the for directive at %s.",
293 openFor.SrcRange,
294 ),
295 Subject: &end.SrcRange,
296 })
297 }
298
299 return errPlaceholderExpr(end.SrcRange), diags
300 }
301
302 expr, exprDiags := p.parseExpr()
303 diags = append(diags, exprDiags...)
304 contentExprs = append(contentExprs, expr)
305 }
306
307 if len(contentExprs) == 0 {
308 contentExprs = append(contentExprs, &LiteralValueExpr{
309 Val: cty.StringVal(""),
310 SrcRange: hcl.Range{
311 Filename: openFor.SrcRange.Filename,
312 Start: openFor.SrcRange.End,
313 End: openFor.SrcRange.End,
314 },
315 })
316 }
317
318 contentExpr := &TemplateExpr{
319 Parts: contentExprs,
320 SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()),
321 }
322
323 forExpr := &ForExpr{
324 KeyVar: openFor.KeyVar,
325 ValVar: openFor.ValVar,
326
327 CollExpr: openFor.CollExpr,
328 ValExpr: contentExpr,
329
330 SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange),
331 OpenRange: openFor.SrcRange,
332 CloseRange: endforRange,
333 }
334
335 return &TemplateJoinExpr{
336 Tuple: forExpr,
337 }, diags
338}
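
And a matching sketch (not from the vendored source) for the for/endfor directive, whose per-item results are concatenated by the TemplateJoinExpr returned above; the variable name "items" is arbitrary:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`%{ for s in items }[${s}]%{ endfor }`)
	expr, diags := hclsyntax.ParseTemplate(src, "loop.tmpl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{
		"items": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	}}
	val, _ := expr.Value(ctx)
	fmt.Println(val.AsString()) // "[a][b]"
}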
339
340func (p *templateParser) Peek() templateToken {
341 return p.Tokens[p.pos]
342}
343
344func (p *templateParser) Read() templateToken {
345 ret := p.Peek()
346 if _, end := ret.(*templateEndToken); !end {
347 p.pos++
348 }
349 return ret
350}
351
352// parseTemplateParts produces a flat sequence of "template tokens", which are
353// either literal values (with any "trimming" already applied), interpolation
354// sequences, or control flow markers.
355//
356// A further pass is required on the result to turn it into an AST.
357func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) {
358 var parts []templateToken
359 var diags hcl.Diagnostics
360
361 startRange := p.NextRange()
362 ltrimNext := false
363 nextCanTrimPrev := false
364 var endRange hcl.Range
365
366Token:
367 for {
368 next := p.Read()
369 if next.Type == end {
370 // all done!
371 endRange = next.Range
372 break
373 }
374
375 ltrim := ltrimNext
376 ltrimNext = false
377 canTrimPrev := nextCanTrimPrev
378 nextCanTrimPrev = false
379
380 switch next.Type {
381 case TokenStringLit, TokenQuotedLit:
382 str, strDiags := p.decodeStringLit(next)
383 diags = append(diags, strDiags...)
384
385 if ltrim {
386 str = strings.TrimLeftFunc(str, unicode.IsSpace)
387 }
388
389 parts = append(parts, &templateLiteralToken{
390 Val: str,
391 SrcRange: next.Range,
392 })
393 nextCanTrimPrev = true
394
395 case TokenTemplateInterp:
396 // if the opener is ${~ then we want to eat any trailing whitespace
397 // in the preceding literal token, assuming it is indeed a literal
398 // token.
399 if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
400 prevExpr := parts[len(parts)-1]
401 if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
402 lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
403 }
404 }
405
406 p.PushIncludeNewlines(false)
407 expr, exprDiags := p.ParseExpression()
408 diags = append(diags, exprDiags...)
409 close := p.Peek()
410 if close.Type != TokenTemplateSeqEnd {
411 if !p.recovery {
412 diags = append(diags, &hcl.Diagnostic{
413 Severity: hcl.DiagError,
414 Summary: "Extra characters after interpolation expression",
415 Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.",
416 Subject: &close.Range,
417 Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
418 })
419 }
420 p.recover(TokenTemplateSeqEnd)
421 } else {
422 p.Read() // eat closing brace
423
424 // If the closer is ~} then we want to eat any leading
425 // whitespace on the next token, if it turns out to be a
426 // literal token.
427 if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
428 ltrimNext = true
429 }
430 }
431 p.PopIncludeNewlines()
432 parts = append(parts, &templateInterpToken{
433 Expr: expr,
434 SrcRange: hcl.RangeBetween(next.Range, close.Range),
435 })
436
437 case TokenTemplateControl:
438 // if the opener is %{~ then we want to eat any trailing whitespace
439 // in the preceding literal token, assuming it is indeed a literal
440 // token.
441 if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
442 prevExpr := parts[len(parts)-1]
443 if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
444 lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
445 }
446 }
447 p.PushIncludeNewlines(false)
448
449 kw := p.Peek()
450 if kw.Type != TokenIdent {
451 if !p.recovery {
452 diags = append(diags, &hcl.Diagnostic{
453 Severity: hcl.DiagError,
454 Summary: "Invalid template directive",
455 Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.",
456 Subject: &kw.Range,
457 Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
458 })
459 }
460 p.recover(TokenTemplateSeqEnd)
461 p.PopIncludeNewlines()
462 continue Token
463 }
464 p.Read() // eat keyword token
465
466 switch {
467
468 case ifKeyword.TokenMatches(kw):
469 condExpr, exprDiags := p.ParseExpression()
470 diags = append(diags, exprDiags...)
471 parts = append(parts, &templateIfToken{
472 CondExpr: condExpr,
473 SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
474 })
475
476 case elseKeyword.TokenMatches(kw):
477 parts = append(parts, &templateEndCtrlToken{
478 Type: templateElse,
479 SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
480 })
481
482 case endifKeyword.TokenMatches(kw):
483 parts = append(parts, &templateEndCtrlToken{
484 Type: templateEndIf,
485 SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
486 })
487
488 case forKeyword.TokenMatches(kw):
489 var keyName, valName string
490 if p.Peek().Type != TokenIdent {
491 if !p.recovery {
492 diags = append(diags, &hcl.Diagnostic{
493 Severity: hcl.DiagError,
494 Summary: "Invalid 'for' directive",
495 Detail: "For directive requires variable name after 'for'.",
496 Subject: p.Peek().Range.Ptr(),
497 })
498 }
499 p.recover(TokenTemplateSeqEnd)
500 p.PopIncludeNewlines()
501 continue Token
502 }
503
504 valName = string(p.Read().Bytes)
505
506 if p.Peek().Type == TokenComma {
507 // What we just read was actually the key, then.
508 keyName = valName
509 p.Read() // eat comma
510
511 if p.Peek().Type != TokenIdent {
512 if !p.recovery {
513 diags = append(diags, &hcl.Diagnostic{
514 Severity: hcl.DiagError,
515 Summary: "Invalid 'for' directive",
516 Detail: "For directive requires value variable name after comma.",
517 Subject: p.Peek().Range.Ptr(),
518 })
519 }
520 p.recover(TokenTemplateSeqEnd)
521 p.PopIncludeNewlines()
522 continue Token
523 }
524
525 valName = string(p.Read().Bytes)
526 }
527
528 if !inKeyword.TokenMatches(p.Peek()) {
529 if !p.recovery {
530 diags = append(diags, &hcl.Diagnostic{
531 Severity: hcl.DiagError,
532 Summary: "Invalid 'for' directive",
533 Detail: "For directive requires 'in' keyword after names.",
534 Subject: p.Peek().Range.Ptr(),
535 })
536 }
537 p.recover(TokenTemplateSeqEnd)
538 p.PopIncludeNewlines()
539 continue Token
540 }
541 p.Read() // eat 'in' keyword
542
543 collExpr, collDiags := p.ParseExpression()
544 diags = append(diags, collDiags...)
545 parts = append(parts, &templateForToken{
546 KeyVar: keyName,
547 ValVar: valName,
548 CollExpr: collExpr,
549
550 SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
551 })
552
553 case endforKeyword.TokenMatches(kw):
554 parts = append(parts, &templateEndCtrlToken{
555 Type: templateEndFor,
556 SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
557 })
558
559 default:
560 if !p.recovery {
561 suggestions := []string{"if", "for", "else", "endif", "endfor"}
562 given := string(kw.Bytes)
563 suggestion := nameSuggestion(given, suggestions)
564 if suggestion != "" {
565 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
566 }
567
568 diags = append(diags, &hcl.Diagnostic{
569 Severity: hcl.DiagError,
570 Summary: "Invalid template control keyword",
571 Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion),
572 Subject: &kw.Range,
573 Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
574 })
575 }
576 p.recover(TokenTemplateSeqEnd)
577 p.PopIncludeNewlines()
578 continue Token
579
580 }
581
582 close := p.Peek()
583 if close.Type != TokenTemplateSeqEnd {
584 if !p.recovery {
585 diags = append(diags, &hcl.Diagnostic{
586 Severity: hcl.DiagError,
587 Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes),
588 Detail: "Expected a closing brace to end the sequence, but found extra characters.",
589 Subject: &close.Range,
590 Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
591 })
592 }
593 p.recover(TokenTemplateSeqEnd)
594 } else {
595 p.Read() // eat closing brace
596
597 // If the closer is ~} then we want to eat any leading
598 // whitespace on the next token, if it turns out to be a
599 // literal token.
600 if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
601 ltrimNext = true
602 }
603 }
604 p.PopIncludeNewlines()
605
606 default:
607 if !p.recovery {
608 diags = append(diags, &hcl.Diagnostic{
609 Severity: hcl.DiagError,
610 Summary: "Unterminated template string",
611 Detail: "No closing marker was found for the string.",
612 Subject: &next.Range,
613 Context: hcl.RangeBetween(startRange, next.Range).Ptr(),
614 })
615 }
616 final := p.recover(end)
617 endRange = final.Range
618 break Token
619 }
620 }
621
622 if len(parts) == 0 {
623 // If a sequence has no content, we'll treat it as if it had an
624 // empty string in it because that's what the user probably means
625 // if they write "" in configuration.
626 parts = append(parts, &templateLiteralToken{
627 Val: "",
628 SrcRange: hcl.Range{
629 // Range is the zero-character span immediately after the
630 // opening quote.
631 Filename: startRange.Filename,
632 Start: startRange.End,
633 End: startRange.End,
634 },
635 })
636 }
637
638 // Always end with an end token, so the parser can produce diagnostics
639 // about unclosed items with proper position information.
640 parts = append(parts, &templateEndToken{
641 SrcRange: endRange,
642 })
643
644 ret := &templateParts{
645 Tokens: parts,
646 SrcRange: hcl.RangeBetween(startRange, endRange),
647 }
648
649 return ret, diags
650}
651
652type templateParts struct {
653 Tokens []templateToken
654 SrcRange hcl.Range
655}
656
657// templateToken is a higher-level token that represents a single atom within
658// the template language. Our template parsing first raises the raw token
659// stream to a sequence of templateToken, and then transforms the result into
660// an expression tree.
661type templateToken interface {
662 templateToken() templateToken
663}
664
665type templateLiteralToken struct {
666 Val string
667 SrcRange hcl.Range
668 isTemplateToken
669}
670
671type templateInterpToken struct {
672 Expr Expression
673 SrcRange hcl.Range
674 isTemplateToken
675}
676
677type templateIfToken struct {
678 CondExpr Expression
679 SrcRange hcl.Range
680 isTemplateToken
681}
682
683type templateForToken struct {
684 KeyVar string // empty if ignoring key
685 ValVar string
686 CollExpr Expression
687 SrcRange hcl.Range
688 isTemplateToken
689}
690
691type templateEndCtrlType int
692
693const (
694 templateEndIf templateEndCtrlType = iota
695 templateElse
696 templateEndFor
697)
698
699type templateEndCtrlToken struct {
700 Type templateEndCtrlType
701 SrcRange hcl.Range
702 isTemplateToken
703}
704
705func (t *templateEndCtrlToken) Name() string {
706 switch t.Type {
707 case templateEndIf:
708 return "endif"
709 case templateElse:
710 return "else"
711 case templateEndFor:
712 return "endfor"
713 default:
714 // should never happen
715 panic("invalid templateEndCtrlType")
716 }
717}
718
719type templateEndToken struct {
720 SrcRange hcl.Range
721 isTemplateToken
722}
723
724type isTemplateToken [0]int
725
726func (t isTemplateToken) templateToken() templateToken {
727 return t
728}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go
new file mode 100644
index 0000000..2ff3ed6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go
@@ -0,0 +1,159 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// ParseTraversalAbs parses an absolute traversal that is assumed to consume
9// all of the remaining tokens in the peeker. The usual parser recovery
10// behavior is not supported here because traversals are not expected to
11// be parsed as part of a larger program.
12func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) {
13 var ret hcl.Traversal
14 var diags hcl.Diagnostics
15
16 // Absolute traversal must always begin with a variable name
17 varTok := p.Read()
18 if varTok.Type != TokenIdent {
19 diags = append(diags, &hcl.Diagnostic{
20 Severity: hcl.DiagError,
21 Summary: "Variable name required",
22 Detail: "Must begin with a variable name.",
23 Subject: &varTok.Range,
24 })
25 return ret, diags
26 }
27
28 varName := string(varTok.Bytes)
29 ret = append(ret, hcl.TraverseRoot{
30 Name: varName,
31 SrcRange: varTok.Range,
32 })
33
34 for {
35 next := p.Peek()
36
37 if next.Type == TokenEOF {
38 return ret, diags
39 }
40
41 switch next.Type {
42 case TokenDot:
43 // Attribute access
44 dot := p.Read() // eat dot
45 nameTok := p.Read()
46 if nameTok.Type != TokenIdent {
47 if nameTok.Type == TokenStar {
48 diags = append(diags, &hcl.Diagnostic{
49 Severity: hcl.DiagError,
50 Summary: "Attribute name required",
51 Detail: "Splat expressions (.*) may not be used here.",
52 Subject: &nameTok.Range,
53 Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
54 })
55 } else {
56 diags = append(diags, &hcl.Diagnostic{
57 Severity: hcl.DiagError,
58 Summary: "Attribute name required",
59 Detail: "Dot must be followed by attribute name.",
60 Subject: &nameTok.Range,
61 Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
62 })
63 }
64 return ret, diags
65 }
66
67 attrName := string(nameTok.Bytes)
68 ret = append(ret, hcl.TraverseAttr{
69 Name: attrName,
70 SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range),
71 })
72 case TokenOBrack:
73 // Index
74 open := p.Read() // eat open bracket
75 next := p.Peek()
76
77 switch next.Type {
78 case TokenNumberLit:
79 tok := p.Read() // eat number
80 numVal, numDiags := p.numberLitValue(tok)
81 diags = append(diags, numDiags...)
82
83 close := p.Read()
84 if close.Type != TokenCBrack {
85 diags = append(diags, &hcl.Diagnostic{
86 Severity: hcl.DiagError,
87 Summary: "Unclosed index brackets",
88 Detail: "Index key must be followed by a closing bracket.",
89 Subject: &close.Range,
90 Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
91 })
92 }
93
94 ret = append(ret, hcl.TraverseIndex{
95 Key: numVal,
96 SrcRange: hcl.RangeBetween(open.Range, close.Range),
97 })
98
99 if diags.HasErrors() {
100 return ret, diags
101 }
102
103 case TokenOQuote:
104 str, _, strDiags := p.parseQuotedStringLiteral()
105 diags = append(diags, strDiags...)
106
107 close := p.Read()
108 if close.Type != TokenCBrack {
109 diags = append(diags, &hcl.Diagnostic{
110 Severity: hcl.DiagError,
111 Summary: "Unclosed index brackets",
112 Detail: "Index key must be followed by a closing bracket.",
113 Subject: &close.Range,
114 Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
115 })
116 }
117
118 ret = append(ret, hcl.TraverseIndex{
119 Key: cty.StringVal(str),
120 SrcRange: hcl.RangeBetween(open.Range, close.Range),
121 })
122
123 if diags.HasErrors() {
124 return ret, diags
125 }
126
127 default:
128 if next.Type == TokenStar {
129 diags = append(diags, &hcl.Diagnostic{
130 Severity: hcl.DiagError,
131 Summary: "Attribute name required",
132 Detail: "Splat expressions ([*]) may not be used here.",
133 Subject: &next.Range,
134 Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
135 })
136 } else {
137 diags = append(diags, &hcl.Diagnostic{
138 Severity: hcl.DiagError,
139 Summary: "Index value required",
140 Detail: "Index brackets must contain either a literal number or a literal string.",
141 Subject: &next.Range,
142 Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
143 })
144 }
145 return ret, diags
146 }
147
148 default:
149 diags = append(diags, &hcl.Diagnostic{
150 Severity: hcl.DiagError,
151 Summary: "Invalid character",
152 Detail: "Expected an attribute access or an index operator.",
153 Subject: &next.Range,
154 Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
155 })
156 return ret, diags
157 }
158 }
159}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go
new file mode 100644
index 0000000..5a4b50e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go
@@ -0,0 +1,212 @@
1package hclsyntax
2
3import (
4 "bytes"
5 "fmt"
6 "path/filepath"
7 "runtime"
8 "strings"
9
10 "github.com/hashicorp/hcl2/hcl"
11)
12
13// This is set to true at init() time in tests, to enable more useful output
14// if a stack discipline error is detected. It should not be enabled in
15// normal mode since there is a performance penalty from accessing the
16// runtime stack to produce the traces, but could be temporarily set to
17// true for debugging if desired.
18var tracePeekerNewlinesStack = false
19
20type peeker struct {
21 Tokens Tokens
22 NextIndex int
23
24 IncludeComments bool
25 IncludeNewlinesStack []bool
26
27 // used only when tracePeekerNewlinesStack is set
28 newlineStackChanges []peekerNewlineStackChange
29}
30
31// for use in debugging the stack usage only
32type peekerNewlineStackChange struct {
33 Pushing bool // if false, then popping
34 Frame runtime.Frame
35 Include bool
36}
37
38func newPeeker(tokens Tokens, includeComments bool) *peeker {
39 return &peeker{
40 Tokens: tokens,
41 IncludeComments: includeComments,
42
43 IncludeNewlinesStack: []bool{true},
44 }
45}
46
47func (p *peeker) Peek() Token {
48 ret, _ := p.nextToken()
49 return ret
50}
51
52func (p *peeker) Read() Token {
53 ret, nextIdx := p.nextToken()
54 p.NextIndex = nextIdx
55 return ret
56}
57
58func (p *peeker) NextRange() hcl.Range {
59 return p.Peek().Range
60}
61
62func (p *peeker) PrevRange() hcl.Range {
63 if p.NextIndex == 0 {
64 return p.NextRange()
65 }
66
67 return p.Tokens[p.NextIndex-1].Range
68}
69
70func (p *peeker) nextToken() (Token, int) {
71 for i := p.NextIndex; i < len(p.Tokens); i++ {
72 tok := p.Tokens[i]
73 switch tok.Type {
74 case TokenComment:
75 if !p.IncludeComments {
76 // Single-line comment tokens, starting with # or //, absorb
77 // the trailing newline that terminates them as part of their
78 // bytes. When we're filtering out comments, we must as a
79 // special case transform these to newline tokens in order
80 // to properly parse newline-terminated block items.
81
82 if p.includingNewlines() {
83 if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
84 fakeNewline := Token{
85 Type: TokenNewline,
86 Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
87
88 // We use the whole token range as the newline
89 // range, even though that's a little... weird,
90 // because otherwise we'd need to go count
91 // characters again in order to figure out the
92 // column of the newline, and that complexity
93 // isn't justified when ranges of newlines are
94 // so rarely printed anyway.
95 Range: tok.Range,
96 }
97 return fakeNewline, i + 1
98 }
99 }
100
101 continue
102 }
103 case TokenNewline:
104 if !p.includingNewlines() {
105 continue
106 }
107 }
108
109 return tok, i + 1
110 }
111
112 // if we fall out here then we'll return the EOF token, and leave
113 // our index pointed off the end of the array so we'll keep
114 // returning EOF in future too.
115 return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
116}
117
118func (p *peeker) includingNewlines() bool {
119 return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
120}
121
122func (p *peeker) PushIncludeNewlines(include bool) {
123 if tracePeekerNewlinesStack {
124 // Record who called us so that we can more easily track down any
125 // mismanagement of the stack in the parser.
126 callers := []uintptr{0}
127 runtime.Callers(2, callers)
128 frames := runtime.CallersFrames(callers)
129 frame, _ := frames.Next()
130 p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
131 true, frame, include,
132 })
133 }
134
135 p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
136}
137
138func (p *peeker) PopIncludeNewlines() bool {
139 stack := p.IncludeNewlinesStack
140 remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
141 p.IncludeNewlinesStack = remain
142
143 if tracePeekerNewlinesStack {
144 // Record who called us so that we can more easily track down any
145 // mismanagement of the stack in the parser.
146 callers := []uintptr{0}
147 runtime.Callers(2, callers)
148 frames := runtime.CallersFrames(callers)
149 frame, _ := frames.Next()
150 p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
151 false, frame, ret,
152 })
153 }
154
155 return ret
156}
157
158// AssertEmptyIncludeNewlinesStack checks whether the IncludeNewlinesStack is
159// empty, panicking if it is not. This can be used to catch stack mismanagement
160// that might otherwise just cause confusing downstream errors.
161//
162// This function is a no-op if the stack is empty when called.
163//
164// If newlines stack tracing is enabled by setting the global variable
165// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
166// calls will be produced to help identify which caller in the parser is
167// misbehaving.
168func (p *peeker) AssertEmptyIncludeNewlinesStack() {
169 if len(p.IncludeNewlinesStack) != 1 {
170 // Should never happen; indicates mismanagement of the stack inside
171 // the parser.
172		if p.newlineStackChanges != nil { // only if tracePeekerNewlinesStack is enabled above
173 panic(fmt.Errorf(
174 "non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
175 len(p.IncludeNewlinesStack)-1,
176 formatPeekerNewlineStackChanges(p.newlineStackChanges),
177 ))
178 } else {
179 panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
180 }
181 }
182}
183
184func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
185 indent := 0
186 var buf bytes.Buffer
187 for _, change := range changes {
188 funcName := change.Frame.Function
189 if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
190 funcName = funcName[idx+1:]
191 }
192 filename := change.Frame.File
193 if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
194 filename = filename[idx+1:]
195 }
196
197 switch change.Pushing {
198
199 case true:
200 buf.WriteString(strings.Repeat(" ", indent))
201 fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
202 indent++
203
204 case false:
205 indent--
206 buf.WriteString(strings.Repeat(" ", indent))
207 fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
208
209 }
210 }
211 return buf.String()
212}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
new file mode 100644
index 0000000..cf0ee29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
@@ -0,0 +1,171 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// ParseConfig parses the given buffer as a whole HCL config file, returning
8// a *hcl.File representing its contents. If HasErrors called on the returned
9// diagnostics returns true, the returned body is likely to be incomplete
10// and should therefore be used with care.
11//
12// The body in the returned file has dynamic type *hclsyntax.Body, so callers
13// may freely type-assert this to get access to the full hclsyntax API in
14// situations where detailed access is required. However, most common use-cases
15// should be served using the hcl.Body interface to ensure compatibility with
16// other configuration syntaxes, such as JSON.
17func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
18 tokens, diags := LexConfig(src, filename, start)
19 peeker := newPeeker(tokens, false)
20 parser := &parser{peeker: peeker}
21 body, parseDiags := parser.ParseBody(TokenEOF)
22 diags = append(diags, parseDiags...)
23
24 // Panic if the parser uses incorrect stack discipline with the peeker's
25 // newlines stack, since otherwise it will produce confusing downstream
26 // errors.
27 peeker.AssertEmptyIncludeNewlinesStack()
28
29 return &hcl.File{
30 Body: body,
31 Bytes: src,
32
33 Nav: navigation{
34 root: body,
35 },
36 }, diags
37}
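
A minimal usage sketch (not from the vendored source); it assumes hclsyntax.Body's exported Attributes and Blocks fields, and the file name is arbitrary:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("name = \"web\"\n\nservice \"http\" {\n  port = 8080\n}\n")

	f, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}

	// The doc comment above guarantees this dynamic type for native syntax.
	body := f.Body.(*hclsyntax.Body)
	fmt.Printf("%d attribute(s), %d block(s)\n", len(body.Attributes), len(body.Blocks))
}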
38
39// ParseExpression parses the given buffer as a standalone HCL expression,
40// returning it as an instance of Expression.
41func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
42 tokens, diags := LexExpression(src, filename, start)
43 peeker := newPeeker(tokens, false)
44 parser := &parser{peeker: peeker}
45
46 // Bare expressions are always parsed in "ignore newlines" mode, as if
47 // they were wrapped in parentheses.
48 parser.PushIncludeNewlines(false)
49
50 expr, parseDiags := parser.ParseExpression()
51 diags = append(diags, parseDiags...)
52
53 next := parser.Peek()
54 if next.Type != TokenEOF && !parser.recovery {
55 diags = append(diags, &hcl.Diagnostic{
56 Severity: hcl.DiagError,
57 Summary: "Extra characters after expression",
58 Detail: "An expression was successfully parsed, but extra characters were found after it.",
59 Subject: &next.Range,
60 })
61 }
62
63 parser.PopIncludeNewlines()
64
65 // Panic if the parser uses incorrect stack discipline with the peeker's
66 // newlines stack, since otherwise it will produce confusing downstream
67 // errors.
68 peeker.AssertEmptyIncludeNewlinesStack()
69
70 return expr, diags
71}
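
A usage sketch (not from the vendored source), evaluating the parsed expression against an hcl.EvalContext; the variable name "base" is arbitrary:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`base + 2 * 3`), "expr.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}

	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"base": cty.NumberIntVal(1)}}
	val, valDiags := expr.Value(ctx)
	if valDiags.HasErrors() {
		log.Fatal(valDiags.Error())
	}
	fmt.Println(val.AsBigFloat()) // 7
}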
72
73// ParseTemplate parses the given buffer as a standalone HCL template,
74// returning it as an instance of Expression.
75func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
76 tokens, diags := LexTemplate(src, filename, start)
77 peeker := newPeeker(tokens, false)
78 parser := &parser{peeker: peeker}
79 expr, parseDiags := parser.ParseTemplate()
80 diags = append(diags, parseDiags...)
81
82 // Panic if the parser uses incorrect stack discipline with the peeker's
83 // newlines stack, since otherwise it will produce confusing downstream
84 // errors.
85 peeker.AssertEmptyIncludeNewlinesStack()
86
87 return expr, diags
88}
89
90// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
91//
92// Parsing as a traversal is more limited than parsing as an expression since
93// it allows only attribute and indexing operations on variables. Traversals
94// are useful as a syntax for referring to objects without necessarily
95// evaluating them.
96func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
97 tokens, diags := LexExpression(src, filename, start)
98 peeker := newPeeker(tokens, false)
99 parser := &parser{peeker: peeker}
100
101	// Bare traversals are always parsed in "ignore newlines" mode, as if
102 // they were wrapped in parentheses.
103 parser.PushIncludeNewlines(false)
104
105 expr, parseDiags := parser.ParseTraversalAbs()
106 diags = append(diags, parseDiags...)
107
108 parser.PopIncludeNewlines()
109
110 // Panic if the parser uses incorrect stack discipline with the peeker's
111 // newlines stack, since otherwise it will produce confusing downstream
112 // errors.
113 peeker.AssertEmptyIncludeNewlinesStack()
114
115 return expr, diags
116}
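
A usage sketch (not from the vendored source); it assumes hcl.Traversal's RootName helper, and the reference string is arbitrary:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	trav, diags := hclsyntax.ParseTraversalAbs([]byte(`module.network.subnets[0]`), "ref.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}
	// Root variable plus two attribute steps and one index step.
	fmt.Println(trav.RootName(), len(trav)) // module 4
}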
117
118// LexConfig performs lexical analysis on the given buffer, treating it as a
119// whole HCL config file, and returns the resulting tokens.
120//
121// Only minimal validation is done during lexical analysis, so the returned
122// diagnostics may include errors about lexical issues such as bad character
123// encodings or unrecognized characters, but full parsing is required to
124// detect _all_ syntax errors.
125func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
126 tokens := scanTokens(src, filename, start, scanNormal)
127 diags := checkInvalidTokens(tokens)
128 return tokens, diags
129}
130
131// LexExpression performs lexical analysis on the given buffer, treating it as
132// a standalone HCL expression, and returns the resulting tokens.
133//
134// Only minimal validation is done during lexical analysis, so the returned
135// diagnostics may include errors about lexical issues such as bad character
136// encodings or unrecognized characters, but full parsing is required to
137// detect _all_ syntax errors.
138func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
139 // This is actually just the same thing as LexConfig, since configs
140 // and expressions lex in the same way.
141 tokens := scanTokens(src, filename, start, scanNormal)
142 diags := checkInvalidTokens(tokens)
143 return tokens, diags
144}
145
146// LexTemplate performs lexical analysis on the given buffer, treating it as a
147// standalone HCL template, and returns the resulting tokens.
148//
149// Only minimal validation is done during lexical analysis, so the returned
150// diagnostics may include errors about lexical issues such as bad character
151// encodings or unrecognized characters, but full parsing is required to
152// detect _all_ syntax errors.
153func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
154 tokens := scanTokens(src, filename, start, scanTemplate)
155 diags := checkInvalidTokens(tokens)
156 return tokens, diags
157}
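
(Usage sketch, not part of the vendored diff: the Lex functions return the raw token stream, which is useful for tooling such as highlighters that do not need a full parse. Type and Bytes are fields of the hclsyntax Token type.)

	tokens, diags := hclsyntax.LexConfig([]byte("a = 1\n"), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	for _, tok := range tokens {
		fmt.Printf("%v %q\n", tok.Type, tok.Bytes)
	}
	// diags only reports lexical problems; parse the tokens to find all syntax errors.
	_ = diags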
158
159// ValidIdentifier tests if the given string could be a valid identifier in
160// a native syntax expression.
161//
162// This is useful when accepting names from the user that will be used as
163// variable or attribute names in the scope, to ensure that any name chosen
164// will be traversable using the variable or attribute traversal syntax.
165func ValidIdentifier(s string) bool {
166 // This is a kinda-expensive way to do something pretty simple, but it
167 // is easiest to do with our existing scanner-related infrastructure here
168 // and nobody should be validating identifiers in a tight loop.
169 tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly)
170 return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF
171}
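
(Usage sketch, not part of the vendored diff:)

	hclsyntax.ValidIdentifier("instance_type") // true
	hclsyntax.ValidIdentifier("0count")        // false: identifiers cannot start with a digit
	hclsyntax.ValidIdentifier("a.b")           // false: "." is a traversal operator, not part of a name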
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go
new file mode 100644
index 0000000..de1f524
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go
@@ -0,0 +1,301 @@
1// line 1 "scan_string_lit.rl"
2
3package hclsyntax
4
5// This file is generated from scan_string_lit.rl. DO NOT EDIT.
6
7// line 9 "scan_string_lit.go"
8var _hclstrtok_actions []byte = []byte{
9 0, 1, 0, 1, 1, 2, 1, 0,
10}
11
12var _hclstrtok_key_offsets []byte = []byte{
13 0, 0, 2, 4, 6, 10, 14, 18,
14 22, 27, 31, 36, 41, 46, 51, 57,
15 62, 74, 85, 96, 107, 118, 129, 140,
16 151,
17}
18
19var _hclstrtok_trans_keys []byte = []byte{
20 128, 191, 128, 191, 128, 191, 10, 13,
21 36, 37, 10, 13, 36, 37, 10, 13,
22 36, 37, 10, 13, 36, 37, 10, 13,
23 36, 37, 123, 10, 13, 36, 37, 10,
24 13, 36, 37, 92, 10, 13, 36, 37,
25 92, 10, 13, 36, 37, 92, 10, 13,
26 36, 37, 92, 10, 13, 36, 37, 92,
27 123, 10, 13, 36, 37, 92, 85, 117,
28 128, 191, 192, 223, 224, 239, 240, 247,
29 248, 255, 10, 13, 36, 37, 92, 48,
30 57, 65, 70, 97, 102, 10, 13, 36,
31 37, 92, 48, 57, 65, 70, 97, 102,
32 10, 13, 36, 37, 92, 48, 57, 65,
33 70, 97, 102, 10, 13, 36, 37, 92,
34 48, 57, 65, 70, 97, 102, 10, 13,
35 36, 37, 92, 48, 57, 65, 70, 97,
36 102, 10, 13, 36, 37, 92, 48, 57,
37 65, 70, 97, 102, 10, 13, 36, 37,
38 92, 48, 57, 65, 70, 97, 102, 10,
39 13, 36, 37, 92, 48, 57, 65, 70,
40 97, 102,
41}
42
43var _hclstrtok_single_lengths []byte = []byte{
44 0, 0, 0, 0, 4, 4, 4, 4,
45 5, 4, 5, 5, 5, 5, 6, 5,
46 2, 5, 5, 5, 5, 5, 5, 5,
47 5,
48}
49
50var _hclstrtok_range_lengths []byte = []byte{
51 0, 1, 1, 1, 0, 0, 0, 0,
52 0, 0, 0, 0, 0, 0, 0, 0,
53 5, 3, 3, 3, 3, 3, 3, 3,
54 3,
55}
56
57var _hclstrtok_index_offsets []byte = []byte{
58 0, 0, 2, 4, 6, 11, 16, 21,
59 26, 32, 37, 43, 49, 55, 61, 68,
60 74, 82, 91, 100, 109, 118, 127, 136,
61 145,
62}
63
64var _hclstrtok_indicies []byte = []byte{
65 0, 1, 2, 1, 3, 1, 5, 6,
66 7, 8, 4, 10, 11, 12, 13, 9,
67 14, 11, 12, 13, 9, 10, 11, 15,
68 13, 9, 10, 11, 12, 13, 14, 9,
69 10, 11, 12, 15, 9, 17, 18, 19,
70 20, 21, 16, 23, 24, 25, 26, 27,
71 22, 0, 24, 25, 26, 27, 22, 23,
72 24, 28, 26, 27, 22, 23, 24, 25,
73 26, 27, 0, 22, 23, 24, 25, 28,
74 27, 22, 29, 30, 22, 2, 3, 31,
75 22, 0, 23, 24, 25, 26, 27, 32,
76 32, 32, 22, 23, 24, 25, 26, 27,
77 33, 33, 33, 22, 23, 24, 25, 26,
78 27, 34, 34, 34, 22, 23, 24, 25,
79 26, 27, 30, 30, 30, 22, 23, 24,
80 25, 26, 27, 35, 35, 35, 22, 23,
81 24, 25, 26, 27, 36, 36, 36, 22,
82 23, 24, 25, 26, 27, 37, 37, 37,
83 22, 23, 24, 25, 26, 27, 0, 0,
84 0, 22,
85}
86
87var _hclstrtok_trans_targs []byte = []byte{
88 11, 0, 1, 2, 4, 5, 6, 7,
89 9, 4, 5, 6, 7, 9, 5, 8,
90 10, 11, 12, 13, 15, 16, 10, 11,
91 12, 13, 15, 16, 14, 17, 21, 3,
92 18, 19, 20, 22, 23, 24,
93}
94
95var _hclstrtok_trans_actions []byte = []byte{
96 0, 0, 0, 0, 0, 1, 1, 1,
97 1, 3, 5, 5, 5, 5, 0, 0,
98 0, 1, 1, 1, 1, 1, 3, 5,
99 5, 5, 5, 5, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0,
101}
102
103var _hclstrtok_eof_actions []byte = []byte{
104 0, 0, 0, 0, 0, 3, 3, 3,
105 3, 3, 0, 3, 3, 3, 3, 3,
106 3, 3, 3, 3, 3, 3, 3, 3,
107 3,
108}
109
110const hclstrtok_start int = 4
111const hclstrtok_first_final int = 4
112const hclstrtok_error int = 0
113
114const hclstrtok_en_quoted int = 10
115const hclstrtok_en_unquoted int = 4
116
117// line 10 "scan_string_lit.rl"
118
119func scanStringLit(data []byte, quoted bool) [][]byte {
120 var ret [][]byte
121
122 // line 61 "scan_string_lit.rl"
123
124 // Ragel state
125 p := 0 // "Pointer" into data
126 pe := len(data) // End-of-data "pointer"
127 ts := 0
128 te := 0
129 eof := pe
130
131 var cs int // current state
132 switch {
133 case quoted:
134 cs = hclstrtok_en_quoted
135 default:
136 cs = hclstrtok_en_unquoted
137 }
138
139 // Make Go compiler happy
140 _ = ts
141 _ = eof
142
143 /*token := func () {
144 ret = append(ret, data[ts:te])
145 }*/
146
147 // line 154 "scan_string_lit.go"
148 {
149 }
150
151 // line 158 "scan_string_lit.go"
152 {
153 var _klen int
154 var _trans int
155 var _acts int
156 var _nacts uint
157 var _keys int
158 if p == pe {
159 goto _test_eof
160 }
161 if cs == 0 {
162 goto _out
163 }
164 _resume:
165 _keys = int(_hclstrtok_key_offsets[cs])
166 _trans = int(_hclstrtok_index_offsets[cs])
167
168 _klen = int(_hclstrtok_single_lengths[cs])
169 if _klen > 0 {
170 _lower := int(_keys)
171 var _mid int
172 _upper := int(_keys + _klen - 1)
173 for {
174 if _upper < _lower {
175 break
176 }
177
178 _mid = _lower + ((_upper - _lower) >> 1)
179 switch {
180 case data[p] < _hclstrtok_trans_keys[_mid]:
181 _upper = _mid - 1
182 case data[p] > _hclstrtok_trans_keys[_mid]:
183 _lower = _mid + 1
184 default:
185 _trans += int(_mid - int(_keys))
186 goto _match
187 }
188 }
189 _keys += _klen
190 _trans += _klen
191 }
192
193 _klen = int(_hclstrtok_range_lengths[cs])
194 if _klen > 0 {
195 _lower := int(_keys)
196 var _mid int
197 _upper := int(_keys + (_klen << 1) - 2)
198 for {
199 if _upper < _lower {
200 break
201 }
202
203 _mid = _lower + (((_upper - _lower) >> 1) & ^1)
204 switch {
205 case data[p] < _hclstrtok_trans_keys[_mid]:
206 _upper = _mid - 2
207 case data[p] > _hclstrtok_trans_keys[_mid+1]:
208 _lower = _mid + 2
209 default:
210 _trans += int((_mid - int(_keys)) >> 1)
211 goto _match
212 }
213 }
214 _trans += _klen
215 }
216
217 _match:
218 _trans = int(_hclstrtok_indicies[_trans])
219 cs = int(_hclstrtok_trans_targs[_trans])
220
221 if _hclstrtok_trans_actions[_trans] == 0 {
222 goto _again
223 }
224
225 _acts = int(_hclstrtok_trans_actions[_trans])
226 _nacts = uint(_hclstrtok_actions[_acts])
227 _acts++
228 for ; _nacts > 0; _nacts-- {
229 _acts++
230 switch _hclstrtok_actions[_acts-1] {
231 case 0:
232 // line 40 "scan_string_lit.rl"
233
234 // If te is behind p then we've skipped over some literal
235 // characters which we must now return.
236 if te < p {
237 ret = append(ret, data[te:p])
238 }
239 ts = p
240
241 case 1:
242 // line 48 "scan_string_lit.rl"
243
244 te = p
245 ret = append(ret, data[ts:te])
246
247 // line 255 "scan_string_lit.go"
248 }
249 }
250
251 _again:
252 if cs == 0 {
253 goto _out
254 }
255 p++
256 if p != pe {
257 goto _resume
258 }
259 _test_eof:
260 {
261 }
262 if p == eof {
263 __acts := _hclstrtok_eof_actions[cs]
264 __nacts := uint(_hclstrtok_actions[__acts])
265 __acts++
266 for ; __nacts > 0; __nacts-- {
267 __acts++
268 switch _hclstrtok_actions[__acts-1] {
269 case 1:
270 // line 48 "scan_string_lit.rl"
271
272 te = p
273 ret = append(ret, data[ts:te])
274
275 // line 281 "scan_string_lit.go"
276 }
277 }
278 }
279
280 _out:
281 {
282 }
283 }
284
285 // line 89 "scan_string_lit.rl"
286
287 if te < p {
288 // Collect any leftover literal characters at the end of the input
289 ret = append(ret, data[te:p])
290 }
291
292 // If we fall out here without being in a final state then we've
293 // encountered something that the scanner can't match, which should
294 // be impossible (the scanner matches all bytes _somehow_) but we'll
295 // tolerate it and let the caller deal with it.
296 if cs < hclstrtok_first_final {
297 ret = append(ret, data[p:len(data)])
298 }
299
300 return ret
301}
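
(Illustrative sketch, not part of the vendored diff: scanStringLit is unexported, so this would live inside package hclsyntax, e.g. in a test. Given the raw bytes between the quotes of a string literal, it returns alternating literal runs and escape/template markers; the quoted argument selects which machine runs. The output shown is an assumption based on the grammar in scan_string_lit.rl below.)

	chunks := scanStringLit([]byte(`hello\nworld ${name}`), true)
	for _, c := range chunks {
		fmt.Printf("%q\n", c)
	}
	// Assumed output: "hello", "\\n", "world ", "${", "name}"; the caller
	// (the token parser) then decides what each escape or marker means.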
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl
new file mode 100644
index 0000000..f8ac117
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl
@@ -0,0 +1,105 @@
1
2package hclsyntax
3
4// This file is generated from scan_string_lit.rl. DO NOT EDIT.
5%%{
6 # (except you are actually in scan_string_lit.rl here, so edit away!)
7
8 machine hclstrtok;
9 write data;
10}%%
11
12func scanStringLit(data []byte, quoted bool) [][]byte {
13 var ret [][]byte
14
15 %%{
16 include UnicodeDerived "unicode_derived.rl";
17
18 UTF8Cont = 0x80 .. 0xBF;
19 AnyUTF8 = (
20 0x00..0x7F |
21 0xC0..0xDF . UTF8Cont |
22 0xE0..0xEF . UTF8Cont . UTF8Cont |
23 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
24 );
25 BadUTF8 = any - AnyUTF8;
26
27 Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
28
29 # Our goal with these patterns is to capture user intent as well as
30 # possible, even if the input is invalid. The caller will then verify
31 # whether each token is valid and generate suitable error messages
32 # if not.
33 UnicodeEscapeShort = "\\u" . Hex{0,4};
34 UnicodeEscapeLong = "\\U" . Hex{0,8};
35 UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
36 SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
37 TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
38 Newline = ("\r\n" | "\r" | "\n");
39
40 action Begin {
41 // If te is behind p then we've skipped over some literal
42 // characters which we must now return.
43 if te < p {
44 ret = append(ret, data[te:p])
45 }
46 ts = p;
47 }
48 action End {
49 te = p;
50 ret = append(ret, data[ts:te]);
51 }
52
53 QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
54 UnquotedToken = (TemplateEscape | Newline) >Begin %End;
55 QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
56 UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
57
58 quoted := (QuotedToken | QuotedLiteral)**;
59 unquoted := (UnquotedToken | UnquotedLiteral)**;
60
61 }%%
62
63 // Ragel state
64 p := 0 // "Pointer" into data
65 pe := len(data) // End-of-data "pointer"
66 ts := 0
67 te := 0
68 eof := pe
69
70 var cs int // current state
71 switch {
72 case quoted:
73 cs = hclstrtok_en_quoted
74 default:
75 cs = hclstrtok_en_unquoted
76 }
77
78 // Make Go compiler happy
79 _ = ts
80 _ = eof
81
82 /*token := func () {
83 ret = append(ret, data[ts:te])
84 }*/
85
86 %%{
87 write init nocs;
88 write exec;
89 }%%
90
91 if te < p {
92 // Collect any leftover literal characters at the end of the input
93 ret = append(ret, data[te:p])
94 }
95
96 // If we fall out here without being in a final state then we've
97 // encountered something that the scanner can't match, which should
98 // be impossible (the scanner matches all bytes _somehow_) but we'll
99 // tolerate it and let the caller deal with it.
100 if cs < hclstrtok_first_final {
101 ret = append(ret, data[p:len(data)])
102 }
103
104 return ret
105}
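
(Sketch of assumed behaviour contrasting the two machines defined above: in the unquoted machine, used for heredoc-style bodies, backslash sequences are not escapes and stay inside the literal run; only template introducers and newlines are split out.)

	scanStringLit([]byte(`a\tb${x}`), true)  // quoted:   "a", `\t`, "b", "${", "x}"
	scanStringLit([]byte(`a\tb${x}`), false) // unquoted: `a\tb`, "${", "x}"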
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
new file mode 100644
index 0000000..395e9c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
@@ -0,0 +1,5443 @@
1// line 1 "scan_tokens.rl"
2
3package hclsyntax
4
5import (
6 "bytes"
7
8 "github.com/hashicorp/hcl2/hcl"
9)
10
11// This file is generated from scan_tokens.rl. DO NOT EDIT.
12
13// line 15 "scan_tokens.go"
14var _hcltok_actions []byte = []byte{
15 0, 1, 0, 1, 1, 1, 2, 1, 3,
16 1, 4, 1, 6, 1, 7, 1, 8,
17 1, 9, 1, 10, 1, 11, 1, 12,
18 1, 13, 1, 14, 1, 15, 1, 16,
19 1, 17, 1, 18, 1, 19, 1, 22,
20 1, 23, 1, 24, 1, 25, 1, 26,
21 1, 27, 1, 28, 1, 29, 1, 30,
22 1, 31, 1, 34, 1, 35, 1, 36,
23 1, 37, 1, 38, 1, 39, 1, 40,
24 1, 41, 1, 42, 1, 43, 1, 46,
25 1, 47, 1, 48, 1, 49, 1, 50,
26 1, 51, 1, 52, 1, 58, 1, 59,
27 1, 60, 1, 61, 1, 62, 1, 63,
28 1, 64, 1, 65, 1, 66, 1, 67,
29 1, 68, 1, 69, 1, 70, 1, 71,
30 1, 72, 1, 73, 1, 74, 1, 75,
31 1, 76, 1, 77, 1, 78, 1, 79,
32 1, 80, 1, 81, 1, 82, 1, 83,
33 1, 84, 1, 85, 1, 86, 1, 87,
34 2, 0, 15, 2, 1, 15, 2, 2,
35 24, 2, 2, 28, 2, 3, 24, 2,
36 3, 28, 2, 4, 5, 2, 7, 0,
37 2, 7, 1, 2, 7, 20, 2, 7,
38 21, 2, 7, 32, 2, 7, 33, 2,
39 7, 44, 2, 7, 45, 2, 7, 53,
40 2, 7, 54, 2, 7, 55, 2, 7,
41 56, 2, 7, 57, 3, 7, 2, 20,
42 3, 7, 3, 20,
43}
44
45var _hcltok_key_offsets []int16 = []int16{
46 0, 0, 1, 2, 3, 5, 10, 14,
47 16, 58, 99, 145, 146, 150, 156, 156,
48 158, 160, 169, 175, 182, 183, 186, 187,
49 191, 196, 205, 209, 213, 221, 223, 225,
50 227, 230, 262, 264, 266, 270, 274, 277,
51 288, 301, 320, 333, 349, 361, 377, 392,
52 413, 423, 435, 446, 460, 475, 485, 497,
53 506, 518, 520, 524, 545, 554, 564, 570,
54 576, 577, 626, 628, 632, 634, 640, 647,
55 655, 662, 665, 671, 675, 679, 681, 685,
56 689, 693, 699, 707, 715, 721, 723, 727,
57 729, 735, 739, 743, 747, 751, 756, 763,
58 769, 771, 773, 777, 779, 785, 789, 793,
59 803, 808, 822, 837, 839, 847, 849, 854,
60 868, 873, 875, 879, 880, 884, 890, 896,
61 906, 916, 927, 935, 938, 941, 945, 949,
62 951, 954, 954, 957, 959, 989, 991, 993,
63 997, 1002, 1006, 1011, 1013, 1015, 1017, 1026,
64 1030, 1034, 1040, 1042, 1050, 1058, 1070, 1073,
65 1079, 1083, 1085, 1089, 1109, 1111, 1113, 1124,
66 1130, 1132, 1134, 1136, 1140, 1146, 1152, 1154,
67 1159, 1163, 1165, 1173, 1191, 1231, 1241, 1245,
68 1247, 1249, 1250, 1254, 1258, 1262, 1266, 1270,
69 1275, 1279, 1283, 1287, 1289, 1291, 1295, 1305,
70 1309, 1311, 1315, 1319, 1323, 1336, 1338, 1340,
71 1344, 1346, 1350, 1352, 1354, 1384, 1388, 1392,
72 1396, 1399, 1406, 1411, 1422, 1426, 1442, 1456,
73 1460, 1465, 1469, 1473, 1479, 1481, 1487, 1489,
74 1493, 1495, 1501, 1506, 1511, 1521, 1523, 1525,
75 1529, 1533, 1535, 1548, 1550, 1554, 1558, 1566,
76 1568, 1572, 1574, 1575, 1578, 1583, 1585, 1587,
77 1591, 1593, 1597, 1603, 1623, 1629, 1635, 1637,
78 1638, 1648, 1649, 1657, 1664, 1666, 1669, 1671,
79 1673, 1675, 1680, 1684, 1688, 1693, 1703, 1713,
80 1717, 1721, 1735, 1761, 1771, 1773, 1775, 1778,
81 1780, 1783, 1785, 1789, 1791, 1792, 1796, 1798,
82 1801, 1808, 1816, 1818, 1820, 1824, 1826, 1832,
83 1843, 1846, 1848, 1852, 1857, 1887, 1892, 1894,
84 1897, 1902, 1916, 1923, 1937, 1942, 1955, 1959,
85 1972, 1977, 1995, 1996, 2005, 2009, 2021, 2026,
86 2033, 2040, 2047, 2049, 2053, 2075, 2080, 2081,
87 2085, 2087, 2137, 2140, 2151, 2155, 2157, 2163,
88 2169, 2171, 2176, 2178, 2182, 2184, 2185, 2187,
89 2189, 2195, 2197, 2199, 2203, 2209, 2222, 2224,
90 2230, 2234, 2242, 2253, 2261, 2264, 2294, 2300,
91 2303, 2308, 2310, 2314, 2318, 2322, 2324, 2331,
92 2333, 2342, 2349, 2357, 2359, 2379, 2391, 2395,
93 2397, 2415, 2454, 2456, 2460, 2462, 2469, 2473,
94 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2518,
95 2522, 2524, 2527, 2529, 2531, 2534, 2536, 2538,
96 2539, 2541, 2543, 2547, 2551, 2554, 2567, 2569,
97 2575, 2579, 2581, 2585, 2589, 2603, 2606, 2615,
98 2617, 2621, 2627, 2627, 2629, 2631, 2640, 2646,
99 2653, 2654, 2657, 2658, 2662, 2667, 2676, 2680,
100 2684, 2692, 2694, 2696, 2698, 2701, 2733, 2735,
101 2737, 2741, 2745, 2748, 2759, 2772, 2791, 2804,
102 2820, 2832, 2848, 2863, 2884, 2894, 2906, 2917,
103 2931, 2946, 2956, 2968, 2977, 2989, 2991, 2995,
104 3016, 3025, 3035, 3041, 3047, 3048, 3097, 3099,
105 3103, 3105, 3111, 3118, 3126, 3133, 3136, 3142,
106 3146, 3150, 3152, 3156, 3160, 3164, 3170, 3178,
107 3186, 3192, 3194, 3198, 3200, 3206, 3210, 3214,
108 3218, 3222, 3227, 3234, 3240, 3242, 3244, 3248,
109 3250, 3256, 3260, 3264, 3274, 3279, 3293, 3308,
110 3310, 3318, 3320, 3325, 3339, 3344, 3346, 3350,
111 3351, 3355, 3361, 3367, 3377, 3387, 3398, 3406,
112 3409, 3412, 3416, 3420, 3422, 3425, 3425, 3428,
113 3430, 3460, 3462, 3464, 3468, 3473, 3477, 3482,
114 3484, 3486, 3488, 3497, 3501, 3505, 3511, 3513,
115 3521, 3529, 3541, 3544, 3550, 3554, 3556, 3560,
116 3580, 3582, 3584, 3595, 3601, 3603, 3605, 3607,
117 3611, 3617, 3623, 3625, 3630, 3634, 3636, 3644,
118 3662, 3702, 3712, 3716, 3718, 3720, 3721, 3725,
119 3729, 3733, 3737, 3741, 3746, 3750, 3754, 3758,
120 3760, 3762, 3766, 3776, 3780, 3782, 3786, 3790,
121 3794, 3807, 3809, 3811, 3815, 3817, 3821, 3823,
122 3825, 3855, 3859, 3863, 3867, 3870, 3877, 3882,
123 3893, 3897, 3913, 3927, 3931, 3936, 3940, 3944,
124 3950, 3952, 3958, 3960, 3964, 3966, 3972, 3977,
125 3982, 3992, 3994, 3996, 4000, 4004, 4006, 4019,
126 4021, 4025, 4029, 4037, 4039, 4043, 4045, 4046,
127 4049, 4054, 4056, 4058, 4062, 4064, 4068, 4074,
128 4094, 4100, 4106, 4108, 4109, 4119, 4120, 4128,
129 4135, 4137, 4140, 4142, 4144, 4146, 4151, 4155,
130 4159, 4164, 4174, 4184, 4188, 4192, 4206, 4232,
131 4242, 4244, 4246, 4249, 4251, 4254, 4256, 4260,
132 4262, 4263, 4267, 4269, 4271, 4278, 4282, 4289,
133 4296, 4305, 4321, 4333, 4351, 4362, 4374, 4382,
134 4400, 4408, 4438, 4441, 4451, 4461, 4473, 4484,
135 4493, 4506, 4518, 4522, 4528, 4555, 4564, 4567,
136 4572, 4578, 4583, 4604, 4608, 4614, 4614, 4621,
137 4630, 4638, 4641, 4645, 4651, 4657, 4660, 4664,
138 4671, 4677, 4686, 4695, 4699, 4703, 4707, 4711,
139 4718, 4722, 4726, 4736, 4742, 4746, 4752, 4756,
140 4759, 4765, 4771, 4783, 4787, 4791, 4801, 4805,
141 4816, 4818, 4820, 4824, 4836, 4841, 4865, 4869,
142 4875, 4897, 4906, 4910, 4913, 4914, 4922, 4930,
143 4936, 4946, 4953, 4971, 4974, 4977, 4985, 4991,
144 4995, 4999, 5003, 5009, 5017, 5022, 5028, 5032,
145 5040, 5047, 5051, 5058, 5064, 5072, 5080, 5086,
146 5092, 5103, 5107, 5119, 5128, 5145, 5162, 5165,
147 5169, 5171, 5177, 5179, 5183, 5198, 5202, 5206,
148 5210, 5214, 5218, 5220, 5226, 5231, 5235, 5241,
149 5248, 5251, 5269, 5271, 5316, 5322, 5328, 5332,
150 5336, 5342, 5346, 5352, 5358, 5365, 5367, 5373,
151 5379, 5383, 5387, 5395, 5408, 5414, 5421, 5429,
152 5435, 5444, 5450, 5454, 5459, 5463, 5471, 5475,
153 5479, 5509, 5515, 5521, 5527, 5533, 5540, 5546,
154 5553, 5558, 5568, 5572, 5579, 5585, 5589, 5596,
155 5600, 5606, 5609, 5613, 5617, 5621, 5625, 5630,
156 5635, 5639, 5650, 5654, 5658, 5664, 5672, 5676,
157 5693, 5697, 5703, 5713, 5719, 5725, 5728, 5733,
158 5742, 5746, 5750, 5756, 5760, 5766, 5774, 5792,
159 5793, 5803, 5804, 5813, 5821, 5823, 5826, 5828,
160 5830, 5832, 5837, 5850, 5854, 5869, 5898, 5909,
161 5911, 5915, 5919, 5924, 5928, 5930, 5937, 5941,
162 5949, 5953, 5954, 5955, 5957, 5959, 5961, 5963,
163 5965, 5966, 5967, 5968, 5970, 5972, 5974, 5975,
164 5976, 5977, 5978, 5980, 5982, 5984, 5985, 5986,
165 5990, 5996, 5996, 5998, 6000, 6009, 6015, 6022,
166 6023, 6026, 6027, 6031, 6036, 6045, 6049, 6053,
167 6061, 6063, 6065, 6067, 6070, 6102, 6104, 6106,
168 6110, 6114, 6117, 6128, 6141, 6160, 6173, 6189,
169 6201, 6217, 6232, 6253, 6263, 6275, 6286, 6300,
170 6315, 6325, 6337, 6346, 6358, 6360, 6364, 6385,
171 6394, 6404, 6410, 6416, 6417, 6466, 6468, 6472,
172 6474, 6480, 6487, 6495, 6502, 6505, 6511, 6515,
173 6519, 6521, 6525, 6529, 6533, 6539, 6547, 6555,
174 6561, 6563, 6567, 6569, 6575, 6579, 6583, 6587,
175 6591, 6596, 6603, 6609, 6611, 6613, 6617, 6619,
176 6625, 6629, 6633, 6643, 6648, 6662, 6677, 6679,
177 6687, 6689, 6694, 6708, 6713, 6715, 6719, 6720,
178 6724, 6730, 6736, 6746, 6756, 6767, 6775, 6778,
179 6781, 6785, 6789, 6791, 6794, 6794, 6797, 6799,
180 6829, 6831, 6833, 6837, 6842, 6846, 6851, 6853,
181 6855, 6857, 6866, 6870, 6874, 6880, 6882, 6890,
182 6898, 6910, 6913, 6919, 6923, 6925, 6929, 6949,
183 6951, 6953, 6964, 6970, 6972, 6974, 6976, 6980,
184 6986, 6992, 6994, 6999, 7003, 7005, 7013, 7031,
185 7071, 7081, 7085, 7087, 7089, 7090, 7094, 7098,
186 7102, 7106, 7110, 7115, 7119, 7123, 7127, 7129,
187 7131, 7135, 7145, 7149, 7151, 7155, 7159, 7163,
188 7176, 7178, 7180, 7184, 7186, 7190, 7192, 7194,
189 7224, 7228, 7232, 7236, 7239, 7246, 7251, 7262,
190 7266, 7282, 7296, 7300, 7305, 7309, 7313, 7319,
191 7321, 7327, 7329, 7333, 7335, 7341, 7346, 7351,
192 7361, 7363, 7365, 7369, 7373, 7375, 7388, 7390,
193 7394, 7398, 7406, 7408, 7412, 7414, 7415, 7418,
194 7423, 7425, 7427, 7431, 7433, 7437, 7443, 7463,
195 7469, 7475, 7477, 7478, 7488, 7489, 7497, 7504,
196 7506, 7509, 7511, 7513, 7515, 7520, 7524, 7528,
197 7533, 7543, 7553, 7557, 7561, 7575, 7601, 7611,
198 7613, 7615, 7618, 7620, 7623, 7625, 7629, 7631,
199 7632, 7636, 7638, 7640, 7647, 7651, 7658, 7665,
200 7674, 7690, 7702, 7720, 7731, 7743, 7751, 7769,
201 7777, 7807, 7810, 7820, 7830, 7842, 7853, 7862,
202 7875, 7887, 7891, 7897, 7924, 7933, 7936, 7941,
203 7947, 7952, 7973, 7977, 7983, 7983, 7990, 7999,
204 8007, 8010, 8014, 8020, 8026, 8029, 8033, 8040,
205 8046, 8055, 8064, 8068, 8072, 8076, 8080, 8087,
206 8091, 8095, 8105, 8111, 8115, 8121, 8125, 8128,
207 8134, 8140, 8152, 8156, 8160, 8170, 8174, 8185,
208 8187, 8189, 8193, 8205, 8210, 8234, 8238, 8244,
209 8266, 8275, 8279, 8282, 8283, 8291, 8299, 8305,
210 8315, 8322, 8340, 8343, 8346, 8354, 8360, 8364,
211 8368, 8372, 8378, 8386, 8391, 8397, 8401, 8409,
212 8416, 8420, 8427, 8433, 8441, 8449, 8455, 8461,
213 8472, 8476, 8488, 8497, 8514, 8531, 8534, 8538,
214 8540, 8546, 8548, 8552, 8567, 8571, 8575, 8579,
215 8583, 8587, 8589, 8595, 8600, 8604, 8610, 8617,
216 8620, 8638, 8640, 8685, 8691, 8697, 8701, 8705,
217 8711, 8715, 8721, 8727, 8734, 8736, 8742, 8748,
218 8752, 8756, 8764, 8777, 8783, 8790, 8798, 8804,
219 8813, 8819, 8823, 8828, 8832, 8840, 8844, 8848,
220 8878, 8884, 8890, 8896, 8902, 8909, 8915, 8922,
221 8927, 8937, 8941, 8948, 8954, 8958, 8965, 8969,
222 8975, 8978, 8982, 8986, 8990, 8994, 8999, 9004,
223 9008, 9019, 9023, 9027, 9033, 9041, 9045, 9062,
224 9066, 9072, 9082, 9088, 9094, 9097, 9102, 9111,
225 9115, 9119, 9125, 9129, 9135, 9143, 9161, 9162,
226 9172, 9173, 9182, 9190, 9192, 9195, 9197, 9199,
227 9201, 9206, 9219, 9223, 9238, 9267, 9278, 9280,
228 9284, 9288, 9293, 9297, 9299, 9306, 9310, 9318,
229 9322, 9398, 9400, 9401, 9402, 9403, 9404, 9405,
230 9407, 9408, 9413, 9415, 9417, 9418, 9462, 9463,
231 9464, 9466, 9471, 9475, 9475, 9477, 9479, 9490,
232 9500, 9508, 9509, 9511, 9512, 9516, 9520, 9530,
233 9534, 9541, 9552, 9559, 9563, 9569, 9580, 9612,
234 9661, 9676, 9691, 9696, 9698, 9703, 9735, 9743,
235 9745, 9767, 9789, 9791, 9807, 9823, 9839, 9855,
236 9870, 9880, 9897, 9914, 9931, 9947, 9957, 9974,
237 9990, 10006, 10022, 10038, 10054, 10070, 10086, 10087,
238 10088, 10089, 10090, 10092, 10094, 10096, 10110, 10124,
239 10138, 10152, 10153, 10154, 10156, 10158, 10160, 10174,
240 10188, 10189, 10190, 10192, 10194, 10196, 10245, 10289,
241 10291, 10296, 10300, 10300, 10302, 10304, 10315, 10325,
242 10333, 10334, 10336, 10337, 10341, 10345, 10355, 10359,
243 10366, 10377, 10384, 10388, 10394, 10405, 10437, 10486,
244 10501, 10516, 10521, 10523, 10528, 10560, 10568, 10570,
245 10592, 10614,
246}
247
248var _hcltok_trans_keys []byte = []byte{
249 10, 46, 42, 42, 47, 46, 69, 101,
250 48, 57, 43, 45, 48, 57, 48, 57,
251 45, 95, 194, 195, 198, 199, 203, 205,
252 206, 207, 210, 212, 213, 214, 215, 216,
253 217, 219, 220, 221, 222, 223, 224, 225,
254 226, 227, 228, 233, 234, 237, 239, 240,
255 65, 90, 97, 122, 196, 202, 208, 218,
256 229, 236, 95, 194, 195, 198, 199, 203,
257 205, 206, 207, 210, 212, 213, 214, 215,
258 216, 217, 219, 220, 221, 222, 223, 224,
259 225, 226, 227, 228, 233, 234, 237, 239,
260 240, 65, 90, 97, 122, 196, 202, 208,
261 218, 229, 236, 10, 13, 45, 95, 194,
262 195, 198, 199, 203, 204, 205, 206, 207,
263 210, 212, 213, 214, 215, 216, 217, 219,
264 220, 221, 222, 223, 224, 225, 226, 227,
265 228, 233, 234, 237, 239, 240, 243, 48,
266 57, 65, 90, 97, 122, 196, 218, 229,
267 236, 10, 170, 181, 183, 186, 128, 150,
268 152, 182, 184, 255, 192, 255, 0, 127,
269 173, 130, 133, 146, 159, 165, 171, 175,
270 255, 181, 190, 184, 185, 192, 255, 140,
271 134, 138, 142, 161, 163, 255, 182, 130,
272 136, 137, 176, 151, 152, 154, 160, 190,
273 136, 144, 192, 255, 135, 129, 130, 132,
274 133, 144, 170, 176, 178, 144, 154, 160,
275 191, 128, 169, 174, 255, 148, 169, 157,
276 158, 189, 190, 192, 255, 144, 255, 139,
277 140, 178, 255, 186, 128, 181, 160, 161,
278 162, 163, 164, 165, 166, 167, 168, 169,
279 170, 171, 172, 173, 174, 175, 176, 177,
280 178, 179, 180, 181, 182, 183, 184, 185,
281 186, 187, 188, 189, 190, 191, 128, 173,
282 128, 155, 160, 180, 182, 189, 148, 161,
283 163, 255, 176, 164, 165, 132, 169, 177,
284 141, 142, 145, 146, 179, 181, 186, 187,
285 158, 133, 134, 137, 138, 143, 150, 152,
286 155, 164, 165, 178, 255, 188, 129, 131,
287 133, 138, 143, 144, 147, 168, 170, 176,
288 178, 179, 181, 182, 184, 185, 190, 255,
289 157, 131, 134, 137, 138, 142, 144, 146,
290 152, 159, 165, 182, 255, 129, 131, 133,
291 141, 143, 145, 147, 168, 170, 176, 178,
292 179, 181, 185, 188, 255, 134, 138, 142,
293 143, 145, 159, 164, 165, 176, 184, 186,
294 255, 129, 131, 133, 140, 143, 144, 147,
295 168, 170, 176, 178, 179, 181, 185, 188,
296 191, 177, 128, 132, 135, 136, 139, 141,
297 150, 151, 156, 157, 159, 163, 166, 175,
298 156, 130, 131, 133, 138, 142, 144, 146,
299 149, 153, 154, 158, 159, 163, 164, 168,
300 170, 174, 185, 190, 191, 144, 151, 128,
301 130, 134, 136, 138, 141, 166, 175, 128,
302 131, 133, 140, 142, 144, 146, 168, 170,
303 185, 189, 255, 133, 137, 151, 142, 148,
304 155, 159, 164, 165, 176, 255, 128, 131,
305 133, 140, 142, 144, 146, 168, 170, 179,
306 181, 185, 188, 191, 158, 128, 132, 134,
307 136, 138, 141, 149, 150, 160, 163, 166,
308 175, 177, 178, 129, 131, 133, 140, 142,
309 144, 146, 186, 189, 255, 133, 137, 143,
310 147, 152, 158, 164, 165, 176, 185, 192,
311 255, 189, 130, 131, 133, 150, 154, 177,
312 179, 187, 138, 150, 128, 134, 143, 148,
313 152, 159, 166, 175, 178, 179, 129, 186,
314 128, 142, 144, 153, 132, 138, 141, 165,
315 167, 129, 130, 135, 136, 148, 151, 153,
316 159, 161, 163, 170, 171, 173, 185, 187,
317 189, 134, 128, 132, 136, 141, 144, 153,
318 156, 159, 128, 181, 183, 185, 152, 153,
319 160, 169, 190, 191, 128, 135, 137, 172,
320 177, 191, 128, 132, 134, 151, 153, 188,
321 134, 128, 129, 130, 131, 137, 138, 139,
322 140, 141, 142, 143, 144, 153, 154, 155,
323 156, 157, 158, 159, 160, 161, 162, 163,
324 164, 165, 166, 167, 168, 169, 170, 173,
325 175, 176, 177, 178, 179, 181, 182, 183,
326 188, 189, 190, 191, 132, 152, 172, 184,
327 185, 187, 128, 191, 128, 137, 144, 255,
328 158, 159, 134, 187, 136, 140, 142, 143,
329 137, 151, 153, 142, 143, 158, 159, 137,
330 177, 142, 143, 182, 183, 191, 255, 128,
331 130, 133, 136, 150, 152, 255, 145, 150,
332 151, 155, 156, 160, 168, 178, 255, 128,
333 143, 160, 255, 182, 183, 190, 255, 129,
334 255, 173, 174, 192, 255, 129, 154, 160,
335 255, 171, 173, 185, 255, 128, 140, 142,
336 148, 160, 180, 128, 147, 160, 172, 174,
337 176, 178, 179, 148, 150, 152, 155, 158,
338 159, 170, 255, 139, 141, 144, 153, 160,
339 255, 184, 255, 128, 170, 176, 255, 182,
340 255, 128, 158, 160, 171, 176, 187, 134,
341 173, 176, 180, 128, 171, 176, 255, 138,
342 143, 155, 255, 128, 155, 160, 255, 159,
343 189, 190, 192, 255, 167, 128, 137, 144,
344 153, 176, 189, 140, 143, 154, 170, 180,
345 255, 180, 255, 128, 183, 128, 137, 141,
346 189, 128, 136, 144, 146, 148, 182, 184,
347 185, 128, 181, 187, 191, 150, 151, 158,
348 159, 152, 154, 156, 158, 134, 135, 142,
349 143, 190, 255, 190, 128, 180, 182, 188,
350 130, 132, 134, 140, 144, 147, 150, 155,
351 160, 172, 178, 180, 182, 188, 128, 129,
352 130, 131, 132, 133, 134, 176, 177, 178,
353 179, 180, 181, 182, 183, 191, 255, 129,
354 147, 149, 176, 178, 190, 192, 255, 144,
355 156, 161, 144, 156, 165, 176, 130, 135,
356 149, 164, 166, 168, 138, 147, 152, 157,
357 170, 185, 188, 191, 142, 133, 137, 160,
358 255, 137, 255, 128, 174, 176, 255, 159,
359 165, 170, 180, 255, 167, 173, 128, 165,
360 176, 255, 168, 174, 176, 190, 192, 255,
361 128, 150, 160, 166, 168, 174, 176, 182,
362 184, 190, 128, 134, 136, 142, 144, 150,
363 152, 158, 160, 191, 128, 129, 130, 131,
364 132, 133, 134, 135, 144, 145, 255, 133,
365 135, 161, 175, 177, 181, 184, 188, 160,
366 151, 152, 187, 192, 255, 133, 173, 177,
367 255, 143, 159, 187, 255, 176, 191, 182,
368 183, 184, 191, 192, 255, 150, 255, 128,
369 146, 147, 148, 152, 153, 154, 155, 156,
370 158, 159, 160, 161, 162, 163, 164, 165,
371 166, 167, 168, 169, 170, 171, 172, 173,
372 174, 175, 176, 129, 255, 141, 255, 144,
373 189, 141, 143, 172, 255, 191, 128, 175,
374 180, 189, 151, 159, 162, 255, 175, 137,
375 138, 184, 255, 183, 255, 168, 255, 128,
376 179, 188, 134, 143, 154, 159, 184, 186,
377 190, 255, 128, 173, 176, 255, 148, 159,
378 189, 255, 129, 142, 154, 159, 191, 255,
379 128, 182, 128, 141, 144, 153, 160, 182,
380 186, 255, 128, 130, 155, 157, 160, 175,
381 178, 182, 129, 134, 137, 142, 145, 150,
382 160, 166, 168, 174, 176, 255, 155, 166,
383 175, 128, 170, 172, 173, 176, 185, 158,
384 159, 160, 255, 164, 175, 135, 138, 188,
385 255, 164, 169, 171, 172, 173, 174, 175,
386 180, 181, 182, 183, 184, 185, 187, 188,
387 189, 190, 191, 165, 186, 174, 175, 154,
388 255, 190, 128, 134, 147, 151, 157, 168,
389 170, 182, 184, 188, 128, 129, 131, 132,
390 134, 255, 147, 255, 190, 255, 144, 145,
391 136, 175, 188, 255, 128, 143, 160, 175,
392 179, 180, 141, 143, 176, 180, 182, 255,
393 189, 255, 191, 144, 153, 161, 186, 129,
394 154, 166, 255, 191, 255, 130, 135, 138,
395 143, 146, 151, 154, 156, 144, 145, 146,
396 147, 148, 150, 151, 152, 155, 157, 158,
397 160, 170, 171, 172, 175, 161, 169, 128,
398 129, 130, 131, 133, 135, 138, 139, 140,
399 141, 142, 143, 144, 145, 146, 147, 148,
400 149, 152, 156, 157, 160, 161, 162, 163,
401 164, 166, 168, 169, 170, 171, 172, 173,
402 174, 176, 177, 153, 155, 178, 179, 128,
403 139, 141, 166, 168, 186, 188, 189, 191,
404 255, 142, 143, 158, 255, 187, 255, 128,
405 180, 189, 128, 156, 160, 255, 145, 159,
406 161, 255, 128, 159, 176, 255, 139, 143,
407 187, 255, 128, 157, 160, 255, 144, 132,
408 135, 150, 255, 158, 159, 170, 175, 148,
409 151, 188, 255, 128, 167, 176, 255, 164,
410 255, 183, 255, 128, 149, 160, 167, 136,
411 188, 128, 133, 138, 181, 183, 184, 191,
412 255, 150, 159, 183, 255, 128, 158, 160,
413 178, 180, 181, 128, 149, 160, 185, 128,
414 183, 190, 191, 191, 128, 131, 133, 134,
415 140, 147, 149, 151, 153, 179, 184, 186,
416 160, 188, 128, 156, 128, 135, 137, 166,
417 128, 181, 128, 149, 160, 178, 128, 145,
418 128, 178, 129, 130, 131, 132, 133, 135,
419 136, 138, 139, 140, 141, 144, 145, 146,
420 147, 150, 151, 152, 153, 154, 155, 156,
421 162, 163, 171, 176, 177, 178, 128, 134,
422 135, 165, 176, 190, 144, 168, 176, 185,
423 128, 180, 182, 191, 182, 144, 179, 155,
424 133, 137, 141, 143, 157, 255, 190, 128,
425 145, 147, 183, 136, 128, 134, 138, 141,
426 143, 157, 159, 168, 176, 255, 171, 175,
427 186, 255, 128, 131, 133, 140, 143, 144,
428 147, 168, 170, 176, 178, 179, 181, 185,
429 188, 191, 144, 151, 128, 132, 135, 136,
430 139, 141, 157, 163, 166, 172, 176, 180,
431 128, 138, 144, 153, 134, 136, 143, 154,
432 255, 128, 181, 184, 255, 129, 151, 158,
433 255, 129, 131, 133, 143, 154, 255, 128,
434 137, 128, 153, 157, 171, 176, 185, 160,
435 255, 170, 190, 192, 255, 128, 184, 128,
436 136, 138, 182, 184, 191, 128, 144, 153,
437 178, 255, 168, 144, 145, 183, 255, 128,
438 142, 145, 149, 129, 141, 144, 146, 147,
439 148, 175, 255, 132, 255, 128, 144, 129,
440 143, 144, 153, 145, 152, 135, 255, 160,
441 168, 169, 171, 172, 173, 174, 188, 189,
442 190, 191, 161, 167, 185, 255, 128, 158,
443 160, 169, 144, 173, 176, 180, 128, 131,
444 144, 153, 163, 183, 189, 255, 144, 255,
445 133, 143, 191, 255, 143, 159, 160, 128,
446 129, 255, 159, 160, 171, 172, 255, 173,
447 255, 179, 255, 128, 176, 177, 178, 128,
448 129, 171, 175, 189, 255, 128, 136, 144,
449 153, 157, 158, 133, 134, 137, 144, 145,
450 146, 147, 148, 149, 154, 155, 156, 157,
451 158, 159, 168, 169, 170, 150, 153, 165,
452 169, 173, 178, 187, 255, 131, 132, 140,
453 169, 174, 255, 130, 132, 149, 157, 173,
454 186, 188, 160, 161, 163, 164, 167, 168,
455 132, 134, 149, 157, 186, 139, 140, 191,
456 255, 134, 128, 132, 138, 144, 146, 255,
457 166, 167, 129, 155, 187, 149, 181, 143,
458 175, 137, 169, 131, 140, 141, 192, 255,
459 128, 182, 187, 255, 173, 180, 182, 255,
460 132, 155, 159, 161, 175, 128, 160, 163,
461 164, 165, 184, 185, 186, 161, 162, 128,
462 134, 136, 152, 155, 161, 163, 164, 166,
463 170, 133, 143, 151, 255, 139, 143, 154,
464 255, 164, 167, 185, 187, 128, 131, 133,
465 159, 161, 162, 169, 178, 180, 183, 130,
466 135, 137, 139, 148, 151, 153, 155, 157,
467 159, 164, 190, 141, 143, 145, 146, 161,
468 162, 167, 170, 172, 178, 180, 183, 185,
469 188, 128, 137, 139, 155, 161, 163, 165,
470 169, 171, 187, 155, 156, 151, 255, 156,
471 157, 160, 181, 255, 186, 187, 255, 162,
472 255, 160, 168, 161, 167, 158, 255, 160,
473 132, 135, 133, 134, 176, 255, 170, 181,
474 186, 191, 176, 180, 182, 183, 186, 189,
475 134, 140, 136, 138, 142, 161, 163, 255,
476 130, 137, 136, 255, 144, 170, 176, 178,
477 160, 191, 128, 138, 174, 175, 177, 255,
478 148, 150, 164, 167, 173, 176, 185, 189,
479 190, 192, 255, 144, 146, 175, 141, 255,
480 166, 176, 178, 255, 186, 138, 170, 180,
481 181, 160, 161, 162, 164, 165, 166, 167,
482 168, 169, 170, 171, 172, 173, 174, 175,
483 176, 177, 178, 179, 180, 181, 182, 184,
484 186, 187, 188, 189, 190, 183, 185, 154,
485 164, 168, 128, 149, 128, 152, 189, 132,
486 185, 144, 152, 161, 177, 255, 169, 177,
487 129, 132, 141, 142, 145, 146, 179, 181,
488 186, 188, 190, 255, 142, 156, 157, 159,
489 161, 176, 177, 133, 138, 143, 144, 147,
490 168, 170, 176, 178, 179, 181, 182, 184,
491 185, 158, 153, 156, 178, 180, 189, 133,
492 141, 143, 145, 147, 168, 170, 176, 178,
493 179, 181, 185, 144, 185, 160, 161, 189,
494 133, 140, 143, 144, 147, 168, 170, 176,
495 178, 179, 181, 185, 177, 156, 157, 159,
496 161, 131, 156, 133, 138, 142, 144, 146,
497 149, 153, 154, 158, 159, 163, 164, 168,
498 170, 174, 185, 144, 189, 133, 140, 142,
499 144, 146, 168, 170, 185, 152, 154, 160,
500 161, 128, 189, 133, 140, 142, 144, 146,
501 168, 170, 179, 181, 185, 158, 160, 161,
502 177, 178, 189, 133, 140, 142, 144, 146,
503 186, 142, 148, 150, 159, 161, 186, 191,
504 189, 133, 150, 154, 177, 179, 187, 128,
505 134, 129, 176, 178, 179, 132, 138, 141,
506 165, 167, 189, 129, 130, 135, 136, 148,
507 151, 153, 159, 161, 163, 170, 171, 173,
508 176, 178, 179, 134, 128, 132, 156, 159,
509 128, 128, 135, 137, 172, 136, 140, 128,
510 129, 130, 131, 137, 138, 139, 140, 141,
511 142, 143, 144, 153, 154, 155, 156, 157,
512 158, 159, 160, 161, 162, 163, 164, 165,
513 166, 167, 168, 169, 170, 172, 173, 174,
514 175, 176, 177, 178, 179, 180, 181, 182,
515 184, 188, 189, 190, 191, 132, 152, 185,
516 187, 191, 128, 170, 161, 144, 149, 154,
517 157, 165, 166, 174, 176, 181, 255, 130,
518 141, 143, 159, 155, 255, 128, 140, 142,
519 145, 160, 177, 128, 145, 160, 172, 174,
520 176, 151, 156, 170, 128, 168, 176, 255,
521 138, 255, 128, 150, 160, 255, 149, 255,
522 167, 133, 179, 133, 139, 131, 160, 174,
523 175, 186, 255, 166, 255, 128, 163, 141,
524 143, 154, 189, 169, 172, 174, 177, 181,
525 182, 129, 130, 132, 133, 134, 176, 177,
526 178, 179, 180, 181, 182, 183, 177, 191,
527 165, 170, 175, 177, 180, 255, 168, 174,
528 176, 255, 128, 134, 136, 142, 144, 150,
529 152, 158, 128, 129, 130, 131, 132, 133,
530 134, 135, 144, 145, 255, 133, 135, 161,
531 169, 177, 181, 184, 188, 160, 151, 154,
532 128, 146, 147, 148, 152, 153, 154, 155,
533 156, 158, 159, 160, 161, 162, 163, 164,
534 165, 166, 167, 168, 169, 170, 171, 172,
535 173, 174, 175, 176, 129, 255, 141, 143,
536 160, 169, 172, 255, 191, 128, 174, 130,
537 134, 139, 163, 255, 130, 179, 187, 189,
538 178, 183, 138, 165, 176, 255, 135, 159,
539 189, 255, 132, 178, 143, 160, 164, 166,
540 175, 186, 190, 128, 168, 186, 128, 130,
541 132, 139, 160, 182, 190, 255, 176, 178,
542 180, 183, 184, 190, 255, 128, 130, 155,
543 157, 160, 170, 178, 180, 128, 162, 164,
544 169, 171, 172, 173, 174, 175, 180, 181,
545 182, 183, 185, 186, 187, 188, 189, 190,
546 191, 165, 179, 157, 190, 128, 134, 147,
547 151, 159, 168, 170, 182, 184, 188, 176,
548 180, 182, 255, 161, 186, 144, 145, 146,
549 147, 148, 150, 151, 152, 155, 157, 158,
550 160, 170, 171, 172, 175, 161, 169, 128,
551 129, 130, 131, 133, 138, 139, 140, 141,
552 142, 143, 144, 145, 146, 147, 148, 149,
553 152, 156, 157, 160, 161, 162, 163, 164,
554 166, 168, 169, 170, 171, 172, 173, 174,
555 176, 177, 153, 155, 178, 179, 145, 255,
556 139, 143, 182, 255, 158, 175, 128, 144,
557 147, 149, 151, 153, 179, 128, 135, 137,
558 164, 128, 130, 131, 132, 133, 134, 135,
559 136, 138, 139, 140, 141, 144, 145, 146,
560 147, 150, 151, 152, 153, 154, 156, 162,
561 163, 171, 176, 177, 178, 131, 183, 131,
562 175, 144, 168, 131, 166, 182, 144, 178,
563 131, 178, 154, 156, 129, 132, 128, 145,
564 147, 171, 159, 255, 144, 157, 161, 135,
565 138, 128, 175, 135, 132, 133, 128, 174,
566 152, 155, 132, 128, 170, 128, 153, 160,
567 190, 192, 255, 128, 136, 138, 174, 128,
568 178, 255, 160, 168, 169, 171, 172, 173,
569 174, 188, 189, 190, 191, 161, 167, 144,
570 173, 128, 131, 163, 183, 189, 255, 133,
571 143, 145, 255, 147, 159, 128, 176, 177,
572 178, 128, 136, 144, 153, 144, 145, 146,
573 147, 148, 149, 154, 155, 156, 157, 158,
574 159, 150, 153, 131, 140, 255, 160, 163,
575 164, 165, 184, 185, 186, 161, 162, 133,
576 255, 170, 181, 183, 186, 128, 150, 152,
577 182, 184, 255, 192, 255, 128, 255, 173,
578 130, 133, 146, 159, 165, 171, 175, 255,
579 181, 190, 184, 185, 192, 255, 140, 134,
580 138, 142, 161, 163, 255, 182, 130, 136,
581 137, 176, 151, 152, 154, 160, 190, 136,
582 144, 192, 255, 135, 129, 130, 132, 133,
583 144, 170, 176, 178, 144, 154, 160, 191,
584 128, 169, 174, 255, 148, 169, 157, 158,
585 189, 190, 192, 255, 144, 255, 139, 140,
586 178, 255, 186, 128, 181, 160, 161, 162,
587 163, 164, 165, 166, 167, 168, 169, 170,
588 171, 172, 173, 174, 175, 176, 177, 178,
589 179, 180, 181, 182, 183, 184, 185, 186,
590 187, 188, 189, 190, 191, 128, 173, 128,
591 155, 160, 180, 182, 189, 148, 161, 163,
592 255, 176, 164, 165, 132, 169, 177, 141,
593 142, 145, 146, 179, 181, 186, 187, 158,
594 133, 134, 137, 138, 143, 150, 152, 155,
595 164, 165, 178, 255, 188, 129, 131, 133,
596 138, 143, 144, 147, 168, 170, 176, 178,
597 179, 181, 182, 184, 185, 190, 255, 157,
598 131, 134, 137, 138, 142, 144, 146, 152,
599 159, 165, 182, 255, 129, 131, 133, 141,
600 143, 145, 147, 168, 170, 176, 178, 179,
601 181, 185, 188, 255, 134, 138, 142, 143,
602 145, 159, 164, 165, 176, 184, 186, 255,
603 129, 131, 133, 140, 143, 144, 147, 168,
604 170, 176, 178, 179, 181, 185, 188, 191,
605 177, 128, 132, 135, 136, 139, 141, 150,
606 151, 156, 157, 159, 163, 166, 175, 156,
607 130, 131, 133, 138, 142, 144, 146, 149,
608 153, 154, 158, 159, 163, 164, 168, 170,
609 174, 185, 190, 191, 144, 151, 128, 130,
610 134, 136, 138, 141, 166, 175, 128, 131,
611 133, 140, 142, 144, 146, 168, 170, 185,
612 189, 255, 133, 137, 151, 142, 148, 155,
613 159, 164, 165, 176, 255, 128, 131, 133,
614 140, 142, 144, 146, 168, 170, 179, 181,
615 185, 188, 191, 158, 128, 132, 134, 136,
616 138, 141, 149, 150, 160, 163, 166, 175,
617 177, 178, 129, 131, 133, 140, 142, 144,
618 146, 186, 189, 255, 133, 137, 143, 147,
619 152, 158, 164, 165, 176, 185, 192, 255,
620 189, 130, 131, 133, 150, 154, 177, 179,
621 187, 138, 150, 128, 134, 143, 148, 152,
622 159, 166, 175, 178, 179, 129, 186, 128,
623 142, 144, 153, 132, 138, 141, 165, 167,
624 129, 130, 135, 136, 148, 151, 153, 159,
625 161, 163, 170, 171, 173, 185, 187, 189,
626 134, 128, 132, 136, 141, 144, 153, 156,
627 159, 128, 181, 183, 185, 152, 153, 160,
628 169, 190, 191, 128, 135, 137, 172, 177,
629 191, 128, 132, 134, 151, 153, 188, 134,
630 128, 129, 130, 131, 137, 138, 139, 140,
631 141, 142, 143, 144, 153, 154, 155, 156,
632 157, 158, 159, 160, 161, 162, 163, 164,
633 165, 166, 167, 168, 169, 170, 173, 175,
634 176, 177, 178, 179, 181, 182, 183, 188,
635 189, 190, 191, 132, 152, 172, 184, 185,
636 187, 128, 191, 128, 137, 144, 255, 158,
637 159, 134, 187, 136, 140, 142, 143, 137,
638 151, 153, 142, 143, 158, 159, 137, 177,
639 142, 143, 182, 183, 191, 255, 128, 130,
640 133, 136, 150, 152, 255, 145, 150, 151,
641 155, 156, 160, 168, 178, 255, 128, 143,
642 160, 255, 182, 183, 190, 255, 129, 255,
643 173, 174, 192, 255, 129, 154, 160, 255,
644 171, 173, 185, 255, 128, 140, 142, 148,
645 160, 180, 128, 147, 160, 172, 174, 176,
646 178, 179, 148, 150, 152, 155, 158, 159,
647 170, 255, 139, 141, 144, 153, 160, 255,
648 184, 255, 128, 170, 176, 255, 182, 255,
649 128, 158, 160, 171, 176, 187, 134, 173,
650 176, 180, 128, 171, 176, 255, 138, 143,
651 155, 255, 128, 155, 160, 255, 159, 189,
652 190, 192, 255, 167, 128, 137, 144, 153,
653 176, 189, 140, 143, 154, 170, 180, 255,
654 180, 255, 128, 183, 128, 137, 141, 189,
655 128, 136, 144, 146, 148, 182, 184, 185,
656 128, 181, 187, 191, 150, 151, 158, 159,
657 152, 154, 156, 158, 134, 135, 142, 143,
658 190, 255, 190, 128, 180, 182, 188, 130,
659 132, 134, 140, 144, 147, 150, 155, 160,
660 172, 178, 180, 182, 188, 128, 129, 130,
661 131, 132, 133, 134, 176, 177, 178, 179,
662 180, 181, 182, 183, 191, 255, 129, 147,
663 149, 176, 178, 190, 192, 255, 144, 156,
664 161, 144, 156, 165, 176, 130, 135, 149,
665 164, 166, 168, 138, 147, 152, 157, 170,
666 185, 188, 191, 142, 133, 137, 160, 255,
667 137, 255, 128, 174, 176, 255, 159, 165,
668 170, 180, 255, 167, 173, 128, 165, 176,
669 255, 168, 174, 176, 190, 192, 255, 128,
670 150, 160, 166, 168, 174, 176, 182, 184,
671 190, 128, 134, 136, 142, 144, 150, 152,
672 158, 160, 191, 128, 129, 130, 131, 132,
673 133, 134, 135, 144, 145, 255, 133, 135,
674 161, 175, 177, 181, 184, 188, 160, 151,
675 152, 187, 192, 255, 133, 173, 177, 255,
676 143, 159, 187, 255, 176, 191, 182, 183,
677 184, 191, 192, 255, 150, 255, 128, 146,
678 147, 148, 152, 153, 154, 155, 156, 158,
679 159, 160, 161, 162, 163, 164, 165, 166,
680 167, 168, 169, 170, 171, 172, 173, 174,
681 175, 176, 129, 255, 141, 255, 144, 189,
682 141, 143, 172, 255, 191, 128, 175, 180,
683 189, 151, 159, 162, 255, 175, 137, 138,
684 184, 255, 183, 255, 168, 255, 128, 179,
685 188, 134, 143, 154, 159, 184, 186, 190,
686 255, 128, 173, 176, 255, 148, 159, 189,
687 255, 129, 142, 154, 159, 191, 255, 128,
688 182, 128, 141, 144, 153, 160, 182, 186,
689 255, 128, 130, 155, 157, 160, 175, 178,
690 182, 129, 134, 137, 142, 145, 150, 160,
691 166, 168, 174, 176, 255, 155, 166, 175,
692 128, 170, 172, 173, 176, 185, 158, 159,
693 160, 255, 164, 175, 135, 138, 188, 255,
694 164, 169, 171, 172, 173, 174, 175, 180,
695 181, 182, 183, 184, 185, 187, 188, 189,
696 190, 191, 165, 186, 174, 175, 154, 255,
697 190, 128, 134, 147, 151, 157, 168, 170,
698 182, 184, 188, 128, 129, 131, 132, 134,
699 255, 147, 255, 190, 255, 144, 145, 136,
700 175, 188, 255, 128, 143, 160, 175, 179,
701 180, 141, 143, 176, 180, 182, 255, 189,
702 255, 191, 144, 153, 161, 186, 129, 154,
703 166, 255, 191, 255, 130, 135, 138, 143,
704 146, 151, 154, 156, 144, 145, 146, 147,
705 148, 150, 151, 152, 155, 157, 158, 160,
706 170, 171, 172, 175, 161, 169, 128, 129,
707 130, 131, 133, 135, 138, 139, 140, 141,
708 142, 143, 144, 145, 146, 147, 148, 149,
709 152, 156, 157, 160, 161, 162, 163, 164,
710 166, 168, 169, 170, 171, 172, 173, 174,
711 176, 177, 153, 155, 178, 179, 128, 139,
712 141, 166, 168, 186, 188, 189, 191, 255,
713 142, 143, 158, 255, 187, 255, 128, 180,
714 189, 128, 156, 160, 255, 145, 159, 161,
715 255, 128, 159, 176, 255, 139, 143, 187,
716 255, 128, 157, 160, 255, 144, 132, 135,
717 150, 255, 158, 159, 170, 175, 148, 151,
718 188, 255, 128, 167, 176, 255, 164, 255,
719 183, 255, 128, 149, 160, 167, 136, 188,
720 128, 133, 138, 181, 183, 184, 191, 255,
721 150, 159, 183, 255, 128, 158, 160, 178,
722 180, 181, 128, 149, 160, 185, 128, 183,
723 190, 191, 191, 128, 131, 133, 134, 140,
724 147, 149, 151, 153, 179, 184, 186, 160,
725 188, 128, 156, 128, 135, 137, 166, 128,
726 181, 128, 149, 160, 178, 128, 145, 128,
727 178, 129, 130, 131, 132, 133, 135, 136,
728 138, 139, 140, 141, 144, 145, 146, 147,
729 150, 151, 152, 153, 154, 155, 156, 162,
730 163, 171, 176, 177, 178, 128, 134, 135,
731 165, 176, 190, 144, 168, 176, 185, 128,
732 180, 182, 191, 182, 144, 179, 155, 133,
733 137, 141, 143, 157, 255, 190, 128, 145,
734 147, 183, 136, 128, 134, 138, 141, 143,
735 157, 159, 168, 176, 255, 171, 175, 186,
736 255, 128, 131, 133, 140, 143, 144, 147,
737 168, 170, 176, 178, 179, 181, 185, 188,
738 191, 144, 151, 128, 132, 135, 136, 139,
739 141, 157, 163, 166, 172, 176, 180, 128,
740 138, 144, 153, 134, 136, 143, 154, 255,
741 128, 181, 184, 255, 129, 151, 158, 255,
742 129, 131, 133, 143, 154, 255, 128, 137,
743 128, 153, 157, 171, 176, 185, 160, 255,
744 170, 190, 192, 255, 128, 184, 128, 136,
745 138, 182, 184, 191, 128, 144, 153, 178,
746 255, 168, 144, 145, 183, 255, 128, 142,
747 145, 149, 129, 141, 144, 146, 147, 148,
748 175, 255, 132, 255, 128, 144, 129, 143,
749 144, 153, 145, 152, 135, 255, 160, 168,
750 169, 171, 172, 173, 174, 188, 189, 190,
751 191, 161, 167, 185, 255, 128, 158, 160,
752 169, 144, 173, 176, 180, 128, 131, 144,
753 153, 163, 183, 189, 255, 144, 255, 133,
754 143, 191, 255, 143, 159, 160, 128, 129,
755 255, 159, 160, 171, 172, 255, 173, 255,
756 179, 255, 128, 176, 177, 178, 128, 129,
757 171, 175, 189, 255, 128, 136, 144, 153,
758 157, 158, 133, 134, 137, 144, 145, 146,
759 147, 148, 149, 154, 155, 156, 157, 158,
760 159, 168, 169, 170, 150, 153, 165, 169,
761 173, 178, 187, 255, 131, 132, 140, 169,
762 174, 255, 130, 132, 149, 157, 173, 186,
763 188, 160, 161, 163, 164, 167, 168, 132,
764 134, 149, 157, 186, 139, 140, 191, 255,
765 134, 128, 132, 138, 144, 146, 255, 166,
766 167, 129, 155, 187, 149, 181, 143, 175,
767 137, 169, 131, 140, 141, 192, 255, 128,
768 182, 187, 255, 173, 180, 182, 255, 132,
769 155, 159, 161, 175, 128, 160, 163, 164,
770 165, 184, 185, 186, 161, 162, 128, 134,
771 136, 152, 155, 161, 163, 164, 166, 170,
772 133, 143, 151, 255, 139, 143, 154, 255,
773 164, 167, 185, 187, 128, 131, 133, 159,
774 161, 162, 169, 178, 180, 183, 130, 135,
775 137, 139, 148, 151, 153, 155, 157, 159,
776 164, 190, 141, 143, 145, 146, 161, 162,
777 167, 170, 172, 178, 180, 183, 185, 188,
778 128, 137, 139, 155, 161, 163, 165, 169,
779 171, 187, 155, 156, 151, 255, 156, 157,
780 160, 181, 255, 186, 187, 255, 162, 255,
781 160, 168, 161, 167, 158, 255, 160, 132,
782 135, 133, 134, 176, 255, 128, 191, 154,
783 164, 168, 128, 149, 150, 191, 128, 152,
784 153, 191, 181, 128, 159, 160, 189, 190,
785 191, 189, 128, 131, 132, 185, 186, 191,
786 144, 128, 151, 152, 161, 162, 176, 177,
787 255, 169, 177, 129, 132, 141, 142, 145,
788 146, 179, 181, 186, 188, 190, 191, 192,
789 255, 142, 158, 128, 155, 156, 161, 162,
790 175, 176, 177, 178, 191, 169, 177, 180,
791 183, 128, 132, 133, 138, 139, 142, 143,
792 144, 145, 146, 147, 185, 186, 191, 157,
793 128, 152, 153, 158, 159, 177, 178, 180,
794 181, 191, 142, 146, 169, 177, 180, 189,
795 128, 132, 133, 185, 186, 191, 144, 185,
796 128, 159, 160, 161, 162, 191, 169, 177,
797 180, 189, 128, 132, 133, 140, 141, 142,
798 143, 144, 145, 146, 147, 185, 186, 191,
799 158, 177, 128, 155, 156, 161, 162, 191,
800 131, 145, 155, 157, 128, 132, 133, 138,
801 139, 141, 142, 149, 150, 152, 153, 159,
802 160, 162, 163, 164, 165, 167, 168, 170,
803 171, 173, 174, 185, 186, 191, 144, 128,
804 191, 141, 145, 169, 189, 128, 132, 133,
805 185, 186, 191, 128, 151, 152, 154, 155,
806 159, 160, 161, 162, 191, 128, 141, 145,
807 169, 180, 189, 129, 132, 133, 185, 186,
808 191, 158, 128, 159, 160, 161, 162, 176,
809 177, 178, 179, 191, 141, 145, 189, 128,
810 132, 133, 186, 187, 191, 142, 128, 147,
811 148, 150, 151, 158, 159, 161, 162, 185,
812 186, 191, 178, 188, 128, 132, 133, 150,
813 151, 153, 154, 189, 190, 191, 128, 134,
814 135, 191, 128, 177, 129, 179, 180, 191,
815 128, 131, 137, 141, 152, 160, 164, 166,
816 172, 177, 189, 129, 132, 133, 134, 135,
817 138, 139, 147, 148, 167, 168, 169, 170,
818 179, 180, 191, 133, 128, 134, 135, 155,
819 156, 159, 160, 191, 128, 129, 191, 136,
820 128, 172, 173, 191, 128, 135, 136, 140,
821 141, 191, 191, 128, 170, 171, 190, 161,
822 128, 143, 144, 149, 150, 153, 154, 157,
823 158, 164, 165, 166, 167, 173, 174, 176,
824 177, 180, 181, 255, 130, 141, 143, 159,
825 134, 187, 136, 140, 142, 143, 137, 151,
826 153, 142, 143, 158, 159, 137, 177, 191,
827 142, 143, 182, 183, 192, 255, 129, 151,
828 128, 133, 134, 135, 136, 255, 145, 150,
829 151, 155, 191, 192, 255, 128, 143, 144,
830 159, 160, 255, 182, 183, 190, 191, 192,
831 255, 128, 129, 255, 173, 174, 192, 255,
832 128, 129, 154, 155, 159, 160, 255, 171,
833 173, 185, 191, 192, 255, 141, 128, 145,
834 146, 159, 160, 177, 178, 191, 173, 128,
835 145, 146, 159, 160, 176, 177, 191, 128,
836 179, 180, 191, 151, 156, 128, 191, 128,
837 159, 160, 255, 184, 191, 192, 255, 169,
838 128, 170, 171, 175, 176, 255, 182, 191,
839 192, 255, 128, 158, 159, 191, 128, 143,
840 144, 173, 174, 175, 176, 180, 181, 191,
841 128, 171, 172, 175, 176, 255, 138, 191,
842 192, 255, 128, 150, 151, 159, 160, 255,
843 149, 191, 192, 255, 167, 128, 191, 128,
844 132, 133, 179, 180, 191, 128, 132, 133,
845 139, 140, 191, 128, 130, 131, 160, 161,
846 173, 174, 175, 176, 185, 186, 255, 166,
847 191, 192, 255, 128, 163, 164, 191, 128,
848 140, 141, 143, 144, 153, 154, 189, 190,
849 191, 128, 136, 137, 191, 173, 128, 168,
850 169, 177, 178, 180, 181, 182, 183, 191,
851 0, 127, 192, 255, 150, 151, 158, 159,
852 152, 154, 156, 158, 134, 135, 142, 143,
853 190, 191, 192, 255, 181, 189, 191, 128,
854 190, 133, 181, 128, 129, 130, 140, 141,
855 143, 144, 147, 148, 149, 150, 155, 156,
856 159, 160, 172, 173, 177, 178, 188, 189,
857 191, 177, 191, 128, 190, 128, 143, 144,
858 156, 157, 191, 130, 135, 148, 164, 166,
859 168, 128, 137, 138, 149, 150, 151, 152,
860 157, 158, 169, 170, 185, 186, 187, 188,
861 191, 142, 128, 132, 133, 137, 138, 159,
862 160, 255, 137, 191, 192, 255, 175, 128,
863 255, 159, 165, 170, 175, 177, 180, 191,
864 192, 255, 166, 173, 128, 167, 168, 175,
865 176, 255, 168, 174, 176, 191, 192, 255,
866 167, 175, 183, 191, 128, 150, 151, 159,
867 160, 190, 135, 143, 151, 128, 158, 159,
868 191, 128, 132, 133, 135, 136, 160, 161,
869 169, 170, 176, 177, 181, 182, 183, 184,
870 188, 189, 191, 160, 151, 154, 187, 192,
871 255, 128, 132, 133, 173, 174, 176, 177,
872 255, 143, 159, 187, 191, 192, 255, 128,
873 175, 176, 191, 150, 191, 192, 255, 141,
874 191, 192, 255, 128, 143, 144, 189, 190,
875 191, 141, 143, 160, 169, 172, 191, 192,
876 255, 191, 128, 174, 175, 190, 128, 157,
877 158, 159, 160, 255, 176, 191, 192, 255,
878 128, 150, 151, 159, 160, 161, 162, 255,
879 175, 137, 138, 184, 191, 192, 255, 128,
880 182, 183, 255, 130, 134, 139, 163, 191,
881 192, 255, 128, 129, 130, 179, 180, 191,
882 187, 189, 128, 177, 178, 183, 184, 191,
883 128, 137, 138, 165, 166, 175, 176, 255,
884 135, 159, 189, 191, 192, 255, 128, 131,
885 132, 178, 179, 191, 143, 165, 191, 128,
886 159, 160, 175, 176, 185, 186, 190, 128,
887 168, 169, 191, 131, 186, 128, 139, 140,
888 159, 160, 182, 183, 189, 190, 255, 176,
889 178, 180, 183, 184, 190, 191, 192, 255,
890 129, 128, 130, 131, 154, 155, 157, 158,
891 159, 160, 170, 171, 177, 178, 180, 181,
892 191, 128, 167, 175, 129, 134, 135, 136,
893 137, 142, 143, 144, 145, 150, 151, 159,
894 160, 255, 155, 166, 175, 128, 162, 163,
895 191, 164, 175, 135, 138, 188, 191, 192,
896 255, 174, 175, 154, 191, 192, 255, 157,
897 169, 183, 189, 191, 128, 134, 135, 146,
898 147, 151, 152, 158, 159, 190, 130, 133,
899 128, 255, 178, 191, 192, 255, 128, 146,
900 147, 255, 190, 191, 192, 255, 128, 143,
901 144, 255, 144, 145, 136, 175, 188, 191,
902 192, 255, 181, 128, 175, 176, 255, 189,
903 191, 192, 255, 128, 160, 161, 186, 187,
904 191, 128, 129, 154, 155, 165, 166, 255,
905 191, 192, 255, 128, 129, 130, 135, 136,
906 137, 138, 143, 144, 145, 146, 151, 152,
907 153, 154, 156, 157, 191, 128, 191, 128,
908 129, 130, 131, 133, 138, 139, 140, 141,
909 142, 143, 144, 145, 146, 147, 148, 149,
910 152, 156, 157, 160, 161, 162, 163, 164,
911 166, 168, 169, 170, 171, 172, 173, 174,
912 176, 177, 132, 151, 153, 155, 158, 175,
913 178, 179, 180, 191, 140, 167, 187, 190,
914 128, 255, 142, 143, 158, 191, 192, 255,
915 187, 191, 192, 255, 128, 180, 181, 191,
916 128, 156, 157, 159, 160, 255, 145, 191,
917 192, 255, 128, 159, 160, 175, 176, 255,
918 139, 143, 182, 191, 192, 255, 144, 132,
919 135, 150, 191, 192, 255, 158, 175, 148,
920 151, 188, 191, 192, 255, 128, 167, 168,
921 175, 176, 255, 164, 191, 192, 255, 183,
922 191, 192, 255, 128, 149, 150, 159, 160,
923 167, 168, 191, 136, 182, 188, 128, 133,
924 134, 137, 138, 184, 185, 190, 191, 255,
925 150, 159, 183, 191, 192, 255, 179, 128,
926 159, 160, 181, 182, 191, 128, 149, 150,
927 159, 160, 185, 186, 191, 128, 183, 184,
928 189, 190, 191, 128, 148, 152, 129, 143,
929 144, 179, 180, 191, 128, 159, 160, 188,
930 189, 191, 128, 156, 157, 191, 136, 128,
931 164, 165, 191, 128, 181, 182, 191, 128,
932 149, 150, 159, 160, 178, 179, 191, 128,
933 145, 146, 191, 128, 178, 179, 191, 128,
934 130, 131, 132, 133, 134, 135, 136, 138,
935 139, 140, 141, 144, 145, 146, 147, 150,
936 151, 152, 153, 154, 156, 162, 163, 171,
937 176, 177, 178, 129, 191, 128, 130, 131,
938 183, 184, 191, 128, 130, 131, 175, 176,
939 191, 128, 143, 144, 168, 169, 191, 128,
940 130, 131, 166, 167, 191, 182, 128, 143,
941 144, 178, 179, 191, 128, 130, 131, 178,
942 179, 191, 128, 154, 156, 129, 132, 133,
943 191, 146, 128, 171, 172, 191, 135, 137,
944 142, 158, 128, 168, 169, 175, 176, 255,
945 159, 191, 192, 255, 144, 128, 156, 157,
946 161, 162, 191, 128, 134, 135, 138, 139,
947 191, 128, 175, 176, 191, 134, 128, 131,
948 132, 135, 136, 191, 128, 174, 175, 191,
949 128, 151, 152, 155, 156, 191, 132, 128,
950 191, 128, 170, 171, 191, 128, 153, 154,
951 191, 160, 190, 192, 255, 128, 184, 185,
952 191, 137, 128, 174, 175, 191, 128, 129,
953 177, 178, 255, 144, 191, 192, 255, 128,
954 142, 143, 144, 145, 146, 149, 129, 148,
955 150, 191, 175, 191, 192, 255, 132, 191,
956 192, 255, 128, 144, 129, 143, 145, 191,
957 144, 153, 128, 143, 145, 152, 154, 191,
958 135, 191, 192, 255, 160, 168, 169, 171,
959 172, 173, 174, 188, 189, 190, 191, 128,
960 159, 161, 167, 170, 187, 185, 191, 192,
961 255, 128, 143, 144, 173, 174, 191, 128,
962 131, 132, 162, 163, 183, 184, 188, 189,
963 255, 133, 143, 145, 191, 192, 255, 128,
964 146, 147, 159, 160, 191, 160, 128, 191,
965 128, 129, 191, 192, 255, 159, 160, 171,
966 128, 170, 172, 191, 192, 255, 173, 191,
967 192, 255, 179, 191, 192, 255, 128, 176,
968 177, 178, 129, 191, 128, 129, 130, 191,
969 171, 175, 189, 191, 192, 255, 128, 136,
970 137, 143, 144, 153, 154, 191, 144, 145,
971 146, 147, 148, 149, 154, 155, 156, 157,
972 158, 159, 128, 143, 150, 153, 160, 191,
973 149, 157, 173, 186, 188, 160, 161, 163,
974 164, 167, 168, 132, 134, 149, 157, 186,
975 191, 139, 140, 192, 255, 133, 145, 128,
976 134, 135, 137, 138, 255, 166, 167, 129,
977 155, 187, 149, 181, 143, 175, 137, 169,
978 131, 140, 191, 192, 255, 160, 163, 164,
979 165, 184, 185, 186, 128, 159, 161, 162,
980 166, 191, 133, 191, 192, 255, 132, 160,
981 163, 167, 179, 184, 186, 128, 164, 165,
982 168, 169, 187, 188, 191, 130, 135, 137,
983 139, 144, 147, 151, 153, 155, 157, 159,
984 163, 171, 179, 184, 189, 191, 128, 140,
985 141, 148, 149, 160, 161, 164, 165, 166,
986 167, 190, 138, 164, 170, 128, 155, 156,
987 160, 161, 187, 188, 191, 128, 191, 155,
988 156, 128, 191, 151, 191, 192, 255, 156,
989 157, 160, 128, 191, 181, 191, 192, 255,
990 158, 159, 186, 128, 185, 187, 191, 192,
991 255, 162, 191, 192, 255, 160, 168, 128,
992 159, 161, 167, 169, 191, 158, 191, 192,
993 255, 123, 123, 128, 191, 128, 191, 128,
994 191, 128, 191, 128, 191, 10, 123, 123,
995 128, 191, 128, 191, 128, 191, 123, 123,
996 10, 123, 128, 191, 128, 191, 128, 191,
997 123, 123, 170, 181, 183, 186, 128, 150,
998 152, 182, 184, 255, 192, 255, 128, 255,
999 173, 130, 133, 146, 159, 165, 171, 175,
1000 255, 181, 190, 184, 185, 192, 255, 140,
1001 134, 138, 142, 161, 163, 255, 182, 130,
1002 136, 137, 176, 151, 152, 154, 160, 190,
1003 136, 144, 192, 255, 135, 129, 130, 132,
1004 133, 144, 170, 176, 178, 144, 154, 160,
1005 191, 128, 169, 174, 255, 148, 169, 157,
1006 158, 189, 190, 192, 255, 144, 255, 139,
1007 140, 178, 255, 186, 128, 181, 160, 161,
1008 162, 163, 164, 165, 166, 167, 168, 169,
1009 170, 171, 172, 173, 174, 175, 176, 177,
1010 178, 179, 180, 181, 182, 183, 184, 185,
1011 186, 187, 188, 189, 190, 191, 128, 173,
1012 128, 155, 160, 180, 182, 189, 148, 161,
1013 163, 255, 176, 164, 165, 132, 169, 177,
1014 141, 142, 145, 146, 179, 181, 186, 187,
1015 158, 133, 134, 137, 138, 143, 150, 152,
1016 155, 164, 165, 178, 255, 188, 129, 131,
1017 133, 138, 143, 144, 147, 168, 170, 176,
1018 178, 179, 181, 182, 184, 185, 190, 255,
1019 157, 131, 134, 137, 138, 142, 144, 146,
1020 152, 159, 165, 182, 255, 129, 131, 133,
1021 141, 143, 145, 147, 168, 170, 176, 178,
1022 179, 181, 185, 188, 255, 134, 138, 142,
1023 143, 145, 159, 164, 165, 176, 184, 186,
1024 255, 129, 131, 133, 140, 143, 144, 147,
1025 168, 170, 176, 178, 179, 181, 185, 188,
1026 191, 177, 128, 132, 135, 136, 139, 141,
1027 150, 151, 156, 157, 159, 163, 166, 175,
1028 156, 130, 131, 133, 138, 142, 144, 146,
1029 149, 153, 154, 158, 159, 163, 164, 168,
1030 170, 174, 185, 190, 191, 144, 151, 128,
1031 130, 134, 136, 138, 141, 166, 175, 128,
1032 131, 133, 140, 142, 144, 146, 168, 170,
1033 185, 189, 255, 133, 137, 151, 142, 148,
1034 155, 159, 164, 165, 176, 255, 128, 131,
1035 133, 140, 142, 144, 146, 168, 170, 179,
1036 181, 185, 188, 191, 158, 128, 132, 134,
1037 136, 138, 141, 149, 150, 160, 163, 166,
1038 175, 177, 178, 129, 131, 133, 140, 142,
1039 144, 146, 186, 189, 255, 133, 137, 143,
1040 147, 152, 158, 164, 165, 176, 185, 192,
1041 255, 189, 130, 131, 133, 150, 154, 177,
1042 179, 187, 138, 150, 128, 134, 143, 148,
1043 152, 159, 166, 175, 178, 179, 129, 186,
1044 128, 142, 144, 153, 132, 138, 141, 165,
1045 167, 129, 130, 135, 136, 148, 151, 153,
1046 159, 161, 163, 170, 171, 173, 185, 187,
1047 189, 134, 128, 132, 136, 141, 144, 153,
1048 156, 159, 128, 181, 183, 185, 152, 153,
1049 160, 169, 190, 191, 128, 135, 137, 172,
1050 177, 191, 128, 132, 134, 151, 153, 188,
1051 134, 128, 129, 130, 131, 137, 138, 139,
1052 140, 141, 142, 143, 144, 153, 154, 155,
1053 156, 157, 158, 159, 160, 161, 162, 163,
1054 164, 165, 166, 167, 168, 169, 170, 173,
1055 175, 176, 177, 178, 179, 181, 182, 183,
1056 188, 189, 190, 191, 132, 152, 172, 184,
1057 185, 187, 128, 191, 128, 137, 144, 255,
1058 158, 159, 134, 187, 136, 140, 142, 143,
1059 137, 151, 153, 142, 143, 158, 159, 137,
1060 177, 142, 143, 182, 183, 191, 255, 128,
1061 130, 133, 136, 150, 152, 255, 145, 150,
1062 151, 155, 156, 160, 168, 178, 255, 128,
1063 143, 160, 255, 182, 183, 190, 255, 129,
1064 255, 173, 174, 192, 255, 129, 154, 160,
1065 255, 171, 173, 185, 255, 128, 140, 142,
1066 148, 160, 180, 128, 147, 160, 172, 174,
1067 176, 178, 179, 148, 150, 152, 155, 158,
1068 159, 170, 255, 139, 141, 144, 153, 160,
1069 255, 184, 255, 128, 170, 176, 255, 182,
1070 255, 128, 158, 160, 171, 176, 187, 134,
1071 173, 176, 180, 128, 171, 176, 255, 138,
1072 143, 155, 255, 128, 155, 160, 255, 159,
1073 189, 190, 192, 255, 167, 128, 137, 144,
1074 153, 176, 189, 140, 143, 154, 170, 180,
1075 255, 180, 255, 128, 183, 128, 137, 141,
1076 189, 128, 136, 144, 146, 148, 182, 184,
1077 185, 128, 181, 187, 191, 150, 151, 158,
1078 159, 152, 154, 156, 158, 134, 135, 142,
1079 143, 190, 255, 190, 128, 180, 182, 188,
1080 130, 132, 134, 140, 144, 147, 150, 155,
1081 160, 172, 178, 180, 182, 188, 128, 129,
1082 130, 131, 132, 133, 134, 176, 177, 178,
1083 179, 180, 181, 182, 183, 191, 255, 129,
1084 147, 149, 176, 178, 190, 192, 255, 144,
1085 156, 161, 144, 156, 165, 176, 130, 135,
1086 149, 164, 166, 168, 138, 147, 152, 157,
1087 170, 185, 188, 191, 142, 133, 137, 160,
1088 255, 137, 255, 128, 174, 176, 255, 159,
1089 165, 170, 180, 255, 167, 173, 128, 165,
1090 176, 255, 168, 174, 176, 190, 192, 255,
1091 128, 150, 160, 166, 168, 174, 176, 182,
1092 184, 190, 128, 134, 136, 142, 144, 150,
1093 152, 158, 160, 191, 128, 129, 130, 131,
1094 132, 133, 134, 135, 144, 145, 255, 133,
1095 135, 161, 175, 177, 181, 184, 188, 160,
1096 151, 152, 187, 192, 255, 133, 173, 177,
1097 255, 143, 159, 187, 255, 176, 191, 182,
1098 183, 184, 191, 192, 255, 150, 255, 128,
1099 146, 147, 148, 152, 153, 154, 155, 156,
1100 158, 159, 160, 161, 162, 163, 164, 165,
1101 166, 167, 168, 169, 170, 171, 172, 173,
1102 174, 175, 176, 129, 255, 141, 255, 144,
1103 189, 141, 143, 172, 255, 191, 128, 175,
1104 180, 189, 151, 159, 162, 255, 175, 137,
1105 138, 184, 255, 183, 255, 168, 255, 128,
1106 179, 188, 134, 143, 154, 159, 184, 186,
1107 190, 255, 128, 173, 176, 255, 148, 159,
1108 189, 255, 129, 142, 154, 159, 191, 255,
1109 128, 182, 128, 141, 144, 153, 160, 182,
1110 186, 255, 128, 130, 155, 157, 160, 175,
1111 178, 182, 129, 134, 137, 142, 145, 150,
1112 160, 166, 168, 174, 176, 255, 155, 166,
1113 175, 128, 170, 172, 173, 176, 185, 158,
1114 159, 160, 255, 164, 175, 135, 138, 188,
1115 255, 164, 169, 171, 172, 173, 174, 175,
1116 180, 181, 182, 183, 184, 185, 187, 188,
1117 189, 190, 191, 165, 186, 174, 175, 154,
1118 255, 190, 128, 134, 147, 151, 157, 168,
1119 170, 182, 184, 188, 128, 129, 131, 132,
1120 134, 255, 147, 255, 190, 255, 144, 145,
1121 136, 175, 188, 255, 128, 143, 160, 175,
1122 179, 180, 141, 143, 176, 180, 182, 255,
1123 189, 255, 191, 144, 153, 161, 186, 129,
1124 154, 166, 255, 191, 255, 130, 135, 138,
1125 143, 146, 151, 154, 156, 144, 145, 146,
1126 147, 148, 150, 151, 152, 155, 157, 158,
1127 160, 170, 171, 172, 175, 161, 169, 128,
1128 129, 130, 131, 133, 135, 138, 139, 140,
1129 141, 142, 143, 144, 145, 146, 147, 148,
1130 149, 152, 156, 157, 160, 161, 162, 163,
1131 164, 166, 168, 169, 170, 171, 172, 173,
1132 174, 176, 177, 153, 155, 178, 179, 128,
1133 139, 141, 166, 168, 186, 188, 189, 191,
1134 255, 142, 143, 158, 255, 187, 255, 128,
1135 180, 189, 128, 156, 160, 255, 145, 159,
1136 161, 255, 128, 159, 176, 255, 139, 143,
1137 187, 255, 128, 157, 160, 255, 144, 132,
1138 135, 150, 255, 158, 159, 170, 175, 148,
1139 151, 188, 255, 128, 167, 176, 255, 164,
1140 255, 183, 255, 128, 149, 160, 167, 136,
1141 188, 128, 133, 138, 181, 183, 184, 191,
1142 255, 150, 159, 183, 255, 128, 158, 160,
1143 178, 180, 181, 128, 149, 160, 185, 128,
1144 183, 190, 191, 191, 128, 131, 133, 134,
1145 140, 147, 149, 151, 153, 179, 184, 186,
1146 160, 188, 128, 156, 128, 135, 137, 166,
1147 128, 181, 128, 149, 160, 178, 128, 145,
1148 128, 178, 129, 130, 131, 132, 133, 135,
1149 136, 138, 139, 140, 141, 144, 145, 146,
1150 147, 150, 151, 152, 153, 154, 155, 156,
1151 162, 163, 171, 176, 177, 178, 128, 134,
1152 135, 165, 176, 190, 144, 168, 176, 185,
1153 128, 180, 182, 191, 182, 144, 179, 155,
1154 133, 137, 141, 143, 157, 255, 190, 128,
1155 145, 147, 183, 136, 128, 134, 138, 141,
1156 143, 157, 159, 168, 176, 255, 171, 175,
1157 186, 255, 128, 131, 133, 140, 143, 144,
1158 147, 168, 170, 176, 178, 179, 181, 185,
1159 188, 191, 144, 151, 128, 132, 135, 136,
1160 139, 141, 157, 163, 166, 172, 176, 180,
1161 128, 138, 144, 153, 134, 136, 143, 154,
1162 255, 128, 181, 184, 255, 129, 151, 158,
1163 255, 129, 131, 133, 143, 154, 255, 128,
1164 137, 128, 153, 157, 171, 176, 185, 160,
1165 255, 170, 190, 192, 255, 128, 184, 128,
1166 136, 138, 182, 184, 191, 128, 144, 153,
1167 178, 255, 168, 144, 145, 183, 255, 128,
1168 142, 145, 149, 129, 141, 144, 146, 147,
1169 148, 175, 255, 132, 255, 128, 144, 129,
1170 143, 144, 153, 145, 152, 135, 255, 160,
1171 168, 169, 171, 172, 173, 174, 188, 189,
1172 190, 191, 161, 167, 185, 255, 128, 158,
1173 160, 169, 144, 173, 176, 180, 128, 131,
1174 144, 153, 163, 183, 189, 255, 144, 255,
1175 133, 143, 191, 255, 143, 159, 160, 128,
1176 129, 255, 159, 160, 171, 172, 255, 173,
1177 255, 179, 255, 128, 176, 177, 178, 128,
1178 129, 171, 175, 189, 255, 128, 136, 144,
1179 153, 157, 158, 133, 134, 137, 144, 145,
1180 146, 147, 148, 149, 154, 155, 156, 157,
1181 158, 159, 168, 169, 170, 150, 153, 165,
1182 169, 173, 178, 187, 255, 131, 132, 140,
1183 169, 174, 255, 130, 132, 149, 157, 173,
1184 186, 188, 160, 161, 163, 164, 167, 168,
1185 132, 134, 149, 157, 186, 139, 140, 191,
1186 255, 134, 128, 132, 138, 144, 146, 255,
1187 166, 167, 129, 155, 187, 149, 181, 143,
1188 175, 137, 169, 131, 140, 141, 192, 255,
1189 128, 182, 187, 255, 173, 180, 182, 255,
1190 132, 155, 159, 161, 175, 128, 160, 163,
1191 164, 165, 184, 185, 186, 161, 162, 128,
1192 134, 136, 152, 155, 161, 163, 164, 166,
1193 170, 133, 143, 151, 255, 139, 143, 154,
1194 255, 164, 167, 185, 187, 128, 131, 133,
1195 159, 161, 162, 169, 178, 180, 183, 130,
1196 135, 137, 139, 148, 151, 153, 155, 157,
1197 159, 164, 190, 141, 143, 145, 146, 161,
1198 162, 167, 170, 172, 178, 180, 183, 185,
1199 188, 128, 137, 139, 155, 161, 163, 165,
1200 169, 171, 187, 155, 156, 151, 255, 156,
1201 157, 160, 181, 255, 186, 187, 255, 162,
1202 255, 160, 168, 161, 167, 158, 255, 160,
1203 132, 135, 133, 134, 176, 255, 128, 191,
1204 154, 164, 168, 128, 149, 150, 191, 128,
1205 152, 153, 191, 181, 128, 159, 160, 189,
1206 190, 191, 189, 128, 131, 132, 185, 186,
1207 191, 144, 128, 151, 152, 161, 162, 176,
1208 177, 255, 169, 177, 129, 132, 141, 142,
1209 145, 146, 179, 181, 186, 188, 190, 191,
1210 192, 255, 142, 158, 128, 155, 156, 161,
1211 162, 175, 176, 177, 178, 191, 169, 177,
1212 180, 183, 128, 132, 133, 138, 139, 142,
1213 143, 144, 145, 146, 147, 185, 186, 191,
1214 157, 128, 152, 153, 158, 159, 177, 178,
1215 180, 181, 191, 142, 146, 169, 177, 180,
1216 189, 128, 132, 133, 185, 186, 191, 144,
1217 185, 128, 159, 160, 161, 162, 191, 169,
1218 177, 180, 189, 128, 132, 133, 140, 141,
1219 142, 143, 144, 145, 146, 147, 185, 186,
1220 191, 158, 177, 128, 155, 156, 161, 162,
1221 191, 131, 145, 155, 157, 128, 132, 133,
1222 138, 139, 141, 142, 149, 150, 152, 153,
1223 159, 160, 162, 163, 164, 165, 167, 168,
1224 170, 171, 173, 174, 185, 186, 191, 144,
1225 128, 191, 141, 145, 169, 189, 128, 132,
1226 133, 185, 186, 191, 128, 151, 152, 154,
1227 155, 159, 160, 161, 162, 191, 128, 141,
1228 145, 169, 180, 189, 129, 132, 133, 185,
1229 186, 191, 158, 128, 159, 160, 161, 162,
1230 176, 177, 178, 179, 191, 141, 145, 189,
1231 128, 132, 133, 186, 187, 191, 142, 128,
1232 147, 148, 150, 151, 158, 159, 161, 162,
1233 185, 186, 191, 178, 188, 128, 132, 133,
1234 150, 151, 153, 154, 189, 190, 191, 128,
1235 134, 135, 191, 128, 177, 129, 179, 180,
1236 191, 128, 131, 137, 141, 152, 160, 164,
1237 166, 172, 177, 189, 129, 132, 133, 134,
1238 135, 138, 139, 147, 148, 167, 168, 169,
1239 170, 179, 180, 191, 133, 128, 134, 135,
1240 155, 156, 159, 160, 191, 128, 129, 191,
1241 136, 128, 172, 173, 191, 128, 135, 136,
1242 140, 141, 191, 191, 128, 170, 171, 190,
1243 161, 128, 143, 144, 149, 150, 153, 154,
1244 157, 158, 164, 165, 166, 167, 173, 174,
1245 176, 177, 180, 181, 255, 130, 141, 143,
1246 159, 134, 187, 136, 140, 142, 143, 137,
1247 151, 153, 142, 143, 158, 159, 137, 177,
1248 191, 142, 143, 182, 183, 192, 255, 129,
1249 151, 128, 133, 134, 135, 136, 255, 145,
1250 150, 151, 155, 191, 192, 255, 128, 143,
1251 144, 159, 160, 255, 182, 183, 190, 191,
1252 192, 255, 128, 129, 255, 173, 174, 192,
1253 255, 128, 129, 154, 155, 159, 160, 255,
1254 171, 173, 185, 191, 192, 255, 141, 128,
1255 145, 146, 159, 160, 177, 178, 191, 173,
1256 128, 145, 146, 159, 160, 176, 177, 191,
1257 128, 179, 180, 191, 151, 156, 128, 191,
1258 128, 159, 160, 255, 184, 191, 192, 255,
1259 169, 128, 170, 171, 175, 176, 255, 182,
1260 191, 192, 255, 128, 158, 159, 191, 128,
1261 143, 144, 173, 174, 175, 176, 180, 181,
1262 191, 128, 171, 172, 175, 176, 255, 138,
1263 191, 192, 255, 128, 150, 151, 159, 160,
1264 255, 149, 191, 192, 255, 167, 128, 191,
1265 128, 132, 133, 179, 180, 191, 128, 132,
1266 133, 139, 140, 191, 128, 130, 131, 160,
1267 161, 173, 174, 175, 176, 185, 186, 255,
1268 166, 191, 192, 255, 128, 163, 164, 191,
1269 128, 140, 141, 143, 144, 153, 154, 189,
1270 190, 191, 128, 136, 137, 191, 173, 128,
1271 168, 169, 177, 178, 180, 181, 182, 183,
1272 191, 0, 127, 192, 255, 150, 151, 158,
1273 159, 152, 154, 156, 158, 134, 135, 142,
1274 143, 190, 191, 192, 255, 181, 189, 191,
1275 128, 190, 133, 181, 128, 129, 130, 140,
1276 141, 143, 144, 147, 148, 149, 150, 155,
1277 156, 159, 160, 172, 173, 177, 178, 188,
1278 189, 191, 177, 191, 128, 190, 128, 143,
1279 144, 156, 157, 191, 130, 135, 148, 164,
1280 166, 168, 128, 137, 138, 149, 150, 151,
1281 152, 157, 158, 169, 170, 185, 186, 187,
1282 188, 191, 142, 128, 132, 133, 137, 138,
1283 159, 160, 255, 137, 191, 192, 255, 175,
1284 128, 255, 159, 165, 170, 175, 177, 180,
1285 191, 192, 255, 166, 173, 128, 167, 168,
1286 175, 176, 255, 168, 174, 176, 191, 192,
1287 255, 167, 175, 183, 191, 128, 150, 151,
1288 159, 160, 190, 135, 143, 151, 128, 158,
1289 159, 191, 128, 132, 133, 135, 136, 160,
1290 161, 169, 170, 176, 177, 181, 182, 183,
1291 184, 188, 189, 191, 160, 151, 154, 187,
1292 192, 255, 128, 132, 133, 173, 174, 176,
1293 177, 255, 143, 159, 187, 191, 192, 255,
1294 128, 175, 176, 191, 150, 191, 192, 255,
1295 141, 191, 192, 255, 128, 143, 144, 189,
1296 190, 191, 141, 143, 160, 169, 172, 191,
1297 192, 255, 191, 128, 174, 175, 190, 128,
1298 157, 158, 159, 160, 255, 176, 191, 192,
1299 255, 128, 150, 151, 159, 160, 161, 162,
1300 255, 175, 137, 138, 184, 191, 192, 255,
1301 128, 182, 183, 255, 130, 134, 139, 163,
1302 191, 192, 255, 128, 129, 130, 179, 180,
1303 191, 187, 189, 128, 177, 178, 183, 184,
1304 191, 128, 137, 138, 165, 166, 175, 176,
1305 255, 135, 159, 189, 191, 192, 255, 128,
1306 131, 132, 178, 179, 191, 143, 165, 191,
1307 128, 159, 160, 175, 176, 185, 186, 190,
1308 128, 168, 169, 191, 131, 186, 128, 139,
1309 140, 159, 160, 182, 183, 189, 190, 255,
1310 176, 178, 180, 183, 184, 190, 191, 192,
1311 255, 129, 128, 130, 131, 154, 155, 157,
1312 158, 159, 160, 170, 171, 177, 178, 180,
1313 181, 191, 128, 167, 175, 129, 134, 135,
1314 136, 137, 142, 143, 144, 145, 150, 151,
1315 159, 160, 255, 155, 166, 175, 128, 162,
1316 163, 191, 164, 175, 135, 138, 188, 191,
1317 192, 255, 174, 175, 154, 191, 192, 255,
1318 157, 169, 183, 189, 191, 128, 134, 135,
1319 146, 147, 151, 152, 158, 159, 190, 130,
1320 133, 128, 255, 178, 191, 192, 255, 128,
1321 146, 147, 255, 190, 191, 192, 255, 128,
1322 143, 144, 255, 144, 145, 136, 175, 188,
1323 191, 192, 255, 181, 128, 175, 176, 255,
1324 189, 191, 192, 255, 128, 160, 161, 186,
1325 187, 191, 128, 129, 154, 155, 165, 166,
1326 255, 191, 192, 255, 128, 129, 130, 135,
1327 136, 137, 138, 143, 144, 145, 146, 151,
1328 152, 153, 154, 156, 157, 191, 128, 191,
1329 128, 129, 130, 131, 133, 138, 139, 140,
1330 141, 142, 143, 144, 145, 146, 147, 148,
1331 149, 152, 156, 157, 160, 161, 162, 163,
1332 164, 166, 168, 169, 170, 171, 172, 173,
1333 174, 176, 177, 132, 151, 153, 155, 158,
1334 175, 178, 179, 180, 191, 140, 167, 187,
1335 190, 128, 255, 142, 143, 158, 191, 192,
1336 255, 187, 191, 192, 255, 128, 180, 181,
1337 191, 128, 156, 157, 159, 160, 255, 145,
1338 191, 192, 255, 128, 159, 160, 175, 176,
1339 255, 139, 143, 182, 191, 192, 255, 144,
1340 132, 135, 150, 191, 192, 255, 158, 175,
1341 148, 151, 188, 191, 192, 255, 128, 167,
1342 168, 175, 176, 255, 164, 191, 192, 255,
1343 183, 191, 192, 255, 128, 149, 150, 159,
1344 160, 167, 168, 191, 136, 182, 188, 128,
1345 133, 134, 137, 138, 184, 185, 190, 191,
1346 255, 150, 159, 183, 191, 192, 255, 179,
1347 128, 159, 160, 181, 182, 191, 128, 149,
1348 150, 159, 160, 185, 186, 191, 128, 183,
1349 184, 189, 190, 191, 128, 148, 152, 129,
1350 143, 144, 179, 180, 191, 128, 159, 160,
1351 188, 189, 191, 128, 156, 157, 191, 136,
1352 128, 164, 165, 191, 128, 181, 182, 191,
1353 128, 149, 150, 159, 160, 178, 179, 191,
1354 128, 145, 146, 191, 128, 178, 179, 191,
1355 128, 130, 131, 132, 133, 134, 135, 136,
1356 138, 139, 140, 141, 144, 145, 146, 147,
1357 150, 151, 152, 153, 154, 156, 162, 163,
1358 171, 176, 177, 178, 129, 191, 128, 130,
1359 131, 183, 184, 191, 128, 130, 131, 175,
1360 176, 191, 128, 143, 144, 168, 169, 191,
1361 128, 130, 131, 166, 167, 191, 182, 128,
1362 143, 144, 178, 179, 191, 128, 130, 131,
1363 178, 179, 191, 128, 154, 156, 129, 132,
1364 133, 191, 146, 128, 171, 172, 191, 135,
1365 137, 142, 158, 128, 168, 169, 175, 176,
1366 255, 159, 191, 192, 255, 144, 128, 156,
1367 157, 161, 162, 191, 128, 134, 135, 138,
1368 139, 191, 128, 175, 176, 191, 134, 128,
1369 131, 132, 135, 136, 191, 128, 174, 175,
1370 191, 128, 151, 152, 155, 156, 191, 132,
1371 128, 191, 128, 170, 171, 191, 128, 153,
1372 154, 191, 160, 190, 192, 255, 128, 184,
1373 185, 191, 137, 128, 174, 175, 191, 128,
1374 129, 177, 178, 255, 144, 191, 192, 255,
1375 128, 142, 143, 144, 145, 146, 149, 129,
1376 148, 150, 191, 175, 191, 192, 255, 132,
1377 191, 192, 255, 128, 144, 129, 143, 145,
1378 191, 144, 153, 128, 143, 145, 152, 154,
1379 191, 135, 191, 192, 255, 160, 168, 169,
1380 171, 172, 173, 174, 188, 189, 190, 191,
1381 128, 159, 161, 167, 170, 187, 185, 191,
1382 192, 255, 128, 143, 144, 173, 174, 191,
1383 128, 131, 132, 162, 163, 183, 184, 188,
1384 189, 255, 133, 143, 145, 191, 192, 255,
1385 128, 146, 147, 159, 160, 191, 160, 128,
1386 191, 128, 129, 191, 192, 255, 159, 160,
1387 171, 128, 170, 172, 191, 192, 255, 173,
1388 191, 192, 255, 179, 191, 192, 255, 128,
1389 176, 177, 178, 129, 191, 128, 129, 130,
1390 191, 171, 175, 189, 191, 192, 255, 128,
1391 136, 137, 143, 144, 153, 154, 191, 144,
1392 145, 146, 147, 148, 149, 154, 155, 156,
1393 157, 158, 159, 128, 143, 150, 153, 160,
1394 191, 149, 157, 173, 186, 188, 160, 161,
1395 163, 164, 167, 168, 132, 134, 149, 157,
1396 186, 191, 139, 140, 192, 255, 133, 145,
1397 128, 134, 135, 137, 138, 255, 166, 167,
1398 129, 155, 187, 149, 181, 143, 175, 137,
1399 169, 131, 140, 191, 192, 255, 160, 163,
1400 164, 165, 184, 185, 186, 128, 159, 161,
1401 162, 166, 191, 133, 191, 192, 255, 132,
1402 160, 163, 167, 179, 184, 186, 128, 164,
1403 165, 168, 169, 187, 188, 191, 130, 135,
1404 137, 139, 144, 147, 151, 153, 155, 157,
1405 159, 163, 171, 179, 184, 189, 191, 128,
1406 140, 141, 148, 149, 160, 161, 164, 165,
1407 166, 167, 190, 138, 164, 170, 128, 155,
1408 156, 160, 161, 187, 188, 191, 128, 191,
1409 155, 156, 128, 191, 151, 191, 192, 255,
1410 156, 157, 160, 128, 191, 181, 191, 192,
1411 255, 158, 159, 186, 128, 185, 187, 191,
1412 192, 255, 162, 191, 192, 255, 160, 168,
1413 128, 159, 161, 167, 169, 191, 158, 191,
1414 192, 255, 9, 10, 13, 32, 33, 34,
1415 35, 37, 38, 46, 47, 60, 61, 62,
1416 64, 92, 95, 123, 124, 125, 126, 127,
1417 194, 195, 198, 199, 203, 204, 205, 206,
1418 207, 210, 212, 213, 214, 215, 216, 217,
1419 219, 220, 221, 222, 223, 224, 225, 226,
1420 227, 228, 233, 234, 237, 238, 239, 240,
1421 0, 39, 40, 45, 48, 57, 58, 63,
1422 65, 90, 91, 96, 97, 122, 192, 193,
1423 196, 218, 229, 236, 241, 247, 9, 32,
1424 10, 61, 10, 38, 46, 42, 47, 42,
1425 46, 69, 101, 48, 57, 60, 61, 61,
1426 62, 61, 45, 95, 194, 195, 198, 199,
1427 203, 204, 205, 206, 207, 210, 212, 213,
1428 214, 215, 216, 217, 219, 220, 221, 222,
1429 223, 224, 225, 226, 227, 228, 233, 234,
1430 237, 239, 240, 243, 48, 57, 65, 90,
1431 97, 122, 196, 218, 229, 236, 124, 125,
1432 128, 191, 170, 181, 186, 128, 191, 151,
1433 183, 128, 255, 192, 255, 0, 127, 173,
1434 130, 133, 146, 159, 165, 171, 175, 191,
1435 192, 255, 181, 190, 128, 175, 176, 183,
1436 184, 185, 186, 191, 134, 139, 141, 162,
1437 128, 135, 136, 255, 182, 130, 137, 176,
1438 151, 152, 154, 160, 136, 191, 192, 255,
1439 128, 143, 144, 170, 171, 175, 176, 178,
1440 179, 191, 128, 159, 160, 191, 176, 128,
1441 138, 139, 173, 174, 255, 148, 150, 164,
1442 167, 173, 176, 185, 189, 190, 192, 255,
1443 144, 128, 145, 146, 175, 176, 191, 128,
1444 140, 141, 255, 166, 176, 178, 191, 192,
1445 255, 186, 128, 137, 138, 170, 171, 179,
1446 180, 181, 182, 191, 160, 161, 162, 164,
1447 165, 166, 167, 168, 169, 170, 171, 172,
1448 173, 174, 175, 176, 177, 178, 179, 180,
1449 181, 182, 183, 184, 185, 186, 187, 188,
1450 189, 190, 128, 191, 128, 129, 130, 131,
1451 137, 138, 139, 140, 141, 142, 143, 144,
1452 153, 154, 155, 156, 157, 158, 159, 160,
1453 161, 162, 163, 164, 165, 166, 167, 168,
1454 169, 170, 171, 172, 173, 174, 175, 176,
1455 177, 178, 179, 180, 182, 183, 184, 188,
1456 189, 190, 191, 132, 187, 129, 130, 132,
1457 133, 134, 176, 177, 178, 179, 180, 181,
1458 182, 183, 128, 191, 128, 129, 130, 131,
1459 132, 133, 134, 135, 144, 136, 143, 145,
1460 191, 192, 255, 182, 183, 184, 128, 191,
1461 128, 191, 191, 128, 190, 192, 255, 128,
1462 146, 147, 148, 152, 153, 154, 155, 156,
1463 158, 159, 160, 161, 162, 163, 164, 165,
1464 166, 167, 168, 169, 170, 171, 172, 173,
1465 174, 175, 176, 129, 191, 192, 255, 158,
1466 159, 128, 157, 160, 191, 192, 255, 128,
1467 191, 164, 169, 171, 172, 173, 174, 175,
1468 180, 181, 182, 183, 184, 185, 187, 188,
1469 189, 190, 191, 128, 163, 165, 186, 144,
1470 145, 146, 147, 148, 150, 151, 152, 155,
1471 157, 158, 160, 170, 171, 172, 175, 128,
1472 159, 161, 169, 173, 191, 128, 191, 10,
1473 13, 34, 36, 37, 92, 128, 191, 192,
1474 223, 224, 239, 240, 247, 248, 255, 10,
1475 13, 34, 36, 37, 92, 128, 191, 192,
1476 223, 224, 239, 240, 247, 248, 255, 10,
1477 13, 34, 36, 37, 92, 128, 191, 192,
1478 223, 224, 239, 240, 247, 248, 255, 10,
1479 13, 34, 36, 37, 92, 128, 191, 192,
1480 223, 224, 239, 240, 247, 248, 255, 10,
1481 13, 36, 37, 92, 128, 191, 192, 223,
1482 224, 239, 240, 247, 248, 255, 36, 37,
1483 92, 123, 192, 223, 224, 239, 240, 247,
1484 10, 13, 34, 36, 37, 92, 123, 128,
1485 191, 192, 223, 224, 239, 240, 247, 248,
1486 255, 10, 13, 34, 36, 37, 92, 123,
1487 128, 191, 192, 223, 224, 239, 240, 247,
1488 248, 255, 10, 13, 34, 36, 37, 92,
1489 123, 128, 191, 192, 223, 224, 239, 240,
1490 247, 248, 255, 10, 13, 34, 36, 37,
1491 92, 128, 191, 192, 223, 224, 239, 240,
1492 247, 248, 255, 36, 37, 92, 123, 192,
1493 223, 224, 239, 240, 247, 10, 13, 34,
1494 36, 37, 92, 123, 128, 191, 192, 223,
1495 224, 239, 240, 247, 248, 255, 10, 13,
1496 34, 36, 37, 92, 128, 191, 192, 223,
1497 224, 239, 240, 247, 248, 255, 10, 13,
1498 34, 36, 37, 92, 128, 191, 192, 223,
1499 224, 239, 240, 247, 248, 255, 10, 13,
1500 34, 36, 37, 92, 128, 191, 192, 223,
1501 224, 239, 240, 247, 248, 255, 10, 13,
1502 34, 36, 37, 92, 128, 191, 192, 223,
1503 224, 239, 240, 247, 248, 255, 10, 13,
1504 34, 36, 37, 92, 128, 191, 192, 223,
1505 224, 239, 240, 247, 248, 255, 10, 13,
1506 34, 36, 37, 92, 128, 191, 192, 223,
1507 224, 239, 240, 247, 248, 255, 10, 13,
1508 34, 36, 37, 92, 128, 191, 192, 223,
1509 224, 239, 240, 247, 248, 255, 123, 126,
1510 123, 126, 128, 191, 128, 191, 128, 191,
1511 10, 13, 36, 37, 128, 191, 192, 223,
1512 224, 239, 240, 247, 248, 255, 10, 13,
1513 36, 37, 128, 191, 192, 223, 224, 239,
1514 240, 247, 248, 255, 10, 13, 36, 37,
1515 128, 191, 192, 223, 224, 239, 240, 247,
1516 248, 255, 10, 13, 36, 37, 128, 191,
1517 192, 223, 224, 239, 240, 247, 248, 255,
1518 126, 126, 128, 191, 128, 191, 128, 191,
1519 10, 13, 36, 37, 128, 191, 192, 223,
1520 224, 239, 240, 247, 248, 255, 10, 13,
1521 36, 37, 128, 191, 192, 223, 224, 239,
1522 240, 247, 248, 255, 126, 126, 128, 191,
1523 128, 191, 128, 191, 95, 194, 195, 198,
1524 199, 203, 204, 205, 206, 207, 210, 212,
1525 213, 214, 215, 216, 217, 219, 220, 221,
1526 222, 223, 224, 225, 226, 227, 228, 233,
1527 234, 237, 238, 239, 240, 65, 90, 97,
1528 122, 128, 191, 192, 193, 196, 218, 229,
1529 236, 241, 247, 248, 255, 45, 95, 194,
1530 195, 198, 199, 203, 204, 205, 206, 207,
1531 210, 212, 213, 214, 215, 216, 217, 219,
1532 220, 221, 222, 223, 224, 225, 226, 227,
1533 228, 233, 234, 237, 239, 240, 243, 48,
1534 57, 65, 90, 97, 122, 196, 218, 229,
1535 236, 128, 191, 170, 181, 186, 128, 191,
1536 151, 183, 128, 255, 192, 255, 0, 127,
1537 173, 130, 133, 146, 159, 165, 171, 175,
1538 191, 192, 255, 181, 190, 128, 175, 176,
1539 183, 184, 185, 186, 191, 134, 139, 141,
1540 162, 128, 135, 136, 255, 182, 130, 137,
1541 176, 151, 152, 154, 160, 136, 191, 192,
1542 255, 128, 143, 144, 170, 171, 175, 176,
1543 178, 179, 191, 128, 159, 160, 191, 176,
1544 128, 138, 139, 173, 174, 255, 148, 150,
1545 164, 167, 173, 176, 185, 189, 190, 192,
1546 255, 144, 128, 145, 146, 175, 176, 191,
1547 128, 140, 141, 255, 166, 176, 178, 191,
1548 192, 255, 186, 128, 137, 138, 170, 171,
1549 179, 180, 181, 182, 191, 160, 161, 162,
1550 164, 165, 166, 167, 168, 169, 170, 171,
1551 172, 173, 174, 175, 176, 177, 178, 179,
1552 180, 181, 182, 183, 184, 185, 186, 187,
1553 188, 189, 190, 128, 191, 128, 129, 130,
1554 131, 137, 138, 139, 140, 141, 142, 143,
1555 144, 153, 154, 155, 156, 157, 158, 159,
1556 160, 161, 162, 163, 164, 165, 166, 167,
1557 168, 169, 170, 171, 172, 173, 174, 175,
1558 176, 177, 178, 179, 180, 182, 183, 184,
1559 188, 189, 190, 191, 132, 187, 129, 130,
1560 132, 133, 134, 176, 177, 178, 179, 180,
1561 181, 182, 183, 128, 191, 128, 129, 130,
1562 131, 132, 133, 134, 135, 144, 136, 143,
1563 145, 191, 192, 255, 182, 183, 184, 128,
1564 191, 128, 191, 191, 128, 190, 192, 255,
1565 128, 146, 147, 148, 152, 153, 154, 155,
1566 156, 158, 159, 160, 161, 162, 163, 164,
1567 165, 166, 167, 168, 169, 170, 171, 172,
1568 173, 174, 175, 176, 129, 191, 192, 255,
1569 158, 159, 128, 157, 160, 191, 192, 255,
1570 128, 191, 164, 169, 171, 172, 173, 174,
1571 175, 180, 181, 182, 183, 184, 185, 187,
1572 188, 189, 190, 191, 128, 163, 165, 186,
1573 144, 145, 146, 147, 148, 150, 151, 152,
1574 155, 157, 158, 160, 170, 171, 172, 175,
1575 128, 159, 161, 169, 173, 191, 128, 191,
1576}
1577
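// _hcltok_single_lengths: in Ragel's table-driven output this array typically
// records, for each scanner state, how many single-character transition keys
// that state contributes to the flat key table emitted before it. The values
// below are generator output and should not be edited by hand.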
1578var _hcltok_single_lengths []byte = []byte{
1579 0, 1, 1, 1, 2, 3, 2, 0,
1580 32, 31, 36, 1, 4, 0, 0, 0,
1581 0, 1, 2, 1, 1, 1, 1, 0,
1582 1, 1, 0, 0, 2, 0, 0, 0,
1583 1, 32, 0, 0, 0, 0, 1, 3,
1584 1, 1, 1, 0, 2, 0, 1, 1,
1585 2, 0, 3, 0, 1, 0, 2, 1,
1586 2, 0, 0, 5, 1, 4, 0, 0,
1587 1, 43, 0, 0, 0, 2, 3, 2,
1588 1, 1, 0, 0, 0, 0, 0, 0,
1589 0, 0, 0, 0, 0, 0, 0, 0,
1590 0, 0, 0, 0, 0, 1, 1, 0,
1591 0, 0, 0, 0, 0, 0, 0, 4,
1592 1, 0, 15, 0, 0, 0, 1, 6,
1593 1, 0, 0, 1, 0, 2, 0, 0,
1594 0, 9, 0, 1, 1, 0, 0, 0,
1595 3, 0, 1, 0, 28, 0, 0, 0,
1596 1, 0, 1, 0, 0, 0, 1, 0,
1597 0, 0, 0, 0, 0, 0, 1, 0,
1598 2, 0, 0, 18, 0, 0, 1, 0,
1599 0, 0, 0, 0, 0, 0, 0, 1,
1600 0, 0, 0, 16, 36, 0, 0, 0,
1601 0, 1, 0, 0, 0, 0, 0, 1,
1602 0, 0, 0, 0, 0, 0, 2, 0,
1603 0, 0, 0, 0, 1, 0, 0, 0,
1604 0, 0, 0, 0, 28, 0, 0, 0,
1605 1, 1, 1, 1, 0, 0, 2, 0,
1606 1, 0, 0, 0, 0, 0, 0, 0,
1607 0, 0, 1, 1, 4, 0, 0, 2,
1608 2, 0, 11, 0, 0, 0, 0, 0,
1609 0, 0, 1, 1, 3, 0, 0, 4,
1610 0, 0, 0, 18, 0, 0, 0, 1,
1611 4, 1, 4, 1, 0, 3, 2, 2,
1612 2, 1, 0, 0, 1, 8, 0, 0,
1613 0, 4, 12, 0, 2, 0, 3, 0,
1614 1, 0, 2, 0, 1, 2, 0, 3,
1615 1, 2, 0, 0, 0, 0, 0, 1,
1616 1, 0, 0, 1, 28, 3, 0, 1,
1617 1, 2, 1, 0, 1, 1, 2, 1,
1618 1, 2, 1, 1, 0, 2, 1, 1,
1619 1, 1, 0, 0, 6, 1, 1, 0,
1620 0, 46, 1, 1, 0, 0, 0, 0,
1621 2, 1, 0, 0, 0, 1, 0, 0,
1622 0, 0, 0, 0, 0, 13, 2, 0,
1623 0, 0, 9, 0, 1, 28, 0, 1,
1624 3, 0, 2, 0, 0, 0, 1, 0,
1625 1, 1, 2, 0, 18, 2, 0, 0,
1626 16, 35, 0, 0, 0, 1, 0, 28,
1627 0, 0, 0, 0, 1, 0, 2, 0,
1628 0, 1, 0, 0, 1, 0, 0, 1,
1629 0, 0, 0, 0, 1, 11, 0, 0,
1630 0, 0, 4, 0, 12, 1, 7, 0,
1631 4, 0, 0, 0, 0, 1, 2, 1,
1632 1, 1, 1, 0, 1, 1, 0, 0,
1633 2, 0, 0, 0, 1, 32, 0, 0,
1634 0, 0, 1, 3, 1, 1, 1, 0,
1635 2, 0, 1, 1, 2, 0, 3, 0,
1636 1, 0, 2, 1, 2, 0, 0, 5,
1637 1, 4, 0, 0, 1, 43, 0, 0,
1638 0, 2, 3, 2, 1, 1, 0, 0,
1639 0, 0, 0, 0, 0, 0, 0, 0,
1640 0, 0, 0, 0, 0, 0, 0, 0,
1641 0, 1, 1, 0, 0, 0, 0, 0,
1642 0, 0, 0, 4, 1, 0, 15, 0,
1643 0, 0, 1, 6, 1, 0, 0, 1,
1644 0, 2, 0, 0, 0, 9, 0, 1,
1645 1, 0, 0, 0, 3, 0, 1, 0,
1646 28, 0, 0, 0, 1, 0, 1, 0,
1647 0, 0, 1, 0, 0, 0, 0, 0,
1648 0, 0, 1, 0, 2, 0, 0, 18,
1649 0, 0, 1, 0, 0, 0, 0, 0,
1650 0, 0, 0, 1, 0, 0, 0, 16,
1651 36, 0, 0, 0, 0, 1, 0, 0,
1652 0, 0, 0, 1, 0, 0, 0, 0,
1653 0, 0, 2, 0, 0, 0, 0, 0,
1654 1, 0, 0, 0, 0, 0, 0, 0,
1655 28, 0, 0, 0, 1, 1, 1, 1,
1656 0, 0, 2, 0, 1, 0, 0, 0,
1657 0, 0, 0, 0, 0, 0, 1, 1,
1658 4, 0, 0, 2, 2, 0, 11, 0,
1659 0, 0, 0, 0, 0, 0, 1, 1,
1660 3, 0, 0, 4, 0, 0, 0, 18,
1661 0, 0, 0, 1, 4, 1, 4, 1,
1662 0, 3, 2, 2, 2, 1, 0, 0,
1663 1, 8, 0, 0, 0, 4, 12, 0,
1664 2, 0, 3, 0, 1, 0, 2, 0,
1665 1, 2, 0, 0, 3, 0, 1, 1,
1666 1, 2, 2, 4, 1, 6, 2, 4,
1667 2, 4, 1, 4, 0, 6, 1, 3,
1668 1, 2, 0, 2, 11, 1, 1, 1,
1669 0, 1, 1, 0, 2, 0, 3, 3,
1670 2, 1, 0, 0, 0, 1, 0, 1,
1671 0, 1, 1, 0, 2, 0, 0, 1,
1672 0, 0, 0, 0, 0, 0, 0, 1,
1673 0, 0, 0, 0, 0, 0, 0, 1,
1674 0, 0, 0, 4, 3, 2, 2, 0,
1675 6, 1, 0, 1, 1, 0, 2, 0,
1676 4, 3, 0, 1, 1, 0, 0, 0,
1677 0, 0, 0, 0, 1, 0, 0, 0,
1678 1, 0, 3, 0, 2, 0, 0, 0,
1679 3, 0, 2, 1, 1, 3, 1, 0,
1680 0, 0, 0, 0, 5, 2, 0, 0,
1681 0, 0, 0, 0, 1, 0, 0, 1,
1682 1, 0, 0, 35, 4, 0, 0, 0,
1683 0, 0, 0, 0, 1, 0, 0, 0,
1684 0, 0, 0, 3, 0, 1, 0, 0,
1685 3, 0, 0, 1, 0, 0, 0, 0,
1686 28, 0, 0, 0, 0, 1, 0, 3,
1687 1, 4, 0, 1, 0, 0, 1, 0,
1688 0, 1, 0, 0, 0, 0, 1, 1,
1689 0, 7, 0, 0, 2, 2, 0, 11,
1690 0, 0, 0, 0, 0, 1, 1, 3,
1691 0, 0, 4, 0, 0, 0, 12, 1,
1692 4, 1, 5, 2, 0, 3, 2, 2,
1693 2, 1, 7, 0, 7, 17, 3, 0,
1694 2, 0, 3, 0, 0, 1, 0, 2,
1695 0, 1, 1, 0, 0, 0, 0, 0,
1696 1, 1, 1, 0, 0, 0, 1, 1,
1697 1, 1, 0, 0, 0, 1, 1, 4,
1698 0, 0, 0, 0, 1, 2, 1, 1,
1699 1, 1, 0, 1, 1, 0, 0, 2,
1700 0, 0, 0, 1, 32, 0, 0, 0,
1701 0, 1, 3, 1, 1, 1, 0, 2,
1702 0, 1, 1, 2, 0, 3, 0, 1,
1703 0, 2, 1, 2, 0, 0, 5, 1,
1704 4, 0, 0, 1, 43, 0, 0, 0,
1705 2, 3, 2, 1, 1, 0, 0, 0,
1706 0, 0, 0, 0, 0, 0, 0, 0,
1707 0, 0, 0, 0, 0, 0, 0, 0,
1708 1, 1, 0, 0, 0, 0, 0, 0,
1709 0, 0, 4, 1, 0, 15, 0, 0,
1710 0, 1, 6, 1, 0, 0, 1, 0,
1711 2, 0, 0, 0, 9, 0, 1, 1,
1712 0, 0, 0, 3, 0, 1, 0, 28,
1713 0, 0, 0, 1, 0, 1, 0, 0,
1714 0, 1, 0, 0, 0, 0, 0, 0,
1715 0, 1, 0, 2, 0, 0, 18, 0,
1716 0, 1, 0, 0, 0, 0, 0, 0,
1717 0, 0, 1, 0, 0, 0, 16, 36,
1718 0, 0, 0, 0, 1, 0, 0, 0,
1719 0, 0, 1, 0, 0, 0, 0, 0,
1720 0, 2, 0, 0, 0, 0, 0, 1,
1721 0, 0, 0, 0, 0, 0, 0, 28,
1722 0, 0, 0, 1, 1, 1, 1, 0,
1723 0, 2, 0, 1, 0, 0, 0, 0,
1724 0, 0, 0, 0, 0, 1, 1, 4,
1725 0, 0, 2, 2, 0, 11, 0, 0,
1726 0, 0, 0, 0, 0, 1, 1, 3,
1727 0, 0, 4, 0, 0, 0, 18, 0,
1728 0, 0, 1, 4, 1, 4, 1, 0,
1729 3, 2, 2, 2, 1, 0, 0, 1,
1730 8, 0, 0, 0, 4, 12, 0, 2,
1731 0, 3, 0, 1, 0, 2, 0, 1,
1732 2, 0, 0, 3, 0, 1, 1, 1,
1733 2, 2, 4, 1, 6, 2, 4, 2,
1734 4, 1, 4, 0, 6, 1, 3, 1,
1735 2, 0, 2, 11, 1, 1, 1, 0,
1736 1, 1, 0, 2, 0, 3, 3, 2,
1737 1, 0, 0, 0, 1, 0, 1, 0,
1738 1, 1, 0, 2, 0, 0, 1, 0,
1739 0, 0, 0, 0, 0, 0, 1, 0,
1740 0, 0, 0, 0, 0, 0, 1, 0,
1741 0, 0, 4, 3, 2, 2, 0, 6,
1742 1, 0, 1, 1, 0, 2, 0, 4,
1743 3, 0, 1, 1, 0, 0, 0, 0,
1744 0, 0, 0, 1, 0, 0, 0, 1,
1745 0, 3, 0, 2, 0, 0, 0, 3,
1746 0, 2, 1, 1, 3, 1, 0, 0,
1747 0, 0, 0, 5, 2, 0, 0, 0,
1748 0, 0, 0, 1, 0, 0, 1, 1,
1749 0, 0, 35, 4, 0, 0, 0, 0,
1750 0, 0, 0, 1, 0, 0, 0, 0,
1751 0, 0, 3, 0, 1, 0, 0, 3,
1752 0, 0, 1, 0, 0, 0, 0, 28,
1753 0, 0, 0, 0, 1, 0, 3, 1,
1754 4, 0, 1, 0, 0, 1, 0, 0,
1755 1, 0, 0, 0, 0, 1, 1, 0,
1756 7, 0, 0, 2, 2, 0, 11, 0,
1757 0, 0, 0, 0, 1, 1, 3, 0,
1758 0, 4, 0, 0, 0, 12, 1, 4,
1759 1, 5, 2, 0, 3, 2, 2, 2,
1760 1, 7, 0, 7, 17, 3, 0, 2,
1761 0, 3, 0, 0, 1, 0, 2, 0,
1762 54, 2, 1, 1, 1, 1, 1, 2,
1763 1, 3, 2, 2, 1, 34, 1, 1,
1764 0, 3, 2, 0, 0, 0, 1, 2,
1765 4, 1, 0, 1, 0, 0, 0, 0,
1766 1, 1, 1, 0, 0, 1, 30, 47,
1767 13, 9, 3, 0, 1, 28, 2, 0,
1768 18, 16, 0, 6, 6, 6, 6, 5,
1769 4, 7, 7, 7, 6, 4, 7, 6,
1770 6, 6, 6, 6, 6, 6, 1, 1,
1771 1, 1, 0, 0, 0, 4, 4, 4,
1772 4, 1, 1, 0, 0, 0, 4, 2,
1773 1, 1, 0, 0, 0, 33, 34, 0,
1774 3, 2, 0, 0, 0, 1, 2, 4,
1775 1, 0, 1, 0, 0, 0, 0, 1,
1776 1, 1, 0, 0, 1, 30, 47, 13,
1777 9, 3, 0, 1, 28, 2, 0, 18,
1778 16, 0,
1779}
1780
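// _hcltok_range_lengths: per-state count of low/high key-range pairs; in
// Ragel-generated scanners each range occupies two consecutive entries in the
// key table, placed after that state's single keys.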
1781var _hcltok_range_lengths []byte = []byte{
1782 0, 0, 0, 0, 0, 1, 1, 1,
1783 5, 5, 5, 0, 0, 3, 0, 1,
1784 1, 4, 2, 3, 0, 1, 0, 2,
1785 2, 4, 2, 2, 3, 1, 1, 1,
1786 1, 0, 1, 1, 2, 2, 1, 4,
1787 6, 9, 6, 8, 5, 8, 7, 10,
1788 4, 6, 4, 7, 7, 5, 5, 4,
1789 5, 1, 2, 8, 4, 3, 3, 3,
1790 0, 3, 1, 2, 1, 2, 2, 3,
1791 3, 1, 3, 2, 2, 1, 2, 2,
1792 2, 3, 4, 4, 3, 1, 2, 1,
1793 3, 2, 2, 2, 2, 2, 3, 3,
1794 1, 1, 2, 1, 3, 2, 2, 3,
1795 2, 7, 0, 1, 4, 1, 2, 4,
1796 2, 1, 2, 0, 2, 2, 3, 5,
1797 5, 1, 4, 1, 1, 2, 2, 1,
1798 0, 0, 1, 1, 1, 1, 1, 2,
1799 2, 2, 2, 1, 1, 1, 4, 2,
1800 2, 3, 1, 4, 4, 6, 1, 3,
1801 1, 1, 2, 1, 1, 1, 5, 3,
1802 1, 1, 1, 2, 3, 3, 1, 2,
1803 2, 1, 4, 1, 2, 5, 2, 1,
1804 1, 0, 2, 2, 2, 2, 2, 2,
1805 2, 2, 2, 1, 1, 2, 4, 2,
1806 1, 2, 2, 2, 6, 1, 1, 2,
1807 1, 2, 1, 1, 1, 2, 2, 2,
1808 1, 3, 2, 5, 2, 8, 6, 2,
1809 2, 2, 2, 3, 1, 3, 1, 2,
1810 1, 3, 2, 2, 3, 1, 1, 1,
1811 1, 1, 1, 1, 2, 2, 4, 1,
1812 2, 1, 0, 1, 1, 1, 1, 0,
1813 1, 2, 3, 1, 3, 3, 1, 0,
1814 3, 0, 2, 3, 1, 0, 0, 0,
1815 0, 2, 2, 2, 2, 1, 5, 2,
1816 2, 5, 7, 5, 0, 1, 0, 1,
1817 1, 1, 1, 1, 0, 1, 1, 0,
1818 3, 3, 1, 1, 2, 1, 3, 5,
1819 1, 1, 2, 2, 1, 1, 1, 1,
1820 2, 6, 3, 7, 2, 6, 1, 6,
1821 2, 8, 0, 4, 2, 5, 2, 3,
1822 3, 3, 1, 2, 8, 2, 0, 2,
1823 1, 2, 1, 5, 2, 1, 3, 3,
1824 0, 2, 1, 2, 1, 0, 1, 1,
1825 3, 1, 1, 2, 3, 0, 0, 3,
1826 2, 4, 1, 4, 1, 1, 3, 1,
1827 1, 1, 1, 2, 2, 1, 3, 1,
1828 4, 3, 3, 1, 1, 5, 2, 1,
1829 1, 2, 1, 2, 1, 3, 2, 0,
1830 1, 1, 1, 1, 1, 1, 1, 2,
1831 1, 1, 1, 1, 1, 1, 1, 0,
1832 1, 1, 2, 2, 1, 1, 1, 3,
1833 2, 1, 0, 2, 1, 1, 1, 1,
1834 0, 3, 0, 1, 1, 4, 2, 3,
1835 0, 1, 0, 2, 2, 4, 2, 2,
1836 3, 1, 1, 1, 1, 0, 1, 1,
1837 2, 2, 1, 4, 6, 9, 6, 8,
1838 5, 8, 7, 10, 4, 6, 4, 7,
1839 7, 5, 5, 4, 5, 1, 2, 8,
1840 4, 3, 3, 3, 0, 3, 1, 2,
1841 1, 2, 2, 3, 3, 1, 3, 2,
1842 2, 1, 2, 2, 2, 3, 4, 4,
1843 3, 1, 2, 1, 3, 2, 2, 2,
1844 2, 2, 3, 3, 1, 1, 2, 1,
1845 3, 2, 2, 3, 2, 7, 0, 1,
1846 4, 1, 2, 4, 2, 1, 2, 0,
1847 2, 2, 3, 5, 5, 1, 4, 1,
1848 1, 2, 2, 1, 0, 0, 1, 1,
1849 1, 1, 1, 2, 2, 2, 2, 1,
1850 1, 1, 4, 2, 2, 3, 1, 4,
1851 4, 6, 1, 3, 1, 1, 2, 1,
1852 1, 1, 5, 3, 1, 1, 1, 2,
1853 3, 3, 1, 2, 2, 1, 4, 1,
1854 2, 5, 2, 1, 1, 0, 2, 2,
1855 2, 2, 2, 2, 2, 2, 2, 1,
1856 1, 2, 4, 2, 1, 2, 2, 2,
1857 6, 1, 1, 2, 1, 2, 1, 1,
1858 1, 2, 2, 2, 1, 3, 2, 5,
1859 2, 8, 6, 2, 2, 2, 2, 3,
1860 1, 3, 1, 2, 1, 3, 2, 2,
1861 3, 1, 1, 1, 1, 1, 1, 1,
1862 2, 2, 4, 1, 2, 1, 0, 1,
1863 1, 1, 1, 0, 1, 2, 3, 1,
1864 3, 3, 1, 0, 3, 0, 2, 3,
1865 1, 0, 0, 0, 0, 2, 2, 2,
1866 2, 1, 5, 2, 2, 5, 7, 5,
1867 0, 1, 0, 1, 1, 1, 1, 1,
1868 0, 1, 1, 1, 2, 2, 3, 3,
1869 4, 7, 5, 7, 5, 3, 3, 7,
1870 3, 13, 1, 3, 5, 3, 5, 3,
1871 6, 5, 2, 2, 8, 4, 1, 2,
1872 3, 2, 10, 2, 2, 0, 2, 3,
1873 3, 1, 2, 3, 3, 1, 2, 3,
1874 3, 4, 4, 2, 1, 2, 2, 3,
1875 2, 2, 5, 3, 2, 3, 2, 1,
1876 3, 3, 6, 2, 2, 5, 2, 5,
1877 1, 1, 2, 4, 1, 11, 1, 3,
1878 8, 4, 2, 1, 0, 4, 3, 3,
1879 3, 2, 9, 1, 1, 4, 3, 2,
1880 2, 2, 3, 4, 2, 3, 2, 4,
1881 3, 2, 2, 3, 3, 4, 3, 3,
1882 4, 2, 5, 4, 8, 7, 1, 2,
1883 1, 3, 1, 2, 5, 1, 2, 2,
1884 2, 2, 1, 3, 2, 2, 3, 3,
1885 1, 9, 1, 5, 1, 3, 2, 2,
1886 3, 2, 3, 3, 3, 1, 3, 3,
1887 2, 2, 4, 5, 3, 3, 4, 3,
1888 3, 3, 2, 2, 2, 4, 2, 2,
1889 1, 3, 3, 3, 3, 3, 3, 2,
1890 2, 3, 2, 3, 3, 2, 3, 2,
1891 3, 1, 2, 2, 2, 2, 2, 2,
1892 2, 2, 2, 2, 2, 3, 2, 3,
1893 2, 3, 5, 3, 3, 1, 2, 3,
1894 2, 2, 1, 2, 3, 4, 3, 0,
1895 3, 0, 2, 3, 1, 0, 0, 0,
1896 0, 2, 3, 2, 4, 6, 4, 1,
1897 1, 2, 1, 2, 1, 3, 2, 3,
1898 2, 0, 0, 1, 1, 1, 1, 1,
1899 0, 0, 0, 1, 1, 1, 0, 0,
1900 0, 0, 1, 1, 1, 0, 0, 0,
1901 3, 0, 1, 1, 4, 2, 3, 0,
1902 1, 0, 2, 2, 4, 2, 2, 3,
1903 1, 1, 1, 1, 0, 1, 1, 2,
1904 2, 1, 4, 6, 9, 6, 8, 5,
1905 8, 7, 10, 4, 6, 4, 7, 7,
1906 5, 5, 4, 5, 1, 2, 8, 4,
1907 3, 3, 3, 0, 3, 1, 2, 1,
1908 2, 2, 3, 3, 1, 3, 2, 2,
1909 1, 2, 2, 2, 3, 4, 4, 3,
1910 1, 2, 1, 3, 2, 2, 2, 2,
1911 2, 3, 3, 1, 1, 2, 1, 3,
1912 2, 2, 3, 2, 7, 0, 1, 4,
1913 1, 2, 4, 2, 1, 2, 0, 2,
1914 2, 3, 5, 5, 1, 4, 1, 1,
1915 2, 2, 1, 0, 0, 1, 1, 1,
1916 1, 1, 2, 2, 2, 2, 1, 1,
1917 1, 4, 2, 2, 3, 1, 4, 4,
1918 6, 1, 3, 1, 1, 2, 1, 1,
1919 1, 5, 3, 1, 1, 1, 2, 3,
1920 3, 1, 2, 2, 1, 4, 1, 2,
1921 5, 2, 1, 1, 0, 2, 2, 2,
1922 2, 2, 2, 2, 2, 2, 1, 1,
1923 2, 4, 2, 1, 2, 2, 2, 6,
1924 1, 1, 2, 1, 2, 1, 1, 1,
1925 2, 2, 2, 1, 3, 2, 5, 2,
1926 8, 6, 2, 2, 2, 2, 3, 1,
1927 3, 1, 2, 1, 3, 2, 2, 3,
1928 1, 1, 1, 1, 1, 1, 1, 2,
1929 2, 4, 1, 2, 1, 0, 1, 1,
1930 1, 1, 0, 1, 2, 3, 1, 3,
1931 3, 1, 0, 3, 0, 2, 3, 1,
1932 0, 0, 0, 0, 2, 2, 2, 2,
1933 1, 5, 2, 2, 5, 7, 5, 0,
1934 1, 0, 1, 1, 1, 1, 1, 0,
1935 1, 1, 1, 2, 2, 3, 3, 4,
1936 7, 5, 7, 5, 3, 3, 7, 3,
1937 13, 1, 3, 5, 3, 5, 3, 6,
1938 5, 2, 2, 8, 4, 1, 2, 3,
1939 2, 10, 2, 2, 0, 2, 3, 3,
1940 1, 2, 3, 3, 1, 2, 3, 3,
1941 4, 4, 2, 1, 2, 2, 3, 2,
1942 2, 5, 3, 2, 3, 2, 1, 3,
1943 3, 6, 2, 2, 5, 2, 5, 1,
1944 1, 2, 4, 1, 11, 1, 3, 8,
1945 4, 2, 1, 0, 4, 3, 3, 3,
1946 2, 9, 1, 1, 4, 3, 2, 2,
1947 2, 3, 4, 2, 3, 2, 4, 3,
1948 2, 2, 3, 3, 4, 3, 3, 4,
1949 2, 5, 4, 8, 7, 1, 2, 1,
1950 3, 1, 2, 5, 1, 2, 2, 2,
1951 2, 1, 3, 2, 2, 3, 3, 1,
1952 9, 1, 5, 1, 3, 2, 2, 3,
1953 2, 3, 3, 3, 1, 3, 3, 2,
1954 2, 4, 5, 3, 3, 4, 3, 3,
1955 3, 2, 2, 2, 4, 2, 2, 1,
1956 3, 3, 3, 3, 3, 3, 2, 2,
1957 3, 2, 3, 3, 2, 3, 2, 3,
1958 1, 2, 2, 2, 2, 2, 2, 2,
1959 2, 2, 2, 2, 3, 2, 3, 2,
1960 3, 5, 3, 3, 1, 2, 3, 2,
1961 2, 1, 2, 3, 4, 3, 0, 3,
1962 0, 2, 3, 1, 0, 0, 0, 0,
1963 2, 3, 2, 4, 6, 4, 1, 1,
1964 2, 1, 2, 1, 3, 2, 3, 2,
1965 11, 0, 0, 0, 0, 0, 0, 0,
1966 0, 1, 0, 0, 0, 5, 0, 0,
1967 1, 1, 1, 0, 1, 1, 5, 4,
1968 2, 0, 1, 0, 2, 2, 5, 2,
1969 3, 5, 3, 2, 3, 5, 1, 1,
1970 1, 3, 1, 1, 2, 2, 3, 1,
1971 2, 3, 1, 5, 5, 5, 5, 5,
1972 3, 5, 5, 5, 5, 3, 5, 5,
1973 5, 5, 5, 5, 5, 5, 0, 0,
1974 0, 0, 1, 1, 1, 5, 5, 5,
1975 5, 0, 0, 1, 1, 1, 5, 6,
1976 0, 0, 1, 1, 1, 8, 5, 1,
1977 1, 1, 0, 1, 1, 5, 4, 2,
1978 0, 1, 0, 2, 2, 5, 2, 3,
1979 5, 3, 2, 3, 5, 1, 1, 1,
1980 3, 1, 1, 2, 2, 3, 1, 2,
1981 3, 1,
1982}
1983
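// _hcltok_index_offsets: per-state starting offset into the transition index
// table. A Ragel-generated scanner typically resolves a transition roughly as
// sketched here (names are illustrative, not the exact generated identifiers):
//
//	idx := _hcltok_index_offsets[cs]       // first index slot for state cs
//	idx += matchedKeySlot(cs, inputByte)   // single keys first, then ranges
//	trans := _hcltok_indicies[idx]         // entry into target/action tables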
1984var _hcltok_index_offsets []int16 = []int16{
1985 0, 0, 2, 4, 6, 9, 14, 18,
1986 20, 58, 95, 137, 139, 144, 148, 149,
1987 151, 153, 159, 164, 169, 171, 174, 176,
1988 179, 183, 189, 192, 195, 201, 203, 205,
1989 207, 210, 243, 245, 247, 250, 253, 256,
1990 264, 272, 283, 291, 300, 308, 317, 326,
1991 338, 345, 352, 360, 368, 377, 383, 391,
1992 397, 405, 407, 410, 424, 430, 438, 442,
1993 446, 448, 495, 497, 500, 502, 507, 513,
1994 519, 524, 527, 531, 534, 537, 539, 542,
1995 545, 548, 552, 557, 562, 566, 568, 571,
1996 573, 577, 580, 583, 586, 589, 593, 598,
1997 602, 604, 606, 609, 611, 615, 618, 621,
1998 629, 633, 641, 657, 659, 664, 666, 670,
1999 681, 685, 687, 690, 692, 695, 700, 704,
2000 710, 716, 727, 732, 735, 738, 741, 744,
2001 746, 750, 751, 754, 756, 786, 788, 790,
2002 793, 797, 800, 804, 806, 808, 810, 816,
2003 819, 822, 826, 828, 833, 838, 845, 848,
2004 852, 856, 858, 861, 881, 883, 885, 892,
2005 896, 898, 900, 902, 905, 909, 913, 915,
2006 919, 922, 924, 929, 947, 986, 992, 995,
2007 997, 999, 1001, 1004, 1007, 1010, 1013, 1016,
2008 1020, 1023, 1026, 1029, 1031, 1033, 1036, 1043,
2009 1046, 1048, 1051, 1054, 1057, 1065, 1067, 1069,
2010 1072, 1074, 1077, 1079, 1081, 1111, 1114, 1117,
2011 1120, 1123, 1128, 1132, 1139, 1142, 1151, 1160,
2012 1163, 1167, 1170, 1173, 1177, 1179, 1183, 1185,
2013 1188, 1190, 1194, 1198, 1202, 1210, 1212, 1214,
2014 1218, 1222, 1224, 1237, 1239, 1242, 1245, 1250,
2015 1252, 1255, 1257, 1259, 1262, 1267, 1269, 1271,
2016 1276, 1278, 1281, 1285, 1305, 1309, 1313, 1315,
2017 1317, 1325, 1327, 1334, 1339, 1341, 1345, 1348,
2018 1351, 1354, 1358, 1361, 1364, 1368, 1378, 1384,
2019 1387, 1390, 1400, 1420, 1426, 1429, 1431, 1435,
2020 1437, 1440, 1442, 1446, 1448, 1450, 1454, 1456,
2021 1460, 1465, 1471, 1473, 1475, 1478, 1480, 1484,
2022 1491, 1494, 1496, 1499, 1503, 1533, 1538, 1540,
2023 1543, 1547, 1556, 1561, 1569, 1573, 1581, 1585,
2024 1593, 1597, 1608, 1610, 1616, 1619, 1627, 1631,
2025 1636, 1641, 1646, 1648, 1651, 1666, 1670, 1672,
2026 1675, 1677, 1726, 1729, 1736, 1739, 1741, 1745,
2027 1749, 1752, 1756, 1758, 1761, 1763, 1765, 1767,
2028 1769, 1773, 1775, 1777, 1780, 1784, 1798, 1801,
2029 1805, 1808, 1813, 1824, 1829, 1832, 1862, 1866,
2030 1869, 1874, 1876, 1880, 1883, 1886, 1888, 1893,
2031 1895, 1901, 1906, 1912, 1914, 1934, 1942, 1945,
2032 1947, 1965, 2003, 2005, 2008, 2010, 2015, 2018,
2033 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2064,
2034 2067, 2069, 2072, 2074, 2076, 2079, 2081, 2083,
2035 2085, 2087, 2089, 2092, 2095, 2098, 2111, 2113,
2036 2117, 2120, 2122, 2127, 2130, 2144, 2147, 2156,
2037 2158, 2163, 2167, 2168, 2170, 2172, 2178, 2183,
2038 2188, 2190, 2193, 2195, 2198, 2202, 2208, 2211,
2039 2214, 2220, 2222, 2224, 2226, 2229, 2262, 2264,
2040 2266, 2269, 2272, 2275, 2283, 2291, 2302, 2310,
2041 2319, 2327, 2336, 2345, 2357, 2364, 2371, 2379,
2042 2387, 2396, 2402, 2410, 2416, 2424, 2426, 2429,
2043 2443, 2449, 2457, 2461, 2465, 2467, 2514, 2516,
2044 2519, 2521, 2526, 2532, 2538, 2543, 2546, 2550,
2045 2553, 2556, 2558, 2561, 2564, 2567, 2571, 2576,
2046 2581, 2585, 2587, 2590, 2592, 2596, 2599, 2602,
2047 2605, 2608, 2612, 2617, 2621, 2623, 2625, 2628,
2048 2630, 2634, 2637, 2640, 2648, 2652, 2660, 2676,
2049 2678, 2683, 2685, 2689, 2700, 2704, 2706, 2709,
2050 2711, 2714, 2719, 2723, 2729, 2735, 2746, 2751,
2051 2754, 2757, 2760, 2763, 2765, 2769, 2770, 2773,
2052 2775, 2805, 2807, 2809, 2812, 2816, 2819, 2823,
2053 2825, 2827, 2829, 2835, 2838, 2841, 2845, 2847,
2054 2852, 2857, 2864, 2867, 2871, 2875, 2877, 2880,
2055 2900, 2902, 2904, 2911, 2915, 2917, 2919, 2921,
2056 2924, 2928, 2932, 2934, 2938, 2941, 2943, 2948,
2057 2966, 3005, 3011, 3014, 3016, 3018, 3020, 3023,
2058 3026, 3029, 3032, 3035, 3039, 3042, 3045, 3048,
2059 3050, 3052, 3055, 3062, 3065, 3067, 3070, 3073,
2060 3076, 3084, 3086, 3088, 3091, 3093, 3096, 3098,
2061 3100, 3130, 3133, 3136, 3139, 3142, 3147, 3151,
2062 3158, 3161, 3170, 3179, 3182, 3186, 3189, 3192,
2063 3196, 3198, 3202, 3204, 3207, 3209, 3213, 3217,
2064 3221, 3229, 3231, 3233, 3237, 3241, 3243, 3256,
2065 3258, 3261, 3264, 3269, 3271, 3274, 3276, 3278,
2066 3281, 3286, 3288, 3290, 3295, 3297, 3300, 3304,
2067 3324, 3328, 3332, 3334, 3336, 3344, 3346, 3353,
2068 3358, 3360, 3364, 3367, 3370, 3373, 3377, 3380,
2069 3383, 3387, 3397, 3403, 3406, 3409, 3419, 3439,
2070 3445, 3448, 3450, 3454, 3456, 3459, 3461, 3465,
2071 3467, 3469, 3473, 3475, 3477, 3483, 3486, 3491,
2072 3496, 3502, 3512, 3520, 3532, 3539, 3549, 3555,
2073 3567, 3573, 3591, 3594, 3602, 3608, 3618, 3625,
2074 3632, 3640, 3648, 3651, 3656, 3676, 3682, 3685,
2075 3689, 3693, 3697, 3709, 3712, 3717, 3718, 3724,
2076 3731, 3737, 3740, 3743, 3747, 3751, 3754, 3757,
2077 3762, 3766, 3772, 3778, 3781, 3785, 3788, 3791,
2078 3796, 3799, 3802, 3808, 3812, 3815, 3819, 3822,
2079 3825, 3829, 3833, 3840, 3843, 3846, 3852, 3855,
2080 3862, 3864, 3866, 3869, 3878, 3883, 3897, 3901,
2081 3905, 3920, 3926, 3929, 3932, 3934, 3939, 3945,
2082 3949, 3957, 3963, 3973, 3976, 3979, 3984, 3988,
2083 3991, 3994, 3997, 4001, 4006, 4010, 4014, 4017,
2084 4022, 4027, 4030, 4036, 4040, 4046, 4051, 4055,
2085 4059, 4067, 4070, 4078, 4084, 4094, 4105, 4108,
2086 4111, 4113, 4117, 4119, 4122, 4133, 4137, 4140,
2087 4143, 4146, 4149, 4151, 4155, 4159, 4162, 4166,
2088 4171, 4174, 4184, 4186, 4227, 4233, 4237, 4240,
2089 4243, 4247, 4250, 4254, 4258, 4263, 4265, 4269,
2090 4273, 4276, 4279, 4284, 4293, 4297, 4302, 4307,
2091 4311, 4318, 4322, 4325, 4329, 4332, 4337, 4340,
2092 4343, 4373, 4377, 4381, 4385, 4389, 4394, 4398,
2093 4404, 4408, 4416, 4419, 4424, 4428, 4431, 4436,
2094 4439, 4443, 4446, 4449, 4452, 4455, 4458, 4462,
2095 4466, 4469, 4479, 4482, 4485, 4490, 4496, 4499,
2096 4514, 4517, 4521, 4527, 4531, 4535, 4538, 4542,
2097 4549, 4552, 4555, 4561, 4564, 4568, 4573, 4589,
2098 4591, 4599, 4601, 4609, 4615, 4617, 4621, 4624,
2099 4627, 4630, 4634, 4645, 4648, 4660, 4684, 4692,
2100 4694, 4698, 4701, 4706, 4709, 4711, 4716, 4719,
2101 4725, 4728, 4730, 4732, 4734, 4736, 4738, 4740,
2102 4742, 4744, 4746, 4748, 4750, 4752, 4754, 4756,
2103 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772,
2104 4777, 4781, 4782, 4784, 4786, 4792, 4797, 4802,
2105 4804, 4807, 4809, 4812, 4816, 4822, 4825, 4828,
2106 4834, 4836, 4838, 4840, 4843, 4876, 4878, 4880,
2107 4883, 4886, 4889, 4897, 4905, 4916, 4924, 4933,
2108 4941, 4950, 4959, 4971, 4978, 4985, 4993, 5001,
2109 5010, 5016, 5024, 5030, 5038, 5040, 5043, 5057,
2110 5063, 5071, 5075, 5079, 5081, 5128, 5130, 5133,
2111 5135, 5140, 5146, 5152, 5157, 5160, 5164, 5167,
2112 5170, 5172, 5175, 5178, 5181, 5185, 5190, 5195,
2113 5199, 5201, 5204, 5206, 5210, 5213, 5216, 5219,
2114 5222, 5226, 5231, 5235, 5237, 5239, 5242, 5244,
2115 5248, 5251, 5254, 5262, 5266, 5274, 5290, 5292,
2116 5297, 5299, 5303, 5314, 5318, 5320, 5323, 5325,
2117 5328, 5333, 5337, 5343, 5349, 5360, 5365, 5368,
2118 5371, 5374, 5377, 5379, 5383, 5384, 5387, 5389,
2119 5419, 5421, 5423, 5426, 5430, 5433, 5437, 5439,
2120 5441, 5443, 5449, 5452, 5455, 5459, 5461, 5466,
2121 5471, 5478, 5481, 5485, 5489, 5491, 5494, 5514,
2122 5516, 5518, 5525, 5529, 5531, 5533, 5535, 5538,
2123 5542, 5546, 5548, 5552, 5555, 5557, 5562, 5580,
2124 5619, 5625, 5628, 5630, 5632, 5634, 5637, 5640,
2125 5643, 5646, 5649, 5653, 5656, 5659, 5662, 5664,
2126 5666, 5669, 5676, 5679, 5681, 5684, 5687, 5690,
2127 5698, 5700, 5702, 5705, 5707, 5710, 5712, 5714,
2128 5744, 5747, 5750, 5753, 5756, 5761, 5765, 5772,
2129 5775, 5784, 5793, 5796, 5800, 5803, 5806, 5810,
2130 5812, 5816, 5818, 5821, 5823, 5827, 5831, 5835,
2131 5843, 5845, 5847, 5851, 5855, 5857, 5870, 5872,
2132 5875, 5878, 5883, 5885, 5888, 5890, 5892, 5895,
2133 5900, 5902, 5904, 5909, 5911, 5914, 5918, 5938,
2134 5942, 5946, 5948, 5950, 5958, 5960, 5967, 5972,
2135 5974, 5978, 5981, 5984, 5987, 5991, 5994, 5997,
2136 6001, 6011, 6017, 6020, 6023, 6033, 6053, 6059,
2137 6062, 6064, 6068, 6070, 6073, 6075, 6079, 6081,
2138 6083, 6087, 6089, 6091, 6097, 6100, 6105, 6110,
2139 6116, 6126, 6134, 6146, 6153, 6163, 6169, 6181,
2140 6187, 6205, 6208, 6216, 6222, 6232, 6239, 6246,
2141 6254, 6262, 6265, 6270, 6290, 6296, 6299, 6303,
2142 6307, 6311, 6323, 6326, 6331, 6332, 6338, 6345,
2143 6351, 6354, 6357, 6361, 6365, 6368, 6371, 6376,
2144 6380, 6386, 6392, 6395, 6399, 6402, 6405, 6410,
2145 6413, 6416, 6422, 6426, 6429, 6433, 6436, 6439,
2146 6443, 6447, 6454, 6457, 6460, 6466, 6469, 6476,
2147 6478, 6480, 6483, 6492, 6497, 6511, 6515, 6519,
2148 6534, 6540, 6543, 6546, 6548, 6553, 6559, 6563,
2149 6571, 6577, 6587, 6590, 6593, 6598, 6602, 6605,
2150 6608, 6611, 6615, 6620, 6624, 6628, 6631, 6636,
2151 6641, 6644, 6650, 6654, 6660, 6665, 6669, 6673,
2152 6681, 6684, 6692, 6698, 6708, 6719, 6722, 6725,
2153 6727, 6731, 6733, 6736, 6747, 6751, 6754, 6757,
2154 6760, 6763, 6765, 6769, 6773, 6776, 6780, 6785,
2155 6788, 6798, 6800, 6841, 6847, 6851, 6854, 6857,
2156 6861, 6864, 6868, 6872, 6877, 6879, 6883, 6887,
2157 6890, 6893, 6898, 6907, 6911, 6916, 6921, 6925,
2158 6932, 6936, 6939, 6943, 6946, 6951, 6954, 6957,
2159 6987, 6991, 6995, 6999, 7003, 7008, 7012, 7018,
2160 7022, 7030, 7033, 7038, 7042, 7045, 7050, 7053,
2161 7057, 7060, 7063, 7066, 7069, 7072, 7076, 7080,
2162 7083, 7093, 7096, 7099, 7104, 7110, 7113, 7128,
2163 7131, 7135, 7141, 7145, 7149, 7152, 7156, 7163,
2164 7166, 7169, 7175, 7178, 7182, 7187, 7203, 7205,
2165 7213, 7215, 7223, 7229, 7231, 7235, 7238, 7241,
2166 7244, 7248, 7259, 7262, 7274, 7298, 7306, 7308,
2167 7312, 7315, 7320, 7323, 7325, 7330, 7333, 7339,
2168 7342, 7408, 7411, 7413, 7415, 7417, 7419, 7421,
2169 7424, 7426, 7431, 7434, 7437, 7439, 7479, 7481,
2170 7483, 7485, 7490, 7494, 7495, 7497, 7499, 7506,
2171 7513, 7520, 7522, 7524, 7526, 7529, 7532, 7538,
2172 7541, 7546, 7553, 7558, 7561, 7565, 7572, 7604,
2173 7653, 7668, 7681, 7686, 7688, 7692, 7723, 7729,
2174 7731, 7752, 7772, 7774, 7786, 7798, 7810, 7822,
2175 7833, 7841, 7854, 7867, 7880, 7892, 7900, 7913,
2176 7925, 7937, 7949, 7961, 7973, 7985, 7997, 7999,
2177 8001, 8003, 8005, 8007, 8009, 8011, 8021, 8031,
2178 8041, 8051, 8053, 8055, 8057, 8059, 8061, 8071,
2179 8080, 8082, 8084, 8086, 8088, 8090, 8132, 8172,
2180 8174, 8179, 8183, 8184, 8186, 8188, 8195, 8202,
2181 8209, 8211, 8213, 8215, 8218, 8221, 8227, 8230,
2182 8235, 8242, 8247, 8250, 8254, 8261, 8293, 8342,
2183 8357, 8370, 8375, 8377, 8381, 8412, 8418, 8420,
2184 8441, 8461,
2185}
2186
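// _hcltok_indicies: maps each resolved key slot (and each state's default
// slot) to an entry in the transition target/action tables, allowing several
// key slots to share one transition. The spelling "indicies" is as emitted by
// the generator.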
2187var _hcltok_indicies []int16 = []int16{
2188 2, 1, 4, 3, 6, 5, 6, 7,
2189 5, 9, 11, 11, 10, 8, 12, 12,
2190 10, 8, 10, 8, 13, 14, 15, 16,
2191 18, 19, 20, 21, 22, 23, 24, 25,
2192 26, 27, 28, 29, 30, 31, 32, 33,
2193 34, 35, 36, 37, 38, 39, 40, 42,
2194 43, 44, 45, 46, 14, 14, 17, 17,
2195 41, 3, 14, 15, 16, 18, 19, 20,
2196 21, 22, 23, 24, 25, 26, 27, 28,
2197 29, 30, 31, 32, 33, 34, 35, 36,
2198 37, 38, 39, 40, 42, 43, 44, 45,
2199 46, 14, 14, 17, 17, 41, 3, 47,
2200 48, 14, 14, 49, 16, 18, 19, 20,
2201 19, 50, 51, 23, 52, 25, 26, 53,
2202 54, 55, 56, 57, 58, 59, 60, 61,
2203 62, 63, 64, 65, 40, 42, 66, 44,
2204 67, 68, 69, 14, 14, 14, 17, 41,
2205 3, 47, 3, 14, 14, 14, 14, 3,
2206 14, 14, 14, 3, 14, 3, 14, 3,
2207 14, 3, 3, 3, 3, 3, 14, 3,
2208 3, 3, 3, 14, 14, 14, 14, 14,
2209 3, 3, 14, 3, 3, 14, 3, 14,
2210 3, 3, 14, 3, 3, 3, 14, 14,
2211 14, 14, 14, 14, 3, 14, 14, 3,
2212 14, 14, 3, 3, 3, 3, 3, 3,
2213 14, 14, 3, 3, 14, 3, 14, 14,
2214 14, 3, 70, 71, 72, 73, 17, 74,
2215 75, 76, 77, 78, 79, 80, 81, 82,
2216 83, 84, 85, 86, 87, 88, 89, 90,
2217 91, 92, 93, 94, 95, 96, 97, 98,
2218 99, 100, 3, 14, 3, 14, 3, 14,
2219 14, 3, 14, 14, 3, 3, 3, 14,
2220 3, 3, 3, 3, 3, 3, 3, 14,
2221 3, 3, 3, 3, 3, 3, 3, 14,
2222 14, 14, 14, 14, 14, 14, 14, 14,
2223 14, 14, 3, 3, 3, 3, 3, 3,
2224 3, 3, 14, 14, 14, 14, 14, 14,
2225 14, 14, 14, 3, 3, 3, 3, 3,
2226 3, 3, 3, 14, 14, 14, 14, 14,
2227 14, 14, 14, 14, 3, 14, 14, 14,
2228 14, 14, 14, 14, 14, 3, 14, 14,
2229 14, 14, 14, 14, 14, 14, 14, 14,
2230 14, 3, 14, 14, 14, 14, 14, 14,
2231 3, 14, 14, 14, 14, 14, 14, 3,
2232 3, 3, 3, 3, 3, 3, 3, 14,
2233 14, 14, 14, 14, 14, 14, 14, 3,
2234 14, 14, 14, 14, 14, 14, 14, 14,
2235 3, 14, 14, 14, 14, 14, 3, 3,
2236 3, 3, 3, 3, 3, 3, 14, 14,
2237 14, 14, 14, 14, 3, 14, 14, 14,
2238 14, 14, 14, 14, 3, 14, 3, 14,
2239 14, 3, 14, 14, 14, 14, 14, 14,
2240 14, 14, 14, 14, 14, 14, 14, 3,
2241 14, 14, 14, 14, 14, 3, 14, 14,
2242 14, 14, 14, 14, 14, 3, 14, 14,
2243 14, 3, 14, 14, 14, 3, 14, 3,
2244 101, 102, 103, 104, 105, 106, 107, 108,
2245 109, 110, 111, 112, 113, 114, 115, 116,
2246 117, 19, 118, 119, 120, 121, 122, 123,
2247 124, 125, 126, 127, 128, 129, 130, 131,
2248 132, 133, 134, 135, 17, 18, 136, 137,
2249 138, 139, 140, 17, 19, 17, 3, 14,
2250 3, 14, 14, 3, 3, 14, 3, 3,
2251 3, 3, 14, 3, 3, 3, 3, 3,
2252 14, 3, 3, 3, 3, 3, 14, 14,
2253 14, 14, 14, 3, 3, 3, 14, 3,
2254 3, 3, 14, 14, 14, 3, 3, 3,
2255 14, 14, 3, 3, 3, 14, 14, 14,
2256 3, 3, 3, 14, 14, 14, 14, 3,
2257 14, 14, 14, 14, 3, 3, 3, 3,
2258 3, 14, 14, 14, 14, 3, 3, 14,
2259 14, 14, 3, 3, 14, 14, 14, 14,
2260 3, 14, 14, 3, 14, 14, 3, 3,
2261 3, 14, 14, 14, 3, 3, 3, 3,
2262 14, 14, 14, 14, 14, 3, 3, 3,
2263 3, 14, 3, 14, 14, 3, 14, 14,
2264 3, 14, 3, 14, 14, 14, 3, 14,
2265 14, 3, 3, 3, 14, 3, 3, 3,
2266 3, 3, 3, 3, 14, 14, 14, 14,
2267 3, 14, 14, 14, 14, 14, 14, 14,
2268 3, 141, 142, 143, 144, 145, 146, 147,
2269 148, 149, 17, 150, 151, 152, 153, 154,
2270 3, 14, 3, 3, 3, 3, 3, 14,
2271 14, 3, 14, 14, 14, 3, 14, 14,
2272 14, 14, 14, 14, 14, 14, 14, 14,
2273 3, 14, 14, 14, 3, 3, 14, 14,
2274 14, 3, 3, 14, 3, 3, 14, 14,
2275 14, 14, 14, 3, 3, 3, 3, 14,
2276 14, 14, 14, 14, 14, 3, 14, 14,
2277 14, 14, 14, 3, 155, 112, 156, 157,
2278 158, 17, 159, 160, 19, 17, 3, 14,
2279 14, 14, 14, 3, 3, 3, 14, 3,
2280 3, 14, 14, 14, 3, 3, 3, 14,
2281 14, 3, 122, 3, 19, 17, 17, 161,
2282 3, 17, 3, 14, 19, 162, 163, 19,
2283 164, 165, 19, 60, 166, 167, 168, 169,
2284 170, 19, 171, 172, 173, 19, 174, 175,
2285 176, 18, 177, 178, 179, 18, 180, 19,
2286 17, 3, 3, 14, 14, 3, 3, 3,
2287 14, 14, 14, 14, 3, 14, 14, 3,
2288 3, 3, 3, 14, 14, 3, 3, 14,
2289 14, 3, 3, 3, 3, 3, 3, 14,
2290 14, 14, 3, 3, 3, 14, 3, 3,
2291 3, 14, 14, 3, 14, 14, 14, 14,
2292 3, 14, 14, 14, 14, 3, 14, 14,
2293 14, 14, 14, 14, 3, 3, 3, 14,
2294 14, 14, 14, 3, 181, 182, 3, 17,
2295 3, 14, 3, 3, 14, 19, 183, 184,
2296 185, 186, 60, 187, 188, 58, 189, 190,
2297 191, 192, 193, 194, 195, 196, 197, 17,
2298 3, 3, 14, 3, 14, 14, 14, 14,
2299 14, 14, 14, 3, 14, 14, 14, 3,
2300 14, 3, 3, 14, 3, 14, 3, 3,
2301 14, 14, 14, 14, 3, 14, 14, 14,
2302 3, 3, 14, 14, 14, 14, 3, 14,
2303 14, 3, 3, 14, 14, 14, 14, 14,
2304 3, 198, 199, 200, 201, 202, 203, 204,
2305 205, 206, 207, 208, 204, 209, 210, 211,
2306 212, 41, 3, 213, 214, 19, 215, 216,
2307 217, 218, 219, 220, 221, 222, 223, 19,
2308 17, 224, 225, 226, 227, 19, 228, 229,
2309 230, 231, 232, 233, 234, 235, 236, 237,
2310 238, 239, 240, 241, 242, 19, 147, 17,
2311 243, 3, 14, 14, 14, 14, 14, 3,
2312 3, 3, 14, 3, 14, 14, 3, 14,
2313 3, 14, 14, 3, 3, 3, 14, 14,
2314 14, 3, 3, 3, 14, 14, 14, 3,
2315 3, 3, 3, 14, 3, 3, 14, 3,
2316 3, 14, 14, 14, 3, 3, 14, 3,
2317 14, 14, 14, 3, 14, 14, 14, 14,
2318 14, 14, 3, 3, 3, 14, 14, 3,
2319 14, 14, 3, 14, 14, 3, 14, 14,
2320 3, 14, 14, 14, 14, 14, 14, 14,
2321 3, 14, 3, 14, 3, 14, 14, 3,
2322 14, 3, 14, 14, 3, 14, 3, 14,
2323 3, 244, 215, 245, 246, 247, 248, 249,
2324 250, 251, 252, 253, 101, 254, 19, 255,
2325 256, 257, 19, 258, 132, 259, 260, 261,
2326 262, 263, 264, 265, 266, 19, 3, 3,
2327 3, 14, 14, 14, 3, 14, 14, 3,
2328 14, 14, 3, 3, 3, 3, 3, 14,
2329 14, 14, 14, 3, 14, 14, 14, 14,
2330 14, 14, 3, 3, 3, 14, 14, 14,
2331 14, 14, 14, 14, 14, 14, 3, 14,
2332 14, 14, 14, 14, 14, 14, 14, 3,
2333 14, 14, 3, 3, 3, 3, 14, 14,
2334 14, 3, 3, 3, 14, 3, 3, 3,
2335 14, 14, 3, 14, 14, 14, 3, 14,
2336 3, 3, 3, 14, 14, 3, 14, 14,
2337 14, 3, 14, 14, 14, 3, 3, 3,
2338 3, 14, 19, 184, 267, 268, 17, 19,
2339 17, 3, 3, 14, 3, 14, 19, 267,
2340 17, 3, 19, 269, 17, 3, 3, 14,
2341 19, 270, 271, 272, 175, 273, 274, 19,
2342 275, 276, 277, 17, 3, 3, 14, 14,
2343 14, 3, 14, 14, 3, 14, 14, 14,
2344 14, 3, 3, 14, 3, 3, 14, 14,
2345 3, 14, 3, 19, 17, 3, 278, 19,
2346 279, 3, 17, 3, 14, 3, 14, 280,
2347 19, 281, 282, 3, 14, 3, 3, 3,
2348 14, 14, 14, 14, 3, 283, 284, 285,
2349 19, 286, 287, 288, 289, 290, 291, 292,
2350 293, 294, 295, 296, 297, 298, 299, 17,
2351 3, 14, 14, 14, 3, 3, 3, 3,
2352 14, 14, 3, 3, 14, 3, 3, 3,
2353 3, 3, 3, 3, 14, 3, 14, 3,
2354 3, 3, 3, 3, 3, 14, 14, 14,
2355 14, 14, 3, 3, 14, 3, 3, 3,
2356 14, 3, 3, 14, 3, 3, 14, 3,
2357 3, 14, 3, 3, 3, 14, 14, 14,
2358 3, 3, 3, 14, 14, 14, 14, 3,
2359 300, 19, 301, 19, 302, 303, 304, 305,
2360 17, 3, 14, 14, 14, 14, 14, 3,
2361 3, 3, 14, 3, 3, 14, 14, 14,
2362 14, 14, 14, 14, 14, 14, 14, 3,
2363 14, 14, 14, 14, 14, 14, 14, 14,
2364 14, 14, 14, 14, 14, 14, 14, 14,
2365 14, 14, 14, 3, 14, 14, 14, 14,
2366 14, 3, 306, 19, 17, 3, 14, 307,
2367 19, 103, 17, 3, 14, 308, 3, 17,
2368 3, 14, 19, 309, 17, 3, 3, 14,
2369 310, 3, 19, 311, 17, 3, 3, 14,
2370 14, 14, 14, 3, 14, 14, 14, 14,
2371 3, 14, 14, 14, 14, 14, 3, 3,
2372 14, 3, 14, 14, 14, 3, 14, 3,
2373 14, 14, 14, 3, 3, 3, 3, 3,
2374 3, 3, 14, 14, 14, 3, 14, 3,
2375 3, 3, 14, 14, 14, 14, 3, 312,
2376 313, 72, 314, 315, 316, 317, 318, 319,
2377 320, 321, 322, 323, 324, 325, 326, 327,
2378 328, 329, 330, 331, 332, 334, 335, 336,
2379 337, 338, 339, 333, 3, 14, 14, 14,
2380 14, 3, 14, 3, 14, 14, 3, 14,
2381 14, 14, 3, 3, 3, 3, 3, 3,
2382 3, 3, 3, 14, 14, 14, 14, 14,
2383 3, 14, 14, 14, 14, 14, 14, 14,
2384 3, 14, 14, 14, 3, 14, 14, 14,
2385 14, 14, 14, 14, 3, 14, 14, 14,
2386 3, 14, 14, 14, 14, 14, 14, 14,
2387 3, 14, 14, 14, 3, 14, 14, 14,
2388 14, 14, 14, 14, 14, 14, 14, 3,
2389 14, 3, 14, 14, 14, 14, 14, 3,
2390 14, 14, 3, 14, 14, 14, 14, 14,
2391 14, 14, 3, 14, 14, 14, 3, 14,
2392 14, 14, 14, 3, 14, 14, 14, 14,
2393 3, 14, 14, 14, 14, 3, 14, 3,
2394 14, 14, 3, 14, 14, 14, 14, 14,
2395 14, 14, 14, 14, 14, 14, 14, 14,
2396 14, 3, 14, 14, 14, 3, 14, 3,
2397 14, 14, 3, 14, 3, 340, 341, 342,
2398 104, 105, 106, 107, 108, 343, 110, 111,
2399 112, 113, 114, 115, 344, 345, 170, 346,
2400 261, 120, 347, 122, 232, 272, 125, 348,
2401 349, 350, 351, 352, 353, 354, 355, 356,
2402 357, 134, 358, 19, 17, 18, 19, 137,
2403 138, 139, 140, 17, 17, 3, 14, 14,
2404 3, 14, 14, 14, 14, 14, 14, 3,
2405 3, 3, 14, 3, 14, 14, 14, 14,
2406 3, 14, 14, 14, 3, 14, 14, 3,
2407 14, 14, 14, 3, 3, 14, 14, 14,
2408 3, 3, 14, 14, 3, 14, 3, 14,
2409 3, 14, 14, 14, 3, 3, 14, 14,
2410 3, 14, 14, 3, 14, 14, 14, 3,
2411 359, 143, 145, 146, 147, 148, 149, 17,
2412 360, 151, 361, 153, 362, 3, 14, 14,
2413 3, 3, 3, 3, 14, 3, 3, 14,
2414 14, 14, 14, 14, 3, 363, 112, 364,
2415 157, 158, 17, 159, 160, 19, 17, 3,
2416 14, 14, 14, 14, 3, 3, 3, 14,
2417 19, 162, 163, 19, 365, 366, 222, 311,
2418 166, 167, 168, 367, 170, 368, 369, 370,
2419 371, 372, 373, 374, 375, 376, 377, 178,
2420 179, 18, 378, 19, 17, 3, 3, 3,
2421 3, 14, 14, 14, 3, 3, 3, 3,
2422 3, 14, 14, 3, 14, 14, 14, 3,
2423 14, 14, 3, 3, 3, 14, 14, 3,
2424 14, 14, 14, 14, 3, 14, 3, 14,
2425 14, 14, 14, 14, 3, 3, 3, 3,
2426 3, 14, 14, 14, 14, 14, 14, 3,
2427 14, 3, 19, 183, 184, 379, 186, 60,
2428 187, 188, 58, 189, 190, 380, 17, 193,
2429 381, 195, 196, 197, 17, 3, 14, 14,
2430 14, 14, 14, 14, 14, 3, 14, 14,
2431 3, 14, 3, 382, 383, 200, 201, 202,
2432 384, 204, 205, 385, 386, 387, 204, 209,
2433 210, 211, 212, 41, 3, 213, 214, 19,
2434 215, 216, 218, 388, 220, 389, 222, 223,
2435 19, 17, 390, 225, 226, 227, 19, 228,
2436 229, 230, 231, 232, 233, 234, 235, 391,
2437 237, 238, 392, 240, 241, 242, 19, 147,
2438 17, 243, 3, 3, 14, 3, 3, 14,
2439 3, 14, 14, 14, 14, 14, 3, 14,
2440 14, 3, 393, 394, 395, 396, 397, 398,
2441 399, 400, 250, 401, 322, 402, 216, 403,
2442 404, 405, 406, 407, 404, 408, 409, 410,
2443 261, 411, 263, 412, 413, 274, 3, 14,
2444 3, 14, 3, 14, 3, 14, 3, 14,
2445 14, 3, 14, 3, 14, 14, 14, 3,
2446 14, 14, 3, 3, 14, 14, 14, 3,
2447 14, 3, 14, 3, 14, 14, 3, 14,
2448 3, 14, 3, 14, 3, 14, 3, 14,
2449 3, 3, 3, 14, 14, 14, 3, 14,
2450 14, 3, 19, 270, 232, 414, 404, 415,
2451 274, 19, 416, 417, 277, 17, 3, 14,
2452 3, 14, 14, 14, 3, 3, 3, 14,
2453 14, 3, 280, 19, 281, 418, 3, 14,
2454 14, 3, 19, 286, 287, 288, 289, 290,
2455 291, 292, 293, 294, 295, 419, 17, 3,
2456 3, 3, 14, 19, 420, 19, 268, 303,
2457 304, 305, 17, 3, 3, 14, 422, 422,
2458 422, 422, 421, 422, 422, 422, 421, 422,
2459 421, 422, 422, 421, 421, 421, 421, 421,
2460 421, 422, 421, 421, 421, 421, 422, 422,
2461 422, 422, 422, 421, 421, 422, 421, 421,
2462 422, 421, 422, 421, 421, 422, 421, 421,
2463 421, 422, 422, 422, 422, 422, 422, 421,
2464 422, 422, 421, 422, 422, 421, 421, 421,
2465 421, 421, 421, 422, 422, 421, 421, 422,
2466 421, 422, 422, 422, 421, 423, 424, 425,
2467 426, 427, 428, 429, 430, 431, 432, 433,
2468 434, 435, 436, 437, 438, 439, 440, 441,
2469 442, 443, 444, 445, 446, 447, 448, 449,
2470 450, 451, 452, 453, 454, 421, 422, 421,
2471 422, 421, 422, 422, 421, 422, 422, 421,
2472 421, 421, 422, 421, 421, 421, 421, 421,
2473 421, 421, 422, 421, 421, 421, 421, 421,
2474 421, 421, 422, 422, 422, 422, 422, 422,
2475 422, 422, 422, 422, 422, 421, 421, 421,
2476 421, 421, 421, 421, 421, 422, 422, 422,
2477 422, 422, 422, 422, 422, 422, 421, 421,
2478 421, 421, 421, 421, 421, 421, 422, 422,
2479 422, 422, 422, 422, 422, 422, 422, 421,
2480 422, 422, 422, 422, 422, 422, 422, 422,
2481 421, 422, 422, 422, 422, 422, 422, 422,
2482 422, 422, 422, 422, 421, 422, 422, 422,
2483 422, 422, 422, 421, 422, 422, 422, 422,
2484 422, 422, 421, 421, 421, 421, 421, 421,
2485 421, 421, 422, 422, 422, 422, 422, 422,
2486 422, 422, 421, 422, 422, 422, 422, 422,
2487 422, 422, 422, 421, 422, 422, 422, 422,
2488 422, 421, 421, 421, 421, 421, 421, 421,
2489 421, 422, 422, 422, 422, 422, 422, 421,
2490 422, 422, 422, 422, 422, 422, 422, 421,
2491 422, 421, 422, 422, 421, 422, 422, 422,
2492 422, 422, 422, 422, 422, 422, 422, 422,
2493 422, 422, 421, 422, 422, 422, 422, 422,
2494 421, 422, 422, 422, 422, 422, 422, 422,
2495 421, 422, 422, 422, 421, 422, 422, 422,
2496 421, 422, 421, 455, 456, 457, 458, 459,
2497 460, 461, 462, 463, 464, 465, 466, 467,
2498 468, 469, 470, 471, 472, 473, 474, 475,
2499 476, 477, 478, 479, 480, 481, 482, 483,
2500 484, 485, 486, 487, 488, 489, 490, 427,
2501 491, 492, 493, 494, 495, 496, 427, 472,
2502 427, 421, 422, 421, 422, 422, 421, 421,
2503 422, 421, 421, 421, 421, 422, 421, 421,
2504 421, 421, 421, 422, 421, 421, 421, 421,
2505 421, 422, 422, 422, 422, 422, 421, 421,
2506 421, 422, 421, 421, 421, 422, 422, 422,
2507 421, 421, 421, 422, 422, 421, 421, 421,
2508 422, 422, 422, 421, 421, 421, 422, 422,
2509 422, 422, 421, 422, 422, 422, 422, 421,
2510 421, 421, 421, 421, 422, 422, 422, 422,
2511 421, 421, 422, 422, 422, 421, 421, 422,
2512 422, 422, 422, 421, 422, 422, 421, 422,
2513 422, 421, 421, 421, 422, 422, 422, 421,
2514 421, 421, 421, 422, 422, 422, 422, 422,
2515 421, 421, 421, 421, 422, 421, 422, 422,
2516 421, 422, 422, 421, 422, 421, 422, 422,
2517 422, 421, 422, 422, 421, 421, 421, 422,
2518 421, 421, 421, 421, 421, 421, 421, 422,
2519 422, 422, 422, 421, 422, 422, 422, 422,
2520 422, 422, 422, 421, 497, 498, 499, 500,
2521 501, 502, 503, 504, 505, 427, 506, 507,
2522 508, 509, 510, 421, 422, 421, 421, 421,
2523 421, 421, 422, 422, 421, 422, 422, 422,
2524 421, 422, 422, 422, 422, 422, 422, 422,
2525 422, 422, 422, 421, 422, 422, 422, 421,
2526 421, 422, 422, 422, 421, 421, 422, 421,
2527 421, 422, 422, 422, 422, 422, 421, 421,
2528 421, 421, 422, 422, 422, 422, 422, 422,
2529 421, 422, 422, 422, 422, 422, 421, 511,
2530 466, 512, 513, 514, 427, 515, 516, 472,
2531 427, 421, 422, 422, 422, 422, 421, 421,
2532 421, 422, 421, 421, 422, 422, 422, 421,
2533 421, 421, 422, 422, 421, 477, 421, 472,
2534 427, 427, 517, 421, 427, 421, 422, 472,
2535 518, 519, 472, 520, 521, 472, 522, 523,
2536 524, 525, 526, 527, 472, 528, 529, 530,
2537 472, 531, 532, 533, 491, 534, 535, 536,
2538 491, 537, 472, 427, 421, 421, 422, 422,
2539 421, 421, 421, 422, 422, 422, 422, 421,
2540 422, 422, 421, 421, 421, 421, 422, 422,
2541 421, 421, 422, 422, 421, 421, 421, 421,
2542 421, 421, 422, 422, 422, 421, 421, 421,
2543 422, 421, 421, 421, 422, 422, 421, 422,
2544 422, 422, 422, 421, 422, 422, 422, 422,
2545 421, 422, 422, 422, 422, 422, 422, 421,
2546 421, 421, 422, 422, 422, 422, 421, 538,
2547 539, 421, 427, 421, 422, 421, 421, 422,
2548 472, 540, 541, 542, 543, 522, 544, 545,
2549 546, 547, 548, 549, 550, 551, 552, 553,
2550 554, 555, 427, 421, 421, 422, 421, 422,
2551 422, 422, 422, 422, 422, 422, 421, 422,
2552 422, 422, 421, 422, 421, 421, 422, 421,
2553 422, 421, 421, 422, 422, 422, 422, 421,
2554 422, 422, 422, 421, 421, 422, 422, 422,
2555 422, 421, 422, 422, 421, 421, 422, 422,
2556 422, 422, 422, 421, 556, 557, 558, 559,
2557 560, 561, 562, 563, 564, 565, 566, 562,
2558 568, 569, 570, 571, 567, 421, 572, 573,
2559 472, 574, 575, 576, 577, 578, 579, 580,
2560 581, 582, 472, 427, 583, 584, 585, 586,
2561 472, 587, 588, 589, 590, 591, 592, 593,
2562 594, 595, 596, 597, 598, 599, 600, 601,
2563 472, 503, 427, 602, 421, 422, 422, 422,
2564 422, 422, 421, 421, 421, 422, 421, 422,
2565 422, 421, 422, 421, 422, 422, 421, 421,
2566 421, 422, 422, 422, 421, 421, 421, 422,
2567 422, 422, 421, 421, 421, 421, 422, 421,
2568 421, 422, 421, 421, 422, 422, 422, 421,
2569 421, 422, 421, 422, 422, 422, 421, 422,
2570 422, 422, 422, 422, 422, 421, 421, 421,
2571 422, 422, 421, 422, 422, 421, 422, 422,
2572 421, 422, 422, 421, 422, 422, 422, 422,
2573 422, 422, 422, 421, 422, 421, 422, 421,
2574 422, 422, 421, 422, 421, 422, 422, 421,
2575 422, 421, 422, 421, 603, 574, 604, 605,
2576 606, 607, 608, 609, 610, 611, 612, 455,
2577 613, 472, 614, 615, 616, 472, 617, 487,
2578 618, 619, 620, 621, 622, 623, 624, 625,
2579 472, 421, 421, 421, 422, 422, 422, 421,
2580 422, 422, 421, 422, 422, 421, 421, 421,
2581 421, 421, 422, 422, 422, 422, 421, 422,
2582 422, 422, 422, 422, 422, 421, 421, 421,
2583 422, 422, 422, 422, 422, 422, 422, 422,
2584 422, 421, 422, 422, 422, 422, 422, 422,
2585 422, 422, 421, 422, 422, 421, 421, 421,
2586 421, 422, 422, 422, 421, 421, 421, 422,
2587 421, 421, 421, 422, 422, 421, 422, 422,
2588 422, 421, 422, 421, 421, 421, 422, 422,
2589 421, 422, 422, 422, 421, 422, 422, 422,
2590 421, 421, 421, 421, 422, 472, 541, 626,
2591 627, 427, 472, 427, 421, 421, 422, 421,
2592 422, 472, 626, 427, 421, 472, 628, 427,
2593 421, 421, 422, 472, 629, 630, 631, 532,
2594 632, 633, 472, 634, 635, 636, 427, 421,
2595 421, 422, 422, 422, 421, 422, 422, 421,
2596 422, 422, 422, 422, 421, 421, 422, 421,
2597 421, 422, 422, 421, 422, 421, 472, 427,
2598 421, 637, 472, 638, 421, 427, 421, 422,
2599 421, 422, 639, 472, 640, 641, 421, 422,
2600 421, 421, 421, 422, 422, 422, 422, 421,
2601 642, 643, 644, 472, 645, 646, 647, 648,
2602 649, 650, 651, 652, 653, 654, 655, 656,
2603 657, 658, 427, 421, 422, 422, 422, 421,
2604 421, 421, 421, 422, 422, 421, 421, 422,
2605 421, 421, 421, 421, 421, 421, 421, 422,
2606 421, 422, 421, 421, 421, 421, 421, 421,
2607 422, 422, 422, 422, 422, 421, 421, 422,
2608 421, 421, 421, 422, 421, 421, 422, 421,
2609 421, 422, 421, 421, 422, 421, 421, 421,
2610 422, 422, 422, 421, 421, 421, 422, 422,
2611 422, 422, 421, 659, 472, 660, 472, 661,
2612 662, 663, 664, 427, 421, 422, 422, 422,
2613 422, 422, 421, 421, 421, 422, 421, 421,
2614 422, 422, 422, 422, 422, 422, 422, 422,
2615 422, 422, 421, 422, 422, 422, 422, 422,
2616 422, 422, 422, 422, 422, 422, 422, 422,
2617 422, 422, 422, 422, 422, 422, 421, 422,
2618 422, 422, 422, 422, 421, 665, 472, 427,
2619 421, 422, 666, 472, 457, 427, 421, 422,
2620 667, 421, 427, 421, 422, 472, 668, 427,
2621 421, 421, 422, 669, 421, 472, 670, 427,
2622 421, 421, 422, 672, 671, 422, 422, 422,
2623 422, 672, 671, 422, 672, 671, 672, 672,
2624 422, 672, 671, 422, 672, 422, 672, 671,
2625 422, 672, 422, 672, 422, 671, 672, 672,
2626 672, 672, 672, 672, 672, 672, 671, 422,
2627 422, 672, 672, 422, 672, 422, 672, 671,
2628 672, 672, 672, 672, 672, 422, 672, 422,
2629 672, 422, 672, 671, 672, 672, 422, 672,
2630 422, 672, 671, 672, 672, 672, 672, 672,
2631 422, 672, 422, 672, 671, 422, 422, 672,
2632 422, 672, 671, 672, 672, 672, 422, 672,
2633 422, 672, 422, 672, 422, 672, 671, 672,
2634 422, 672, 422, 672, 671, 422, 672, 672,
2635 672, 672, 422, 672, 422, 672, 422, 672,
2636 422, 672, 422, 672, 422, 672, 671, 422,
2637 672, 671, 672, 672, 672, 422, 672, 422,
2638 672, 671, 672, 422, 672, 422, 672, 671,
2639 422, 672, 672, 672, 672, 422, 672, 422,
2640 672, 671, 422, 672, 422, 672, 422, 672,
2641 671, 672, 672, 422, 672, 422, 672, 671,
2642 422, 672, 422, 672, 422, 672, 422, 671,
2643 672, 672, 672, 422, 672, 422, 672, 671,
2644 422, 672, 671, 672, 672, 422, 672, 671,
2645 672, 672, 672, 422, 672, 672, 672, 672,
2646 672, 672, 422, 422, 672, 422, 672, 422,
2647 672, 422, 672, 671, 672, 422, 672, 422,
2648 672, 671, 422, 672, 671, 672, 422, 672,
2649 671, 672, 422, 672, 671, 422, 422, 672,
2650 671, 422, 672, 422, 672, 422, 672, 422,
2651 672, 422, 672, 422, 671, 672, 672, 422,
2652 672, 672, 672, 672, 422, 422, 672, 672,
2653 672, 672, 672, 422, 672, 672, 672, 672,
2654 672, 671, 422, 672, 672, 422, 672, 422,
2655 671, 672, 672, 422, 672, 671, 422, 422,
2656 672, 422, 671, 672, 672, 671, 422, 672,
2657 422, 671, 672, 671, 422, 672, 422, 672,
2658 422, 671, 672, 672, 671, 422, 672, 422,
2659 672, 422, 672, 671, 672, 422, 672, 422,
2660 672, 671, 422, 672, 671, 422, 422, 672,
2661 671, 672, 422, 671, 672, 671, 422, 672,
2662 422, 672, 422, 671, 672, 671, 422, 422,
2663 672, 671, 672, 422, 672, 422, 672, 671,
2664 422, 672, 422, 671, 672, 671, 422, 422,
2665 672, 422, 671, 672, 671, 422, 422, 672,
2666 671, 672, 422, 672, 671, 672, 422, 672,
2667 671, 672, 422, 672, 422, 672, 422, 671,
2668 672, 671, 422, 422, 672, 671, 672, 422,
2669 672, 422, 672, 671, 422, 672, 671, 672,
2670 672, 422, 672, 422, 672, 671, 671, 422,
2671 671, 422, 672, 672, 422, 672, 672, 672,
2672 672, 672, 672, 672, 671, 422, 672, 672,
2673 672, 422, 671, 672, 672, 672, 422, 672,
2674 422, 672, 422, 672, 422, 672, 422, 672,
2675 671, 422, 422, 672, 671, 672, 422, 672,
2676 671, 422, 422, 672, 422, 422, 422, 672,
2677 422, 672, 422, 672, 422, 672, 422, 671,
2678 422, 672, 422, 672, 422, 671, 672, 671,
2679 422, 672, 422, 671, 672, 422, 672, 672,
2680 672, 671, 422, 672, 422, 422, 672, 422,
2681 671, 672, 672, 671, 422, 672, 672, 672,
2682 672, 422, 672, 422, 671, 672, 672, 672,
2683 422, 672, 671, 672, 422, 672, 422, 672,
2684 422, 672, 422, 672, 671, 672, 672, 422,
2685 672, 671, 422, 672, 422, 672, 422, 671,
2686 672, 672, 671, 422, 672, 422, 671, 672,
2687 671, 422, 672, 671, 422, 672, 422, 672,
2688 671, 672, 672, 672, 671, 422, 422, 422,
2689 672, 671, 422, 672, 422, 671, 672, 671,
2690 422, 672, 422, 672, 422, 671, 672, 672,
2691 672, 671, 422, 672, 422, 671, 672, 672,
2692 672, 672, 671, 422, 672, 422, 672, 671,
2693 422, 422, 672, 422, 672, 671, 672, 422,
2694 672, 422, 671, 672, 672, 671, 422, 672,
2695 422, 672, 671, 422, 672, 672, 672, 422,
2696 672, 422, 671, 422, 672, 671, 672, 422,
2697 422, 672, 422, 672, 422, 671, 672, 672,
2698 672, 672, 671, 422, 672, 422, 672, 422,
2699 672, 422, 672, 422, 672, 671, 672, 672,
2700 672, 422, 672, 422, 672, 422, 672, 422,
2701 671, 672, 672, 422, 422, 672, 671, 672,
2702 422, 672, 672, 671, 422, 672, 422, 672,
2703 671, 422, 422, 672, 672, 672, 672, 422,
2704 672, 422, 672, 422, 671, 672, 672, 422,
2705 671, 672, 671, 422, 672, 422, 671, 672,
2706 671, 422, 672, 422, 671, 672, 422, 672,
2707 672, 671, 422, 672, 672, 422, 671, 672,
2708 671, 422, 672, 422, 672, 671, 672, 422,
2709 672, 422, 671, 672, 671, 422, 672, 422,
2710 672, 422, 672, 422, 672, 422, 672, 671,
2711 673, 671, 674, 675, 676, 677, 678, 679,
2712 680, 681, 682, 683, 684, 676, 685, 686,
2713 687, 688, 689, 676, 690, 691, 692, 693,
2714 694, 695, 696, 697, 698, 699, 700, 701,
2715 702, 703, 704, 676, 705, 673, 685, 673,
2716 706, 673, 671, 672, 672, 672, 672, 422,
2717 671, 672, 672, 671, 422, 672, 671, 422,
2718 422, 672, 671, 422, 672, 422, 671, 672,
2719 671, 422, 422, 672, 422, 671, 672, 672,
2720 671, 422, 672, 672, 672, 671, 422, 672,
2721 422, 672, 672, 671, 422, 422, 672, 422,
2722 671, 672, 671, 422, 672, 671, 422, 422,
2723 672, 422, 672, 671, 422, 672, 422, 422,
2724 672, 422, 672, 422, 671, 672, 672, 671,
2725 422, 672, 672, 422, 672, 671, 422, 672,
2726 422, 672, 671, 422, 672, 422, 671, 422,
2727 672, 672, 672, 422, 672, 671, 672, 422,
2728 672, 671, 422, 672, 671, 672, 422, 672,
2729 671, 422, 672, 671, 422, 672, 422, 672,
2730 671, 422, 672, 671, 422, 672, 671, 707,
2731 708, 709, 710, 711, 712, 713, 714, 715,
2732 716, 717, 718, 678, 719, 720, 721, 722,
2733 723, 720, 724, 725, 726, 727, 728, 729,
2734 730, 731, 732, 673, 671, 672, 422, 672,
2735 671, 672, 422, 672, 671, 672, 422, 672,
2736 671, 672, 422, 672, 671, 422, 672, 422,
2737 672, 671, 672, 422, 672, 671, 672, 422,
2738 422, 422, 672, 671, 672, 422, 672, 671,
2739 672, 672, 672, 672, 422, 672, 422, 671,
2740 672, 671, 422, 422, 672, 422, 672, 671,
2741 672, 422, 672, 671, 422, 672, 671, 672,
2742 672, 422, 672, 671, 422, 672, 671, 672,
2743 422, 672, 671, 422, 672, 671, 422, 672,
2744 671, 422, 672, 671, 672, 671, 422, 422,
2745 672, 671, 672, 422, 672, 671, 422, 672,
2746 422, 671, 672, 671, 422, 676, 733, 673,
2747 676, 734, 676, 735, 685, 673, 671, 672,
2748 671, 422, 672, 671, 422, 676, 734, 685,
2749 673, 671, 676, 736, 673, 685, 673, 671,
2750 672, 671, 422, 676, 737, 694, 738, 720,
2751 739, 732, 676, 740, 741, 742, 673, 685,
2752 673, 671, 672, 671, 422, 672, 422, 672,
2753 671, 422, 672, 422, 672, 422, 671, 672,
2754 672, 671, 422, 672, 422, 672, 671, 422,
2755 672, 671, 676, 685, 427, 671, 743, 676,
2756 744, 685, 673, 671, 427, 672, 671, 422,
2757 672, 671, 422, 745, 676, 746, 747, 673,
2758 671, 422, 672, 671, 672, 672, 671, 422,
2759 422, 672, 422, 672, 671, 676, 748, 749,
2760 750, 751, 752, 753, 754, 755, 756, 757,
2761 758, 673, 685, 673, 671, 672, 422, 672,
2762 672, 672, 672, 672, 672, 672, 422, 672,
2763 422, 672, 672, 672, 672, 672, 672, 671,
2764 422, 672, 672, 422, 672, 422, 671, 672,
2765 422, 672, 672, 672, 422, 672, 672, 422,
2766 672, 672, 422, 672, 672, 422, 672, 672,
2767 671, 422, 676, 759, 676, 735, 760, 761,
2768 762, 673, 685, 673, 671, 672, 671, 422,
2769 672, 672, 672, 422, 672, 672, 672, 422,
2770 672, 422, 672, 671, 422, 422, 422, 422,
2771 672, 672, 422, 422, 422, 422, 422, 672,
2772 672, 672, 672, 672, 672, 672, 422, 672,
2773 422, 672, 422, 671, 672, 672, 672, 422,
2774 672, 422, 672, 671, 685, 427, 763, 676,
2775 685, 427, 672, 671, 422, 764, 676, 765,
2776 685, 427, 672, 671, 422, 672, 422, 766,
2777 685, 673, 671, 427, 672, 671, 422, 676,
2778 767, 673, 685, 673, 671, 672, 671, 422,
2779 768, 769, 768, 770, 771, 768, 772, 768,
2780 773, 768, 771, 774, 775, 774, 777, 776,
2781 778, 779, 778, 780, 781, 776, 782, 776,
2782 783, 778, 784, 779, 785, 780, 787, 786,
2783 788, 789, 789, 786, 790, 786, 791, 788,
2784 792, 789, 793, 789, 795, 795, 795, 795,
2785 794, 795, 795, 795, 794, 795, 794, 795,
2786 795, 794, 794, 794, 794, 794, 794, 795,
2787 794, 794, 794, 794, 795, 795, 795, 795,
2788 795, 794, 794, 795, 794, 794, 795, 794,
2789 795, 794, 794, 795, 794, 794, 794, 795,
2790 795, 795, 795, 795, 795, 794, 795, 795,
2791 794, 795, 795, 794, 794, 794, 794, 794,
2792 794, 795, 795, 794, 794, 795, 794, 795,
2793 795, 795, 794, 797, 798, 799, 800, 801,
2794 802, 803, 804, 805, 806, 807, 808, 809,
2795 810, 811, 812, 813, 814, 815, 816, 817,
2796 818, 819, 820, 821, 822, 823, 824, 825,
2797 826, 827, 828, 794, 795, 794, 795, 794,
2798 795, 795, 794, 795, 795, 794, 794, 794,
2799 795, 794, 794, 794, 794, 794, 794, 794,
2800 795, 794, 794, 794, 794, 794, 794, 794,
2801 795, 795, 795, 795, 795, 795, 795, 795,
2802 795, 795, 795, 794, 794, 794, 794, 794,
2803 794, 794, 794, 795, 795, 795, 795, 795,
2804 795, 795, 795, 795, 794, 794, 794, 794,
2805 794, 794, 794, 794, 795, 795, 795, 795,
2806 795, 795, 795, 795, 795, 794, 795, 795,
2807 795, 795, 795, 795, 795, 795, 794, 795,
2808 795, 795, 795, 795, 795, 795, 795, 795,
2809 795, 795, 794, 795, 795, 795, 795, 795,
2810 795, 794, 795, 795, 795, 795, 795, 795,
2811 794, 794, 794, 794, 794, 794, 794, 794,
2812 795, 795, 795, 795, 795, 795, 795, 795,
2813 794, 795, 795, 795, 795, 795, 795, 795,
2814 795, 794, 795, 795, 795, 795, 795, 794,
2815 794, 794, 794, 794, 794, 794, 794, 795,
2816 795, 795, 795, 795, 795, 794, 795, 795,
2817 795, 795, 795, 795, 795, 794, 795, 794,
2818 795, 795, 794, 795, 795, 795, 795, 795,
2819 795, 795, 795, 795, 795, 795, 795, 795,
2820 794, 795, 795, 795, 795, 795, 794, 795,
2821 795, 795, 795, 795, 795, 795, 794, 795,
2822 795, 795, 794, 795, 795, 795, 794, 795,
2823 794, 829, 830, 831, 832, 833, 834, 835,
2824 836, 837, 838, 839, 840, 841, 842, 843,
2825 844, 845, 846, 847, 848, 849, 850, 851,
2826 852, 853, 854, 855, 856, 857, 858, 859,
2827 860, 861, 862, 863, 864, 801, 865, 866,
2828 867, 868, 869, 870, 801, 846, 801, 794,
2829 795, 794, 795, 795, 794, 794, 795, 794,
2830 794, 794, 794, 795, 794, 794, 794, 794,
2831 794, 795, 794, 794, 794, 794, 794, 795,
2832 795, 795, 795, 795, 794, 794, 794, 795,
2833 794, 794, 794, 795, 795, 795, 794, 794,
2834 794, 795, 795, 794, 794, 794, 795, 795,
2835 795, 794, 794, 794, 795, 795, 795, 795,
2836 794, 795, 795, 795, 795, 794, 794, 794,
2837 794, 794, 795, 795, 795, 795, 794, 794,
2838 795, 795, 795, 794, 794, 795, 795, 795,
2839 795, 794, 795, 795, 794, 795, 795, 794,
2840 794, 794, 795, 795, 795, 794, 794, 794,
2841 794, 795, 795, 795, 795, 795, 794, 794,
2842 794, 794, 795, 794, 795, 795, 794, 795,
2843 795, 794, 795, 794, 795, 795, 795, 794,
2844 795, 795, 794, 794, 794, 795, 794, 794,
2845 794, 794, 794, 794, 794, 795, 795, 795,
2846 795, 794, 795, 795, 795, 795, 795, 795,
2847 795, 794, 871, 872, 873, 874, 875, 876,
2848 877, 878, 879, 801, 880, 881, 882, 883,
2849 884, 794, 795, 794, 794, 794, 794, 794,
2850 795, 795, 794, 795, 795, 795, 794, 795,
2851 795, 795, 795, 795, 795, 795, 795, 795,
2852 795, 794, 795, 795, 795, 794, 794, 795,
2853 795, 795, 794, 794, 795, 794, 794, 795,
2854 795, 795, 795, 795, 794, 794, 794, 794,
2855 795, 795, 795, 795, 795, 795, 794, 795,
2856 795, 795, 795, 795, 794, 885, 840, 886,
2857 887, 888, 801, 889, 890, 846, 801, 794,
2858 795, 795, 795, 795, 794, 794, 794, 795,
2859 794, 794, 795, 795, 795, 794, 794, 794,
2860 795, 795, 794, 851, 794, 846, 801, 801,
2861 891, 794, 801, 794, 795, 846, 892, 893,
2862 846, 894, 895, 846, 896, 897, 898, 899,
2863 900, 901, 846, 902, 903, 904, 846, 905,
2864 906, 907, 865, 908, 909, 910, 865, 911,
2865 846, 801, 794, 794, 795, 795, 794, 794,
2866 794, 795, 795, 795, 795, 794, 795, 795,
2867 794, 794, 794, 794, 795, 795, 794, 794,
2868 795, 795, 794, 794, 794, 794, 794, 794,
2869 795, 795, 795, 794, 794, 794, 795, 794,
2870 794, 794, 795, 795, 794, 795, 795, 795,
2871 795, 794, 795, 795, 795, 795, 794, 795,
2872 795, 795, 795, 795, 795, 794, 794, 794,
2873 795, 795, 795, 795, 794, 912, 913, 794,
2874 801, 794, 795, 794, 794, 795, 846, 914,
2875 915, 916, 917, 896, 918, 919, 920, 921,
2876 922, 923, 924, 925, 926, 927, 928, 929,
2877 801, 794, 794, 795, 794, 795, 795, 795,
2878 795, 795, 795, 795, 794, 795, 795, 795,
2879 794, 795, 794, 794, 795, 794, 795, 794,
2880 794, 795, 795, 795, 795, 794, 795, 795,
2881 795, 794, 794, 795, 795, 795, 795, 794,
2882 795, 795, 794, 794, 795, 795, 795, 795,
2883 795, 794, 930, 931, 932, 933, 934, 935,
2884 936, 937, 938, 939, 940, 936, 942, 943,
2885 944, 945, 941, 794, 946, 947, 846, 948,
2886 949, 950, 951, 952, 953, 954, 955, 956,
2887 846, 801, 957, 958, 959, 960, 846, 961,
2888 962, 963, 964, 965, 966, 967, 968, 969,
2889 970, 971, 972, 973, 974, 975, 846, 877,
2890 801, 976, 794, 795, 795, 795, 795, 795,
2891 794, 794, 794, 795, 794, 795, 795, 794,
2892 795, 794, 795, 795, 794, 794, 794, 795,
2893 795, 795, 794, 794, 794, 795, 795, 795,
2894 794, 794, 794, 794, 795, 794, 794, 795,
2895 794, 794, 795, 795, 795, 794, 794, 795,
2896 794, 795, 795, 795, 794, 795, 795, 795,
2897 795, 795, 795, 794, 794, 794, 795, 795,
2898 794, 795, 795, 794, 795, 795, 794, 795,
2899 795, 794, 795, 795, 795, 795, 795, 795,
2900 795, 794, 795, 794, 795, 794, 795, 795,
2901 794, 795, 794, 795, 795, 794, 795, 794,
2902 795, 794, 977, 948, 978, 979, 980, 981,
2903 982, 983, 984, 985, 986, 829, 987, 846,
2904 988, 989, 990, 846, 991, 861, 992, 993,
2905 994, 995, 996, 997, 998, 999, 846, 794,
2906 794, 794, 795, 795, 795, 794, 795, 795,
2907 794, 795, 795, 794, 794, 794, 794, 794,
2908 795, 795, 795, 795, 794, 795, 795, 795,
2909 795, 795, 795, 794, 794, 794, 795, 795,
2910 795, 795, 795, 795, 795, 795, 795, 794,
2911 795, 795, 795, 795, 795, 795, 795, 795,
2912 794, 795, 795, 794, 794, 794, 794, 795,
2913 795, 795, 794, 794, 794, 795, 794, 794,
2914 794, 795, 795, 794, 795, 795, 795, 794,
2915 795, 794, 794, 794, 795, 795, 794, 795,
2916 795, 795, 794, 795, 795, 795, 794, 794,
2917 794, 794, 795, 846, 915, 1000, 1001, 801,
2918 846, 801, 794, 794, 795, 794, 795, 846,
2919 1000, 801, 794, 846, 1002, 801, 794, 794,
2920 795, 846, 1003, 1004, 1005, 906, 1006, 1007,
2921 846, 1008, 1009, 1010, 801, 794, 794, 795,
2922 795, 795, 794, 795, 795, 794, 795, 795,
2923 795, 795, 794, 794, 795, 794, 794, 795,
2924 795, 794, 795, 794, 846, 801, 794, 1011,
2925 846, 1012, 794, 801, 794, 795, 794, 795,
2926 1013, 846, 1014, 1015, 794, 795, 794, 794,
2927 794, 795, 795, 795, 795, 794, 1016, 1017,
2928 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024,
2929 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032,
2930 801, 794, 795, 795, 795, 794, 794, 794,
2931 794, 795, 795, 794, 794, 795, 794, 794,
2932 794, 794, 794, 794, 794, 795, 794, 795,
2933 794, 794, 794, 794, 794, 794, 795, 795,
2934 795, 795, 795, 794, 794, 795, 794, 794,
2935 794, 795, 794, 794, 795, 794, 794, 795,
2936 794, 794, 795, 794, 794, 794, 795, 795,
2937 795, 794, 794, 794, 795, 795, 795, 795,
2938 794, 1033, 846, 1034, 846, 1035, 1036, 1037,
2939 1038, 801, 794, 795, 795, 795, 795, 795,
2940 794, 794, 794, 795, 794, 794, 795, 795,
2941 795, 795, 795, 795, 795, 795, 795, 795,
2942 794, 795, 795, 795, 795, 795, 795, 795,
2943 795, 795, 795, 795, 795, 795, 795, 795,
2944 795, 795, 795, 795, 794, 795, 795, 795,
2945 795, 795, 794, 1039, 846, 801, 794, 795,
2946 1040, 846, 831, 801, 794, 795, 1041, 794,
2947 801, 794, 795, 846, 1042, 801, 794, 794,
2948 795, 1043, 794, 846, 1044, 801, 794, 794,
2949 795, 1046, 1045, 795, 795, 795, 795, 1046,
2950 1045, 795, 1046, 1045, 1046, 1046, 795, 1046,
2951 1045, 795, 1046, 795, 1046, 1045, 795, 1046,
2952 795, 1046, 795, 1045, 1046, 1046, 1046, 1046,
2953 1046, 1046, 1046, 1046, 1045, 795, 795, 1046,
2954 1046, 795, 1046, 795, 1046, 1045, 1046, 1046,
2955 1046, 1046, 1046, 795, 1046, 795, 1046, 795,
2956 1046, 1045, 1046, 1046, 795, 1046, 795, 1046,
2957 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046,
2958 795, 1046, 1045, 795, 795, 1046, 795, 1046,
2959 1045, 1046, 1046, 1046, 795, 1046, 795, 1046,
2960 795, 1046, 795, 1046, 1045, 1046, 795, 1046,
2961 795, 1046, 1045, 795, 1046, 1046, 1046, 1046,
2962 795, 1046, 795, 1046, 795, 1046, 795, 1046,
2963 795, 1046, 795, 1046, 1045, 795, 1046, 1045,
2964 1046, 1046, 1046, 795, 1046, 795, 1046, 1045,
2965 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
2966 1046, 1046, 1046, 795, 1046, 795, 1046, 1045,
2967 795, 1046, 795, 1046, 795, 1046, 1045, 1046,
2968 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
2969 795, 1046, 795, 1046, 795, 1045, 1046, 1046,
2970 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
2971 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046,
2972 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046,
2973 795, 795, 1046, 795, 1046, 795, 1046, 795,
2974 1046, 1045, 1046, 795, 1046, 795, 1046, 1045,
2975 795, 1046, 1045, 1046, 795, 1046, 1045, 1046,
2976 795, 1046, 1045, 795, 795, 1046, 1045, 795,
2977 1046, 795, 1046, 795, 1046, 795, 1046, 795,
2978 1046, 795, 1045, 1046, 1046, 795, 1046, 1046,
2979 1046, 1046, 795, 795, 1046, 1046, 1046, 1046,
2980 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045,
2981 795, 1046, 1046, 795, 1046, 795, 1045, 1046,
2982 1046, 795, 1046, 1045, 795, 795, 1046, 795,
2983 1045, 1046, 1046, 1045, 795, 1046, 795, 1045,
2984 1046, 1045, 795, 1046, 795, 1046, 795, 1045,
2985 1046, 1046, 1045, 795, 1046, 795, 1046, 795,
2986 1046, 1045, 1046, 795, 1046, 795, 1046, 1045,
2987 795, 1046, 1045, 795, 795, 1046, 1045, 1046,
2988 795, 1045, 1046, 1045, 795, 1046, 795, 1046,
2989 795, 1045, 1046, 1045, 795, 795, 1046, 1045,
2990 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
2991 795, 1045, 1046, 1045, 795, 795, 1046, 795,
2992 1045, 1046, 1045, 795, 795, 1046, 1045, 1046,
2993 795, 1046, 1045, 1046, 795, 1046, 1045, 1046,
2994 795, 1046, 795, 1046, 795, 1045, 1046, 1045,
2995 795, 795, 1046, 1045, 1046, 795, 1046, 795,
2996 1046, 1045, 795, 1046, 1045, 1046, 1046, 795,
2997 1046, 795, 1046, 1045, 1045, 795, 1045, 795,
2998 1046, 1046, 795, 1046, 1046, 1046, 1046, 1046,
2999 1046, 1046, 1045, 795, 1046, 1046, 1046, 795,
3000 1045, 1046, 1046, 1046, 795, 1046, 795, 1046,
3001 795, 1046, 795, 1046, 795, 1046, 1045, 795,
3002 795, 1046, 1045, 1046, 795, 1046, 1045, 795,
3003 795, 1046, 795, 795, 795, 1046, 795, 1046,
3004 795, 1046, 795, 1046, 795, 1045, 795, 1046,
3005 795, 1046, 795, 1045, 1046, 1045, 795, 1046,
3006 795, 1045, 1046, 795, 1046, 1046, 1046, 1045,
3007 795, 1046, 795, 795, 1046, 795, 1045, 1046,
3008 1046, 1045, 795, 1046, 1046, 1046, 1046, 795,
3009 1046, 795, 1045, 1046, 1046, 1046, 795, 1046,
3010 1045, 1046, 795, 1046, 795, 1046, 795, 1046,
3011 795, 1046, 1045, 1046, 1046, 795, 1046, 1045,
3012 795, 1046, 795, 1046, 795, 1045, 1046, 1046,
3013 1045, 795, 1046, 795, 1045, 1046, 1045, 795,
3014 1046, 1045, 795, 1046, 795, 1046, 1045, 1046,
3015 1046, 1046, 1045, 795, 795, 795, 1046, 1045,
3016 795, 1046, 795, 1045, 1046, 1045, 795, 1046,
3017 795, 1046, 795, 1045, 1046, 1046, 1046, 1045,
3018 795, 1046, 795, 1045, 1046, 1046, 1046, 1046,
3019 1045, 795, 1046, 795, 1046, 1045, 795, 795,
3020 1046, 795, 1046, 1045, 1046, 795, 1046, 795,
3021 1045, 1046, 1046, 1045, 795, 1046, 795, 1046,
3022 1045, 795, 1046, 1046, 1046, 795, 1046, 795,
3023 1045, 795, 1046, 1045, 1046, 795, 795, 1046,
3024 795, 1046, 795, 1045, 1046, 1046, 1046, 1046,
3025 1045, 795, 1046, 795, 1046, 795, 1046, 795,
3026 1046, 795, 1046, 1045, 1046, 1046, 1046, 795,
3027 1046, 795, 1046, 795, 1046, 795, 1045, 1046,
3028 1046, 795, 795, 1046, 1045, 1046, 795, 1046,
3029 1046, 1045, 795, 1046, 795, 1046, 1045, 795,
3030 795, 1046, 1046, 1046, 1046, 795, 1046, 795,
3031 1046, 795, 1045, 1046, 1046, 795, 1045, 1046,
3032 1045, 795, 1046, 795, 1045, 1046, 1045, 795,
3033 1046, 795, 1045, 1046, 795, 1046, 1046, 1045,
3034 795, 1046, 1046, 795, 1045, 1046, 1045, 795,
3035 1046, 795, 1046, 1045, 1046, 795, 1046, 795,
3036 1045, 1046, 1045, 795, 1046, 795, 1046, 795,
3037 1046, 795, 1046, 795, 1046, 1045, 1047, 1045,
3038 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
3039 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062,
3040 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069,
3041 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077,
3042 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047,
3043 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046,
3044 1046, 1045, 795, 1046, 1045, 795, 795, 1046,
3045 1045, 795, 1046, 795, 1045, 1046, 1045, 795,
3046 795, 1046, 795, 1045, 1046, 1046, 1045, 795,
3047 1046, 1046, 1046, 1045, 795, 1046, 795, 1046,
3048 1046, 1045, 795, 795, 1046, 795, 1045, 1046,
3049 1045, 795, 1046, 1045, 795, 795, 1046, 795,
3050 1046, 1045, 795, 1046, 795, 795, 1046, 795,
3051 1046, 795, 1045, 1046, 1046, 1045, 795, 1046,
3052 1046, 795, 1046, 1045, 795, 1046, 795, 1046,
3053 1045, 795, 1046, 795, 1045, 795, 1046, 1046,
3054 1046, 795, 1046, 1045, 1046, 795, 1046, 1045,
3055 795, 1046, 1045, 1046, 795, 1046, 1045, 795,
3056 1046, 1045, 795, 1046, 795, 1046, 1045, 795,
3057 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083,
3058 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091,
3059 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094,
3060 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105,
3061 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046,
3062 795, 1046, 1045, 1046, 795, 1046, 1045, 1046,
3063 795, 1046, 1045, 795, 1046, 795, 1046, 1045,
3064 1046, 795, 1046, 1045, 1046, 795, 795, 795,
3065 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046,
3066 1046, 1046, 795, 1046, 795, 1045, 1046, 1045,
3067 795, 795, 1046, 795, 1046, 1045, 1046, 795,
3068 1046, 1045, 795, 1046, 1045, 1046, 1046, 795,
3069 1046, 1045, 795, 1046, 1045, 1046, 795, 1046,
3070 1045, 795, 1046, 1045, 795, 1046, 1045, 795,
3071 1046, 1045, 1046, 1045, 795, 795, 1046, 1045,
3072 1046, 795, 1046, 1045, 795, 1046, 795, 1045,
3073 1046, 1045, 795, 1050, 1107, 1047, 1050, 1108,
3074 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795,
3075 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045,
3076 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045,
3077 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106,
3078 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045,
3079 1046, 1045, 795, 1046, 795, 1046, 1045, 795,
3080 1046, 795, 1046, 795, 1045, 1046, 1046, 1045,
3081 795, 1046, 795, 1046, 1045, 795, 1046, 1045,
3082 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059,
3083 1047, 1045, 801, 1046, 1045, 795, 1046, 1045,
3084 795, 1119, 1050, 1120, 1121, 1047, 1045, 795,
3085 1046, 1045, 1046, 1046, 1045, 795, 795, 1046,
3086 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125,
3087 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047,
3088 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046,
3089 1046, 1046, 1046, 1046, 795, 1046, 795, 1046,
3090 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046,
3091 1046, 795, 1046, 795, 1045, 1046, 795, 1046,
3092 1046, 1046, 795, 1046, 1046, 795, 1046, 1046,
3093 795, 1046, 1046, 795, 1046, 1046, 1045, 795,
3094 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047,
3095 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046,
3096 1046, 795, 1046, 1046, 1046, 795, 1046, 795,
3097 1046, 1045, 795, 795, 795, 795, 1046, 1046,
3098 795, 795, 795, 795, 795, 1046, 1046, 1046,
3099 1046, 1046, 1046, 1046, 795, 1046, 795, 1046,
3100 795, 1045, 1046, 1046, 1046, 795, 1046, 795,
3101 1046, 1045, 1059, 801, 1137, 1050, 1059, 801,
3102 1046, 1045, 795, 1138, 1050, 1139, 1059, 801,
3103 1046, 1045, 795, 1046, 795, 1140, 1059, 1047,
3104 1045, 801, 1046, 1045, 795, 1050, 1141, 1047,
3105 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143,
3106 1144, 1142, 1145, 1146, 1147, 1148, 1149, 1150,
3107 1151, 1152, 1153, 1154, 672, 672, 422, 1155,
3108 1156, 1157, 1158, 672, 1161, 1162, 1164, 1165,
3109 1166, 1160, 1167, 1168, 1169, 1170, 1171, 1172,
3110 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180,
3111 1181, 1182, 1183, 1184, 1185, 1186, 1188, 1189,
3112 1190, 1191, 1192, 1193, 672, 1148, 10, 1148,
3113 422, 1148, 422, 1160, 1163, 1187, 1194, 1159,
3114 1142, 1142, 1195, 1143, 1196, 1198, 1197, 2,
3115 1, 1199, 1197, 1200, 1197, 5, 1, 1197,
3116 6, 5, 9, 11, 11, 10, 1202, 1203,
3117 1204, 1197, 1205, 1206, 1197, 1207, 1197, 422,
3118 422, 1209, 1210, 491, 472, 1211, 472, 1212,
3119 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220,
3120 1221, 1222, 546, 1223, 522, 1224, 1225, 1226,
3121 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234,
3122 1235, 422, 422, 422, 427, 567, 1208, 1236,
3123 1197, 1237, 1197, 672, 1238, 422, 422, 422,
3124 672, 1238, 672, 672, 422, 1238, 422, 1238,
3125 422, 1238, 422, 672, 672, 672, 672, 672,
3126 1238, 422, 672, 672, 672, 422, 672, 422,
3127 1238, 422, 672, 672, 672, 672, 422, 1238,
3128 672, 422, 672, 422, 672, 422, 672, 672,
3129 422, 672, 1238, 422, 672, 422, 672, 422,
3130 672, 1238, 672, 422, 1238, 672, 422, 672,
3131 422, 1238, 672, 672, 672, 672, 672, 1238,
3132 422, 422, 672, 422, 672, 1238, 672, 422,
3133 1238, 672, 672, 1238, 422, 422, 672, 422,
3134 672, 422, 672, 1238, 1239, 1240, 1241, 1242,
3135 1243, 1244, 1245, 1246, 1247, 1248, 1249, 717,
3136 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257,
3137 1258, 1259, 1260, 1261, 1260, 1262, 1263, 1264,
3138 1265, 1266, 673, 1238, 1267, 1268, 1269, 1270,
3139 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278,
3140 1279, 1280, 1281, 1282, 1283, 1284, 1285, 727,
3141 1286, 1287, 1288, 694, 1289, 1290, 1291, 1292,
3142 1293, 1294, 673, 1295, 1296, 1297, 1298, 1299,
3143 1300, 1301, 1302, 676, 1303, 673, 676, 1304,
3144 1305, 1306, 1307, 685, 1238, 1308, 1309, 1310,
3145 1311, 705, 1312, 1313, 685, 1314, 1315, 1316,
3146 1317, 1318, 673, 1238, 1319, 1278, 1320, 1321,
3147 1322, 685, 1323, 1324, 676, 673, 685, 427,
3148 1238, 1288, 673, 676, 685, 427, 685, 427,
3149 1325, 685, 1238, 427, 676, 1326, 1327, 676,
3150 1328, 1329, 683, 1330, 1331, 1332, 1333, 1334,
3151 1284, 1335, 1336, 1337, 1338, 1339, 1340, 1341,
3152 1342, 1343, 1344, 1345, 1346, 1303, 1347, 676,
3153 685, 427, 1238, 1348, 1349, 685, 673, 1238,
3154 427, 673, 1238, 676, 1350, 733, 1351, 1352,
3155 1353, 1354, 1355, 1356, 1357, 1358, 673, 1359,
3156 1360, 1361, 1362, 1363, 1364, 673, 685, 1238,
3157 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373,
3158 1374, 1375, 1376, 1372, 1378, 1379, 1380, 1381,
3159 1365, 1377, 1365, 1238, 1365, 1238, 1382, 1382,
3160 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390,
3161 1387, 771, 1391, 1391, 1391, 1392, 1393, 1386,
3162 1391, 772, 773, 1394, 1391, 771, 1395, 1395,
3163 1395, 1397, 1398, 1399, 1395, 1400, 1401, 1402,
3164 1395, 1396, 1403, 1403, 1403, 1405, 1406, 1407,
3165 1403, 1408, 1409, 1410, 1403, 1404, 1391, 1391,
3166 1411, 1412, 1386, 1391, 772, 773, 1394, 1391,
3167 771, 1413, 1414, 1415, 771, 1416, 1417, 1418,
3168 769, 769, 769, 769, 1420, 1421, 1422, 1396,
3169 769, 1423, 1424, 1425, 769, 1419, 770, 770,
3170 770, 1427, 1428, 1429, 1396, 770, 1430, 1431,
3171 1432, 770, 1426, 769, 769, 769, 1434, 1435,
3172 1436, 1404, 769, 1437, 1438, 1439, 769, 1433,
3173 1395, 1395, 771, 1440, 1441, 1399, 1395, 1400,
3174 1401, 1402, 1395, 1396, 1442, 1443, 1444, 771,
3175 1445, 1446, 1447, 770, 770, 770, 770, 1449,
3176 1450, 1451, 1404, 770, 1452, 1453, 1454, 770,
3177 1448, 1403, 1403, 771, 1455, 1456, 1407, 1403,
3178 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403,
3179 1405, 1406, 1407, 771, 1408, 1409, 1410, 1403,
3180 1404, 1403, 1403, 1403, 1405, 1406, 1407, 772,
3181 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403,
3182 1405, 1406, 1407, 773, 1408, 1409, 1410, 1403,
3183 1404, 1395, 1395, 1395, 1397, 1398, 1399, 771,
3184 1400, 1401, 1402, 1395, 1396, 1395, 1395, 1395,
3185 1397, 1398, 1399, 772, 1400, 1401, 1402, 1395,
3186 1396, 1395, 1395, 1395, 1397, 1398, 1399, 773,
3187 1400, 1401, 1402, 1395, 1396, 1458, 769, 1460,
3188 1459, 1461, 770, 1463, 1462, 771, 1464, 775,
3189 1464, 1465, 1464, 777, 1466, 1467, 1468, 1469,
3190 1470, 1471, 1472, 1469, 781, 777, 1466, 1474,
3191 1475, 1473, 782, 783, 1476, 1473, 781, 1479,
3192 1480, 1481, 1482, 1477, 1483, 1484, 1485, 1477,
3193 1478, 1488, 1489, 1490, 1491, 1486, 1492, 1493,
3194 1494, 1486, 1487, 1496, 1495, 1498, 1497, 781,
3195 1499, 782, 1499, 783, 1499, 787, 1500, 1501,
3196 1502, 1503, 1504, 1505, 1506, 1503, 789, 787,
3197 1500, 1508, 1507, 790, 791, 1509, 1507, 789,
3198 1511, 1510, 1513, 1512, 789, 1514, 790, 1514,
3199 791, 1514, 795, 1517, 1518, 1520, 1521, 1522,
3200 1516, 1523, 1524, 1525, 1526, 1527, 1528, 1529,
3201 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537,
3202 1538, 1539, 1540, 1541, 1542, 1544, 1545, 1546,
3203 1547, 1548, 1549, 795, 795, 1515, 1516, 1519,
3204 1543, 1550, 1515, 1046, 795, 795, 1552, 1553,
3205 865, 846, 1554, 846, 1555, 1556, 1557, 1558,
3206 1559, 1560, 1561, 1562, 1563, 1564, 1565, 920,
3207 1566, 896, 1567, 1568, 1569, 1570, 1571, 1572,
3208 1573, 1574, 1575, 1576, 1577, 1578, 795, 795,
3209 795, 801, 941, 1551, 1046, 1579, 795, 795,
3210 795, 1046, 1579, 1046, 1046, 795, 1579, 795,
3211 1579, 795, 1579, 795, 1046, 1046, 1046, 1046,
3212 1046, 1579, 795, 1046, 1046, 1046, 795, 1046,
3213 795, 1579, 795, 1046, 1046, 1046, 1046, 795,
3214 1579, 1046, 795, 1046, 795, 1046, 795, 1046,
3215 1046, 795, 1046, 1579, 795, 1046, 795, 1046,
3216 795, 1046, 1579, 1046, 795, 1579, 1046, 795,
3217 1046, 795, 1579, 1046, 1046, 1046, 1046, 1046,
3218 1579, 795, 795, 1046, 795, 1046, 1579, 1046,
3219 795, 1579, 1046, 1046, 1579, 795, 795, 1046,
3220 795, 1046, 795, 1046, 1579, 1580, 1581, 1582,
3221 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590,
3222 1091, 1591, 1592, 1593, 1594, 1595, 1596, 1597,
3223 1598, 1599, 1600, 1601, 1602, 1601, 1603, 1604,
3224 1605, 1606, 1607, 1047, 1579, 1608, 1609, 1610,
3225 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618,
3226 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626,
3227 1101, 1627, 1628, 1629, 1068, 1630, 1631, 1632,
3228 1633, 1634, 1635, 1047, 1636, 1637, 1638, 1639,
3229 1640, 1641, 1642, 1643, 1050, 1644, 1047, 1050,
3230 1645, 1646, 1647, 1648, 1059, 1579, 1649, 1650,
3231 1651, 1652, 1079, 1653, 1654, 1059, 1655, 1656,
3232 1657, 1658, 1659, 1047, 1579, 1660, 1619, 1661,
3233 1662, 1663, 1059, 1664, 1665, 1050, 1047, 1059,
3234 801, 1579, 1629, 1047, 1050, 1059, 801, 1059,
3235 801, 1666, 1059, 1579, 801, 1050, 1667, 1668,
3236 1050, 1669, 1670, 1057, 1671, 1672, 1673, 1674,
3237 1675, 1625, 1676, 1677, 1678, 1679, 1680, 1681,
3238 1682, 1683, 1684, 1685, 1686, 1687, 1644, 1688,
3239 1050, 1059, 801, 1579, 1689, 1690, 1059, 1047,
3240 1579, 801, 1047, 1579, 1050, 1691, 1107, 1692,
3241 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1047,
3242 1700, 1701, 1702, 1703, 1704, 1705, 1047, 1059,
3243 1579, 1707, 1708, 1709, 1710, 1711, 1712, 1713,
3244 1714, 1715, 1716, 1717, 1713, 1719, 1720, 1721,
3245 1722, 1706, 1718, 1706, 1579, 1706, 1579,
3246}
3247
3248var _hcltok_trans_targs []int16 = []int16{
3249 1464, 1, 1464, 1464, 1464, 3, 4, 1472,
3250 1464, 5, 1473, 6, 7, 9, 10, 287,
3251 13, 14, 15, 16, 17, 288, 289, 20,
3252 290, 22, 23, 291, 292, 293, 294, 295,
3253 296, 297, 298, 299, 300, 329, 349, 354,
3254 128, 129, 130, 357, 152, 372, 376, 1464,
3255 11, 12, 18, 19, 21, 24, 25, 26,
3256 27, 28, 29, 30, 31, 32, 33, 65,
3257 106, 121, 132, 155, 171, 284, 34, 35,
3258 36, 37, 38, 39, 40, 41, 42, 43,
3259 44, 45, 46, 47, 48, 49, 50, 51,
3260 52, 53, 54, 55, 56, 57, 58, 59,
3261 60, 61, 62, 63, 64, 66, 67, 68,
3262 69, 70, 71, 72, 73, 74, 75, 76,
3263 77, 78, 79, 80, 81, 82, 83, 84,
3264 85, 86, 87, 88, 89, 90, 91, 92,
3265 93, 94, 95, 96, 97, 98, 99, 100,
3266 101, 102, 103, 104, 105, 107, 108, 109,
3267 110, 111, 112, 113, 114, 115, 116, 117,
3268 118, 119, 120, 122, 123, 124, 125, 126,
3269 127, 131, 133, 134, 135, 136, 137, 138,
3270 139, 140, 141, 142, 143, 144, 145, 146,
3271 147, 148, 149, 150, 151, 153, 154, 156,
3272 157, 158, 159, 160, 161, 162, 163, 164,
3273 165, 166, 167, 168, 169, 170, 172, 204,
3274 228, 231, 232, 234, 243, 244, 247, 251,
3275 269, 276, 278, 280, 282, 173, 174, 175,
3276 176, 177, 178, 179, 180, 181, 182, 183,
3277 184, 185, 186, 187, 188, 189, 190, 191,
3278 192, 193, 194, 195, 196, 197, 198, 199,
3279 200, 201, 202, 203, 205, 206, 207, 208,
3280 209, 210, 211, 212, 213, 214, 215, 216,
3281 217, 218, 219, 220, 221, 222, 223, 224,
3282 225, 226, 227, 229, 230, 233, 235, 236,
3283 237, 238, 239, 240, 241, 242, 245, 246,
3284 248, 249, 250, 252, 253, 254, 255, 256,
3285 257, 258, 259, 260, 261, 262, 263, 264,
3286 265, 266, 267, 268, 270, 271, 272, 273,
3287 274, 275, 277, 279, 281, 283, 285, 286,
3288 301, 302, 303, 304, 305, 306, 307, 308,
3289 309, 310, 311, 312, 313, 314, 315, 316,
3290 317, 318, 319, 320, 321, 322, 323, 324,
3291 325, 326, 327, 328, 330, 331, 332, 333,
3292 334, 335, 336, 337, 338, 339, 340, 341,
3293 342, 343, 344, 345, 346, 347, 348, 350,
3294 351, 352, 353, 355, 356, 358, 359, 360,
3295 361, 362, 363, 364, 365, 366, 367, 368,
3296 369, 370, 371, 373, 374, 375, 377, 383,
3297 405, 410, 412, 414, 378, 379, 380, 381,
3298 382, 384, 385, 386, 387, 388, 389, 390,
3299 391, 392, 393, 394, 395, 396, 397, 398,
3300 399, 400, 401, 402, 403, 404, 406, 407,
3301 408, 409, 411, 413, 415, 1464, 1477, 438,
3302 439, 440, 441, 418, 442, 443, 444, 445,
3303 446, 447, 448, 449, 450, 451, 452, 453,
3304 454, 455, 456, 457, 458, 459, 460, 461,
3305 462, 463, 464, 465, 466, 467, 468, 470,
3306 471, 472, 473, 474, 475, 476, 477, 478,
3307 479, 480, 481, 482, 483, 484, 485, 486,
3308 420, 487, 488, 489, 490, 491, 492, 493,
3309 494, 495, 496, 497, 498, 499, 500, 501,
3310 502, 503, 504, 419, 505, 506, 507, 508,
3311 509, 511, 512, 513, 514, 515, 516, 517,
3312 518, 519, 520, 521, 522, 523, 524, 526,
3313 527, 528, 529, 530, 531, 535, 537, 538,
3314 539, 540, 435, 541, 542, 543, 544, 545,
3315 546, 547, 548, 549, 550, 551, 552, 553,
3316 554, 555, 557, 558, 560, 561, 562, 563,
3317 564, 565, 433, 566, 567, 568, 569, 570,
3318 571, 572, 573, 574, 576, 608, 632, 635,
3319 636, 638, 647, 648, 651, 655, 673, 533,
3320 680, 682, 684, 686, 577, 578, 579, 580,
3321 581, 582, 583, 584, 585, 586, 587, 588,
3322 589, 590, 591, 592, 593, 594, 595, 596,
3323 597, 598, 599, 600, 601, 602, 603, 604,
3324 605, 606, 607, 609, 610, 611, 612, 613,
3325 614, 615, 616, 617, 618, 619, 620, 621,
3326 622, 623, 624, 625, 626, 627, 628, 629,
3327 630, 631, 633, 634, 637, 639, 640, 641,
3328 642, 643, 644, 645, 646, 649, 650, 652,
3329 653, 654, 656, 657, 658, 659, 660, 661,
3330 662, 663, 664, 665, 666, 667, 668, 669,
3331 670, 671, 672, 674, 675, 676, 677, 678,
3332 679, 681, 683, 685, 687, 689, 690, 1464,
3333 1464, 691, 828, 829, 760, 830, 831, 832,
3334 833, 834, 835, 789, 836, 725, 837, 838,
3335 839, 840, 841, 842, 843, 844, 745, 845,
3336 846, 847, 848, 849, 850, 851, 852, 853,
3337 854, 770, 855, 857, 858, 859, 860, 861,
3338 862, 863, 864, 865, 866, 703, 867, 868,
3339 869, 870, 871, 872, 873, 874, 875, 741,
3340 876, 877, 878, 879, 880, 811, 882, 883,
3341 886, 888, 889, 890, 891, 892, 893, 896,
3342 897, 899, 900, 901, 903, 904, 905, 906,
3343 907, 908, 909, 910, 911, 912, 913, 915,
3344 916, 917, 918, 921, 923, 924, 926, 928,
3345 1515, 1517, 1518, 1516, 931, 932, 1515, 934,
3346 1541, 1541, 1541, 1543, 1544, 1542, 939, 940,
3347 1545, 1546, 1550, 1550, 1550, 1551, 946, 947,
3348 1552, 1553, 1557, 1558, 1557, 973, 974, 975,
3349 976, 953, 977, 978, 979, 980, 981, 982,
3350 983, 984, 985, 986, 987, 988, 989, 990,
3351 991, 992, 993, 994, 995, 996, 997, 998,
3352 999, 1000, 1001, 1002, 1003, 1005, 1006, 1007,
3353 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
3354 1016, 1017, 1018, 1019, 1020, 1021, 955, 1022,
3355 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030,
3356 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038,
3357 1039, 954, 1040, 1041, 1042, 1043, 1044, 1046,
3358 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054,
3359 1055, 1056, 1057, 1058, 1059, 1061, 1062, 1063,
3360 1064, 1065, 1066, 1070, 1072, 1073, 1074, 1075,
3361 970, 1076, 1077, 1078, 1079, 1080, 1081, 1082,
3362 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090,
3363 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100,
3364 968, 1101, 1102, 1103, 1104, 1105, 1106, 1107,
3365 1108, 1109, 1111, 1143, 1167, 1170, 1171, 1173,
3366 1182, 1183, 1186, 1190, 1208, 1068, 1215, 1217,
3367 1219, 1221, 1112, 1113, 1114, 1115, 1116, 1117,
3368 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125,
3369 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133,
3370 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141,
3371 1142, 1144, 1145, 1146, 1147, 1148, 1149, 1150,
3372 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158,
3373 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166,
3374 1168, 1169, 1172, 1174, 1175, 1176, 1177, 1178,
3375 1179, 1180, 1181, 1184, 1185, 1187, 1188, 1189,
3376 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198,
3377 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206,
3378 1207, 1209, 1210, 1211, 1212, 1213, 1214, 1216,
3379 1218, 1220, 1222, 1224, 1225, 1557, 1557, 1226,
3380 1363, 1364, 1295, 1365, 1366, 1367, 1368, 1369,
3381 1370, 1324, 1371, 1260, 1372, 1373, 1374, 1375,
3382 1376, 1377, 1378, 1379, 1280, 1380, 1381, 1382,
3383 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1305,
3384 1390, 1392, 1393, 1394, 1395, 1396, 1397, 1398,
3385 1399, 1400, 1401, 1238, 1402, 1403, 1404, 1405,
3386 1406, 1407, 1408, 1409, 1410, 1276, 1411, 1412,
3387 1413, 1414, 1415, 1346, 1417, 1418, 1421, 1423,
3388 1424, 1425, 1426, 1427, 1428, 1431, 1432, 1434,
3389 1435, 1436, 1438, 1439, 1440, 1441, 1442, 1443,
3390 1444, 1445, 1446, 1447, 1448, 1450, 1451, 1452,
3391 1453, 1456, 1458, 1459, 1461, 1463, 1465, 1464,
3392 1466, 1467, 1464, 1468, 1464, 1469, 1470, 1471,
3393 1474, 1475, 1476, 1464, 1478, 1464, 1479, 1464,
3394 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487,
3395 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495,
3396 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503,
3397 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511,
3398 1512, 1513, 1514, 1464, 1464, 1464, 1464, 1464,
3399 2, 1464, 1464, 8, 1464, 1464, 1464, 1464,
3400 1464, 416, 417, 421, 422, 423, 424, 425,
3401 426, 427, 428, 429, 430, 431, 432, 434,
3402 436, 437, 469, 510, 525, 532, 534, 536,
3403 556, 559, 575, 688, 1464, 1464, 1464, 692,
3404 693, 694, 695, 696, 697, 698, 699, 700,
3405 701, 702, 704, 705, 706, 707, 708, 709,
3406 710, 711, 712, 713, 714, 715, 716, 717,
3407 718, 719, 720, 721, 722, 723, 724, 726,
3408 727, 728, 729, 730, 731, 732, 733, 734,
3409 735, 736, 737, 738, 739, 740, 742, 743,
3410 744, 746, 747, 748, 749, 750, 751, 752,
3411 753, 754, 755, 756, 757, 758, 759, 761,
3412 762, 763, 764, 765, 766, 767, 768, 769,
3413 771, 772, 773, 774, 775, 776, 777, 778,
3414 779, 780, 781, 782, 783, 784, 785, 786,
3415 787, 788, 790, 791, 792, 793, 794, 795,
3416 796, 797, 798, 799, 800, 801, 802, 803,
3417 804, 805, 806, 807, 808, 809, 810, 812,
3418 813, 814, 815, 816, 817, 818, 819, 820,
3419 821, 822, 823, 824, 825, 826, 827, 856,
3420 881, 884, 885, 887, 894, 895, 898, 902,
3421 914, 919, 920, 922, 925, 927, 1515, 1515,
3422 1534, 1536, 1519, 1515, 1538, 1539, 1540, 1515,
3423 929, 930, 933, 1515, 1516, 929, 930, 1519,
3424 931, 932, 933, 1515, 1516, 929, 930, 1519,
3425 931, 932, 933, 1520, 1525, 1521, 1522, 1524,
3426 1531, 1532, 1533, 1517, 1521, 1522, 1524, 1531,
3427 1532, 1533, 1518, 1523, 1526, 1527, 1528, 1529,
3428 1530, 1517, 1521, 1522, 1524, 1531, 1532, 1533,
3429 1520, 1525, 1523, 1526, 1527, 1528, 1529, 1530,
3430 1518, 1523, 1526, 1527, 1528, 1529, 1530, 1520,
3431 1525, 1515, 1535, 1515, 1515, 1537, 1515, 1515,
3432 1515, 935, 936, 942, 943, 1541, 1547, 1548,
3433 1549, 1541, 937, 938, 941, 1541, 1542, 1541,
3434 936, 937, 938, 939, 940, 941, 1541, 1542,
3435 1541, 936, 937, 938, 939, 940, 941, 1541,
3436 1541, 1541, 1541, 1541, 944, 949, 950, 1550,
3437 1554, 1555, 1556, 1550, 945, 948, 1550, 1550,
3438 1550, 1550, 1550, 1557, 1559, 1560, 1561, 1562,
3439 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570,
3440 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578,
3441 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586,
3442 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1557,
3443 951, 952, 956, 957, 958, 959, 960, 961,
3444 962, 963, 964, 965, 966, 967, 969, 971,
3445 972, 1004, 1045, 1060, 1067, 1069, 1071, 1091,
3446 1094, 1110, 1223, 1557, 1227, 1228, 1229, 1230,
3447 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1239,
3448 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247,
3449 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255,
3450 1256, 1257, 1258, 1259, 1261, 1262, 1263, 1264,
3451 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272,
3452 1273, 1274, 1275, 1277, 1278, 1279, 1281, 1282,
3453 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290,
3454 1291, 1292, 1293, 1294, 1296, 1297, 1298, 1299,
3455 1300, 1301, 1302, 1303, 1304, 1306, 1307, 1308,
3456 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316,
3457 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1325,
3458 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333,
3459 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341,
3460 1342, 1343, 1344, 1345, 1347, 1348, 1349, 1350,
3461 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358,
3462 1359, 1360, 1361, 1362, 1391, 1416, 1419, 1420,
3463 1422, 1429, 1430, 1433, 1437, 1449, 1454, 1455,
3464 1457, 1460, 1462,
3465}
3466
3467var _hcltok_trans_actions []byte = []byte{
3468 151, 0, 93, 147, 109, 0, 0, 201,
3469 143, 0, 13, 0, 0, 0, 0, 0,
3470 0, 0, 0, 0, 0, 0, 0, 0,
3471 0, 0, 0, 0, 0, 0, 0, 0,
3472 0, 0, 0, 0, 0, 0, 0, 0,
3473 0, 0, 0, 0, 0, 0, 0, 123,
3474 0, 0, 0, 0, 0, 0, 0, 0,
3475 0, 0, 0, 0, 0, 0, 0, 0,
3476 0, 0, 0, 0, 0, 0, 0, 0,
3477 0, 0, 0, 0, 0, 0, 0, 0,
3478 0, 0, 0, 0, 0, 0, 0, 0,
3479 0, 0, 0, 0, 0, 0, 0, 0,
3480 0, 0, 0, 0, 0, 0, 0, 0,
3481 0, 0, 0, 0, 0, 0, 0, 0,
3482 0, 0, 0, 0, 0, 0, 0, 0,
3483 0, 0, 0, 0, 0, 0, 0, 0,
3484 0, 0, 0, 0, 0, 0, 0, 0,
3485 0, 0, 0, 0, 0, 0, 0, 0,
3486 0, 0, 0, 0, 0, 0, 0, 0,
3487 0, 0, 0, 0, 0, 0, 0, 0,
3488 0, 0, 0, 0, 0, 0, 0, 0,
3489 0, 0, 0, 0, 0, 0, 0, 0,
3490 0, 0, 0, 0, 0, 0, 0, 0,
3491 0, 0, 0, 0, 0, 0, 0, 0,
3492 0, 0, 0, 0, 0, 0, 0, 0,
3493 0, 0, 0, 0, 0, 0, 0, 0,
3494 0, 0, 0, 0, 0, 0, 0, 0,
3495 0, 0, 0, 0, 0, 0, 0, 0,
3496 0, 0, 0, 0, 0, 0, 0, 0,
3497 0, 0, 0, 0, 0, 0, 0, 0,
3498 0, 0, 0, 0, 0, 0, 0, 0,
3499 0, 0, 0, 0, 0, 0, 0, 0,
3500 0, 0, 0, 0, 0, 0, 0, 0,
3501 0, 0, 0, 0, 0, 0, 0, 0,
3502 0, 0, 0, 0, 0, 0, 0, 0,
3503 0, 0, 0, 0, 0, 0, 0, 0,
3504 0, 0, 0, 0, 0, 0, 0, 0,
3505 0, 0, 0, 0, 0, 0, 0, 0,
3506 0, 0, 0, 0, 0, 0, 0, 0,
3507 0, 0, 0, 0, 0, 0, 0, 0,
3508 0, 0, 0, 0, 0, 0, 0, 0,
3509 0, 0, 0, 0, 0, 0, 0, 0,
3510 0, 0, 0, 0, 0, 0, 0, 0,
3511 0, 0, 0, 0, 0, 0, 0, 0,
3512 0, 0, 0, 0, 0, 0, 0, 0,
3513 0, 0, 0, 0, 0, 0, 0, 0,
3514 0, 0, 0, 0, 0, 0, 0, 0,
3515 0, 0, 0, 0, 0, 0, 0, 0,
3516 0, 0, 0, 0, 0, 0, 0, 0,
3517 0, 0, 0, 0, 0, 0, 0, 0,
3518 0, 0, 0, 0, 0, 0, 0, 0,
3519 0, 0, 0, 0, 0, 0, 0, 0,
3520 0, 0, 0, 0, 0, 145, 198, 0,
3521 0, 0, 0, 0, 0, 0, 0, 0,
3522 0, 0, 0, 0, 0, 0, 0, 0,
3523 0, 0, 0, 0, 0, 0, 0, 0,
3524 0, 0, 0, 0, 0, 0, 0, 0,
3525 0, 0, 0, 0, 0, 0, 0, 0,
3526 0, 0, 0, 0, 0, 0, 0, 0,
3527 0, 0, 0, 0, 0, 0, 0, 0,
3528 0, 0, 0, 0, 0, 0, 0, 0,
3529 0, 0, 0, 0, 0, 0, 0, 0,
3530 0, 0, 0, 0, 0, 0, 0, 0,
3531 0, 0, 0, 0, 0, 0, 0, 0,
3532 0, 0, 0, 0, 0, 0, 0, 0,
3533 0, 0, 0, 0, 0, 0, 0, 0,
3534 0, 0, 0, 0, 0, 0, 0, 0,
3535 0, 0, 0, 0, 0, 0, 0, 0,
3536 0, 0, 0, 0, 0, 0, 0, 0,
3537 0, 0, 0, 0, 0, 0, 0, 0,
3538 0, 0, 0, 0, 0, 0, 0, 0,
3539 0, 0, 0, 0, 0, 0, 0, 0,
3540 0, 0, 0, 0, 0, 0, 0, 0,
3541 0, 0, 0, 0, 0, 0, 0, 0,
3542 0, 0, 0, 0, 0, 0, 0, 0,
3543 0, 0, 0, 0, 0, 0, 0, 0,
3544 0, 0, 0, 0, 0, 0, 0, 0,
3545 0, 0, 0, 0, 0, 0, 0, 0,
3546 0, 0, 0, 0, 0, 0, 0, 0,
3547 0, 0, 0, 0, 0, 0, 0, 0,
3548 0, 0, 0, 0, 0, 0, 0, 0,
3549 0, 0, 0, 0, 0, 0, 0, 0,
3550 0, 0, 0, 0, 0, 0, 0, 0,
3551 0, 0, 0, 0, 0, 0, 0, 149,
3552 127, 0, 0, 0, 0, 0, 0, 0,
3553 0, 0, 0, 0, 0, 0, 0, 0,
3554 0, 0, 0, 0, 0, 0, 0, 0,
3555 0, 0, 0, 0, 0, 0, 0, 0,
3556 0, 0, 0, 0, 0, 0, 0, 0,
3557 0, 0, 0, 0, 0, 0, 0, 0,
3558 0, 0, 0, 0, 0, 0, 0, 0,
3559 0, 0, 0, 0, 0, 0, 0, 0,
3560 0, 0, 0, 0, 0, 0, 0, 0,
3561 0, 0, 0, 0, 0, 0, 0, 0,
3562 0, 0, 0, 0, 0, 0, 0, 0,
3563 0, 0, 0, 0, 0, 0, 0, 0,
3564 35, 13, 13, 13, 0, 0, 37, 0,
3565 57, 43, 55, 180, 180, 180, 0, 0,
3566 0, 0, 77, 63, 75, 186, 0, 0,
3567 0, 0, 87, 192, 91, 0, 0, 0,
3568 0, 0, 0, 0, 0, 0, 0, 0,
3569 0, 0, 0, 0, 0, 0, 0, 0,
3570 0, 0, 0, 0, 0, 0, 0, 0,
3571 0, 0, 0, 0, 0, 0, 0, 0,
3572 0, 0, 0, 0, 0, 0, 0, 0,
3573 0, 0, 0, 0, 0, 0, 0, 0,
3574 0, 0, 0, 0, 0, 0, 0, 0,
3575 0, 0, 0, 0, 0, 0, 0, 0,
3576 0, 0, 0, 0, 0, 0, 0, 0,
3577 0, 0, 0, 0, 0, 0, 0, 0,
3578 0, 0, 0, 0, 0, 0, 0, 0,
3579 0, 0, 0, 0, 0, 0, 0, 0,
3580 0, 0, 0, 0, 0, 0, 0, 0,
3581 0, 0, 0, 0, 0, 0, 0, 0,
3582 0, 0, 0, 0, 0, 0, 0, 0,
3583 0, 0, 0, 0, 0, 0, 0, 0,
3584 0, 0, 0, 0, 0, 0, 0, 0,
3585 0, 0, 0, 0, 0, 0, 0, 0,
3586 0, 0, 0, 0, 0, 0, 0, 0,
3587 0, 0, 0, 0, 0, 0, 0, 0,
3588 0, 0, 0, 0, 0, 0, 0, 0,
3589 0, 0, 0, 0, 0, 0, 0, 0,
3590 0, 0, 0, 0, 0, 0, 0, 0,
3591 0, 0, 0, 0, 0, 0, 0, 0,
3592 0, 0, 0, 0, 0, 0, 0, 0,
3593 0, 0, 0, 0, 0, 0, 0, 0,
3594 0, 0, 0, 0, 0, 0, 0, 0,
3595 0, 0, 0, 0, 0, 0, 0, 0,
3596 0, 0, 0, 0, 0, 0, 0, 0,
3597 0, 0, 0, 0, 0, 0, 0, 0,
3598 0, 0, 0, 0, 0, 89, 81, 0,
3599 0, 0, 0, 0, 0, 0, 0, 0,
3600 0, 0, 0, 0, 0, 0, 0, 0,
3601 0, 0, 0, 0, 0, 0, 0, 0,
3602 0, 0, 0, 0, 0, 0, 0, 0,
3603 0, 0, 0, 0, 0, 0, 0, 0,
3604 0, 0, 0, 0, 0, 0, 0, 0,
3605 0, 0, 0, 0, 0, 0, 0, 0,
3606 0, 0, 0, 0, 0, 0, 0, 0,
3607 0, 0, 0, 0, 0, 0, 0, 0,
3608 0, 0, 0, 0, 0, 0, 0, 0,
3609 0, 0, 0, 0, 0, 0, 0, 0,
3610 0, 0, 0, 0, 0, 0, 0, 95,
3611 0, 0, 121, 210, 113, 0, 13, 204,
3612 13, 0, 0, 115, 0, 117, 0, 125,
3613 0, 0, 0, 0, 0, 0, 0, 0,
3614 0, 0, 0, 0, 0, 0, 0, 0,
3615 0, 0, 0, 0, 0, 0, 13, 13,
3616 13, 207, 207, 207, 207, 207, 207, 13,
3617 13, 207, 13, 129, 141, 137, 99, 105,
3618 0, 135, 131, 0, 103, 97, 111, 101,
3619 133, 0, 0, 0, 0, 0, 0, 0,
3620 0, 0, 0, 0, 0, 0, 0, 0,
3621 0, 0, 0, 0, 0, 0, 0, 0,
3622 0, 0, 0, 0, 107, 119, 139, 0,
3623 0, 0, 0, 0, 0, 0, 0, 0,
3624 0, 0, 0, 0, 0, 0, 0, 0,
3625 0, 0, 0, 0, 0, 0, 0, 0,
3626 0, 0, 0, 0, 0, 0, 0, 0,
3627 0, 0, 0, 0, 0, 0, 0, 0,
3628 0, 0, 0, 0, 0, 0, 0, 0,
3629 0, 0, 0, 0, 0, 0, 0, 0,
3630 0, 0, 0, 0, 0, 0, 0, 0,
3631 0, 0, 0, 0, 0, 0, 0, 0,
3632 0, 0, 0, 0, 0, 0, 0, 0,
3633 0, 0, 0, 0, 0, 0, 0, 0,
3634 0, 0, 0, 0, 0, 0, 0, 0,
3635 0, 0, 0, 0, 0, 0, 0, 0,
3636 0, 0, 0, 0, 0, 0, 0, 0,
3637 0, 0, 0, 0, 0, 0, 0, 0,
3638 0, 0, 0, 0, 0, 0, 0, 0,
3639 0, 0, 0, 0, 0, 0, 0, 0,
3640 0, 0, 0, 0, 0, 0, 21, 19,
3641 0, 0, 13, 23, 0, 13, 13, 29,
3642 0, 0, 0, 153, 174, 1, 1, 174,
3643 1, 1, 1, 156, 177, 3, 3, 177,
3644 3, 3, 3, 0, 0, 0, 0, 13,
3645 13, 13, 13, 174, 1, 1, 174, 174,
3646 174, 174, 174, 1, 1, 174, 174, 174,
3647 174, 177, 3, 3, 177, 177, 177, 177,
3648 1, 1, 0, 0, 13, 13, 13, 13,
3649 177, 3, 3, 177, 177, 177, 177, 3,
3650 3, 31, 0, 25, 15, 0, 27, 17,
3651 33, 0, 0, 0, 0, 45, 0, 183,
3652 183, 51, 0, 0, 0, 162, 213, 159,
3653 5, 5, 5, 5, 5, 5, 168, 217,
3654 165, 7, 7, 7, 7, 7, 7, 47,
3655 39, 49, 41, 53, 0, 0, 0, 65,
3656 0, 189, 189, 71, 0, 0, 67, 59,
3657 69, 61, 73, 79, 0, 0, 0, 0,
3658 0, 0, 0, 0, 0, 0, 0, 0,
3659 0, 0, 0, 0, 0, 0, 0, 0,
3660 0, 0, 13, 13, 13, 195, 195, 195,
3661 195, 195, 195, 13, 13, 195, 13, 83,
3662 0, 0, 0, 0, 0, 0, 0, 0,
3663 0, 0, 0, 0, 0, 0, 0, 0,
3664 0, 0, 0, 0, 0, 0, 0, 0,
3665 0, 0, 0, 85, 0, 0, 0, 0,
3666 0, 0, 0, 0, 0, 0, 0, 0,
3667 0, 0, 0, 0, 0, 0, 0, 0,
3668 0, 0, 0, 0, 0, 0, 0, 0,
3669 0, 0, 0, 0, 0, 0, 0, 0,
3670 0, 0, 0, 0, 0, 0, 0, 0,
3671 0, 0, 0, 0, 0, 0, 0, 0,
3672 0, 0, 0, 0, 0, 0, 0, 0,
3673 0, 0, 0, 0, 0, 0, 0, 0,
3674 0, 0, 0, 0, 0, 0, 0, 0,
3675 0, 0, 0, 0, 0, 0, 0, 0,
3676 0, 0, 0, 0, 0, 0, 0, 0,
3677 0, 0, 0, 0, 0, 0, 0, 0,
3678 0, 0, 0, 0, 0, 0, 0, 0,
3679 0, 0, 0, 0, 0, 0, 0, 0,
3680 0, 0, 0, 0, 0, 0, 0, 0,
3681 0, 0, 0, 0, 0, 0, 0, 0,
3682 0, 0, 0, 0, 0, 0, 0, 0,
3683 0, 0, 0,
3684}
3685
3686var _hcltok_to_state_actions []byte = []byte{
3687 0, 0, 0, 0, 0, 0, 0, 0,
3688 0, 0, 0, 0, 0, 0, 0, 0,
3689 0, 0, 0, 0, 0, 0, 0, 0,
3690 0, 0, 0, 0, 0, 0, 0, 0,
3691 0, 0, 0, 0, 0, 0, 0, 0,
3692 0, 0, 0, 0, 0, 0, 0, 0,
3693 0, 0, 0, 0, 0, 0, 0, 0,
3694 0, 0, 0, 0, 0, 0, 0, 0,
3695 0, 0, 0, 0, 0, 0, 0, 0,
3696 0, 0, 0, 0, 0, 0, 0, 0,
3697 0, 0, 0, 0, 0, 0, 0, 0,
3698 0, 0, 0, 0, 0, 0, 0, 0,
3699 0, 0, 0, 0, 0, 0, 0, 0,
3700 0, 0, 0, 0, 0, 0, 0, 0,
3701 0, 0, 0, 0, 0, 0, 0, 0,
3702 0, 0, 0, 0, 0, 0, 0, 0,
3703 0, 0, 0, 0, 0, 0, 0, 0,
3704 0, 0, 0, 0, 0, 0, 0, 0,
3705 0, 0, 0, 0, 0, 0, 0, 0,
3706 0, 0, 0, 0, 0, 0, 0, 0,
3707 0, 0, 0, 0, 0, 0, 0, 0,
3708 0, 0, 0, 0, 0, 0, 0, 0,
3709 0, 0, 0, 0, 0, 0, 0, 0,
3710 0, 0, 0, 0, 0, 0, 0, 0,
3711 0, 0, 0, 0, 0, 0, 0, 0,
3712 0, 0, 0, 0, 0, 0, 0, 0,
3713 0, 0, 0, 0, 0, 0, 0, 0,
3714 0, 0, 0, 0, 0, 0, 0, 0,
3715 0, 0, 0, 0, 0, 0, 0, 0,
3716 0, 0, 0, 0, 0, 0, 0, 0,
3717 0, 0, 0, 0, 0, 0, 0, 0,
3718 0, 0, 0, 0, 0, 0, 0, 0,
3719 0, 0, 0, 0, 0, 0, 0, 0,
3720 0, 0, 0, 0, 0, 0, 0, 0,
3721 0, 0, 0, 0, 0, 0, 0, 0,
3722 0, 0, 0, 0, 0, 0, 0, 0,
3723 0, 0, 0, 0, 0, 0, 0, 0,
3724 0, 0, 0, 0, 0, 0, 0, 0,
3725 0, 0, 0, 0, 0, 0, 0, 0,
3726 0, 0, 0, 0, 0, 0, 0, 0,
3727 0, 0, 0, 0, 0, 0, 0, 0,
3728 0, 0, 0, 0, 0, 0, 0, 0,
3729 0, 0, 0, 0, 0, 0, 0, 0,
3730 0, 0, 0, 0, 0, 0, 0, 0,
3731 0, 0, 0, 0, 0, 0, 0, 0,
3732 0, 0, 0, 0, 0, 0, 0, 0,
3733 0, 0, 0, 0, 0, 0, 0, 0,
3734 0, 0, 0, 0, 0, 0, 0, 0,
3735 0, 0, 0, 0, 0, 0, 0, 0,
3736 0, 0, 0, 0, 0, 0, 0, 0,
3737 0, 0, 0, 0, 0, 0, 0, 0,
3738 0, 0, 0, 0, 0, 0, 0, 0,
3739 0, 0, 0, 0, 0, 0, 0, 0,
3740 0, 0, 0, 0, 0, 0, 0, 0,
3741 0, 0, 0, 0, 0, 0, 0, 0,
3742 0, 0, 0, 0, 0, 0, 0, 0,
3743 0, 0, 0, 0, 0, 0, 0, 0,
3744 0, 0, 0, 0, 0, 0, 0, 0,
3745 0, 0, 0, 0, 0, 0, 0, 0,
3746 0, 0, 0, 0, 0, 0, 0, 0,
3747 0, 0, 0, 0, 0, 0, 0, 0,
3748 0, 0, 0, 0, 0, 0, 0, 0,
3749 0, 0, 0, 0, 0, 0, 0, 0,
3750 0, 0, 0, 0, 0, 0, 0, 0,
3751 0, 0, 0, 0, 0, 0, 0, 0,
3752 0, 0, 0, 0, 0, 0, 0, 0,
3753 0, 0, 0, 0, 0, 0, 0, 0,
3754 0, 0, 0, 0, 0, 0, 0, 0,
3755 0, 0, 0, 0, 0, 0, 0, 0,
3756 0, 0, 0, 0, 0, 0, 0, 0,
3757 0, 0, 0, 0, 0, 0, 0, 0,
3758 0, 0, 0, 0, 0, 0, 0, 0,
3759 0, 0, 0, 0, 0, 0, 0, 0,
3760 0, 0, 0, 0, 0, 0, 0, 0,
3761 0, 0, 0, 0, 0, 0, 0, 0,
3762 0, 0, 0, 0, 0, 0, 0, 0,
3763 0, 0, 0, 0, 0, 0, 0, 0,
3764 0, 0, 0, 0, 0, 0, 0, 0,
3765 0, 0, 0, 0, 0, 0, 0, 0,
3766 0, 0, 0, 0, 0, 0, 0, 0,
3767 0, 0, 0, 0, 0, 0, 0, 0,
3768 0, 0, 0, 0, 0, 0, 0, 0,
3769 0, 0, 0, 0, 0, 0, 0, 0,
3770 0, 0, 0, 0, 0, 0, 0, 0,
3771 0, 0, 0, 0, 0, 0, 0, 0,
3772 0, 0, 0, 0, 0, 0, 0, 0,
3773 0, 0, 0, 0, 0, 0, 0, 0,
3774 0, 0, 0, 0, 0, 0, 0, 0,
3775 0, 0, 0, 0, 0, 0, 0, 0,
3776 0, 0, 0, 0, 0, 0, 0, 0,
3777 0, 0, 0, 0, 0, 0, 0, 0,
3778 0, 0, 0, 0, 0, 0, 0, 0,
3779 0, 0, 0, 0, 0, 0, 0, 0,
3780 0, 0, 0, 0, 0, 0, 0, 0,
3781 0, 0, 0, 0, 0, 0, 0, 0,
3782 0, 0, 0, 0, 0, 0, 0, 0,
3783 0, 0, 0, 0, 0, 0, 0, 0,
3784 0, 0, 0, 0, 0, 0, 0, 0,
3785 0, 0, 0, 0, 0, 0, 0, 0,
3786 0, 0, 0, 0, 0, 0, 0, 0,
3787 0, 0, 0, 0, 0, 0, 0, 0,
3788 0, 0, 0, 0, 0, 0, 0, 0,
3789 0, 0, 0, 0, 0, 0, 0, 0,
3790 0, 0, 0, 0, 0, 0, 0, 0,
3791 0, 0, 0, 0, 0, 0, 0, 0,
3792 0, 0, 0, 0, 0, 0, 0, 0,
3793 0, 0, 0, 0, 0, 0, 0, 0,
3794 0, 0, 0, 0, 0, 0, 0, 0,
3795 0, 0, 0, 0, 0, 0, 0, 0,
3796 0, 0, 0, 0, 0, 0, 0, 0,
3797 0, 0, 0, 0, 0, 0, 0, 0,
3798 0, 0, 0, 0, 0, 0, 0, 0,
3799 0, 0, 0, 0, 0, 0, 0, 0,
3800 0, 0, 0, 0, 0, 0, 0, 0,
3801 0, 0, 0, 0, 0, 0, 0, 0,
3802 0, 0, 0, 0, 0, 0, 0, 0,
3803 0, 0, 0, 0, 0, 0, 0, 0,
3804 0, 0, 0, 0, 0, 0, 0, 0,
3805 0, 0, 0, 0, 0, 0, 0, 0,
3806 0, 0, 0, 0, 0, 0, 0, 0,
3807 0, 0, 0, 0, 0, 0, 0, 0,
3808 0, 0, 0, 0, 0, 0, 0, 0,
3809 0, 0, 0, 0, 0, 0, 0, 0,
3810 0, 0, 0, 0, 0, 0, 0, 0,
3811 0, 0, 0, 0, 0, 0, 0, 0,
3812 0, 0, 0, 0, 0, 0, 0, 0,
3813 0, 0, 0, 0, 0, 0, 0, 0,
3814 0, 0, 0, 0, 0, 0, 0, 0,
3815 0, 0, 0, 0, 0, 0, 0, 0,
3816 0, 0, 0, 0, 0, 0, 0, 0,
3817 0, 0, 0, 0, 0, 0, 0, 0,
3818 0, 0, 0, 0, 0, 0, 0, 0,
3819 0, 0, 0, 0, 0, 0, 0, 0,
3820 0, 0, 0, 0, 0, 0, 0, 0,
3821 0, 0, 0, 0, 0, 0, 0, 0,
3822 0, 0, 0, 0, 0, 0, 0, 0,
3823 0, 0, 0, 0, 0, 0, 0, 0,
3824 0, 0, 0, 0, 0, 0, 0, 0,
3825 0, 0, 0, 0, 0, 0, 0, 0,
3826 0, 0, 0, 0, 0, 0, 0, 0,
3827 0, 0, 0, 0, 0, 0, 0, 0,
3828 0, 0, 0, 0, 0, 0, 0, 0,
3829 0, 0, 0, 0, 0, 0, 0, 0,
3830 0, 0, 0, 0, 0, 0, 0, 0,
3831 0, 0, 0, 0, 0, 0, 0, 0,
3832 0, 0, 0, 0, 0, 0, 0, 0,
3833 0, 0, 0, 0, 0, 0, 0, 0,
3834 0, 0, 0, 0, 0, 0, 0, 0,
3835 0, 0, 0, 0, 0, 0, 0, 0,
3836 0, 0, 0, 0, 0, 0, 0, 0,
3837 0, 0, 0, 0, 0, 0, 0, 0,
3838 0, 0, 0, 0, 0, 0, 0, 0,
3839 0, 0, 0, 0, 0, 0, 0, 0,
3840 0, 0, 0, 0, 0, 0, 0, 0,
3841 0, 0, 0, 0, 0, 0, 0, 0,
3842 0, 0, 0, 0, 0, 0, 0, 0,
3843 0, 0, 0, 0, 0, 0, 0, 0,
3844 0, 0, 0, 0, 0, 0, 0, 0,
3845 0, 0, 0, 0, 0, 0, 0, 0,
3846 0, 0, 0, 0, 0, 0, 0, 0,
3847 0, 0, 0, 0, 0, 0, 0, 0,
3848 0, 0, 0, 0, 0, 0, 0, 0,
3849 0, 0, 0, 0, 0, 0, 0, 0,
3850 0, 0, 0, 0, 0, 0, 0, 0,
3851 0, 0, 0, 0, 0, 0, 0, 0,
3852 0, 0, 0, 0, 0, 0, 0, 0,
3853 0, 0, 0, 0, 0, 0, 0, 0,
3854 0, 0, 0, 0, 0, 0, 0, 0,
3855 0, 0, 0, 0, 0, 0, 0, 0,
3856 0, 0, 0, 0, 0, 0, 0, 0,
3857 0, 0, 0, 0, 0, 0, 0, 0,
3858 0, 0, 0, 0, 0, 0, 0, 0,
3859 0, 0, 0, 0, 0, 0, 0, 0,
3860 0, 0, 0, 0, 0, 0, 0, 0,
3861 0, 0, 0, 0, 0, 0, 0, 0,
3862 0, 0, 0, 0, 0, 0, 0, 0,
3863 0, 0, 0, 0, 0, 0, 0, 0,
3864 0, 0, 0, 0, 0, 0, 0, 0,
3865 0, 0, 0, 0, 0, 0, 0, 0,
3866 0, 0, 0, 0, 0, 0, 0, 0,
3867 0, 0, 0, 0, 0, 0, 0, 0,
3868 0, 0, 0, 0, 0, 0, 0, 0,
3869 0, 0, 0, 0, 0, 0, 0, 0,
3870 9, 0, 0, 0, 0, 0, 0, 0,
3871 0, 0, 0, 0, 0, 0, 0, 0,
3872 0, 0, 0, 0, 0, 0, 0, 0,
3873 0, 0, 0, 0, 0, 0, 0, 0,
3874 0, 0, 0, 0, 0, 0, 0, 0,
3875 0, 0, 0, 0, 0, 0, 0, 0,
3876 0, 0, 0, 9, 0, 0, 0, 0,
3877 0, 0, 0, 0, 0, 0, 0, 0,
3878 0, 0, 0, 0, 0, 0, 0, 0,
3879 0, 0, 0, 0, 0, 171, 0, 0,
3880 0, 0, 0, 0, 0, 0, 171, 0,
3881 0, 0, 0, 0, 0, 9, 0, 0,
3882 0, 0, 0, 0, 0, 0, 0, 0,
3883 0, 0, 0, 0, 0, 0, 0, 0,
3884 0, 0, 0, 0, 0, 0, 0, 0,
3885 0, 0, 0, 0, 0, 0, 0, 0,
3886 0, 0,
3887}
3888
3889var _hcltok_from_state_actions []byte = []byte{
3890 0, 0, 0, 0, 0, 0, 0, 0,
3891 0, 0, 0, 0, 0, 0, 0, 0,
3892 0, 0, 0, 0, 0, 0, 0, 0,
3893 0, 0, 0, 0, 0, 0, 0, 0,
3894 0, 0, 0, 0, 0, 0, 0, 0,
3895 0, 0, 0, 0, 0, 0, 0, 0,
3896 0, 0, 0, 0, 0, 0, 0, 0,
3897 0, 0, 0, 0, 0, 0, 0, 0,
3898 0, 0, 0, 0, 0, 0, 0, 0,
3899 0, 0, 0, 0, 0, 0, 0, 0,
3900 0, 0, 0, 0, 0, 0, 0, 0,
3901 0, 0, 0, 0, 0, 0, 0, 0,
3902 0, 0, 0, 0, 0, 0, 0, 0,
3903 0, 0, 0, 0, 0, 0, 0, 0,
3904 0, 0, 0, 0, 0, 0, 0, 0,
3905 0, 0, 0, 0, 0, 0, 0, 0,
3906 0, 0, 0, 0, 0, 0, 0, 0,
3907 0, 0, 0, 0, 0, 0, 0, 0,
3908 0, 0, 0, 0, 0, 0, 0, 0,
3909 0, 0, 0, 0, 0, 0, 0, 0,
3910 0, 0, 0, 0, 0, 0, 0, 0,
3911 0, 0, 0, 0, 0, 0, 0, 0,
3912 0, 0, 0, 0, 0, 0, 0, 0,
3913 0, 0, 0, 0, 0, 0, 0, 0,
3914 0, 0, 0, 0, 0, 0, 0, 0,
3915 0, 0, 0, 0, 0, 0, 0, 0,
3916 0, 0, 0, 0, 0, 0, 0, 0,
3917 0, 0, 0, 0, 0, 0, 0, 0,
3918 0, 0, 0, 0, 0, 0, 0, 0,
3919 0, 0, 0, 0, 0, 0, 0, 0,
3920 0, 0, 0, 0, 0, 0, 0, 0,
3921 0, 0, 0, 0, 0, 0, 0, 0,
3922 0, 0, 0, 0, 0, 0, 0, 0,
3923 0, 0, 0, 0, 0, 0, 0, 0,
3924 0, 0, 0, 0, 0, 0, 0, 0,
3925 0, 0, 0, 0, 0, 0, 0, 0,
3926 0, 0, 0, 0, 0, 0, 0, 0,
3927 0, 0, 0, 0, 0, 0, 0, 0,
3928 0, 0, 0, 0, 0, 0, 0, 0,
3929 0, 0, 0, 0, 0, 0, 0, 0,
3930 0, 0, 0, 0, 0, 0, 0, 0,
3931 0, 0, 0, 0, 0, 0, 0, 0,
3932 0, 0, 0, 0, 0, 0, 0, 0,
3933 0, 0, 0, 0, 0, 0, 0, 0,
3934 0, 0, 0, 0, 0, 0, 0, 0,
3935 0, 0, 0, 0, 0, 0, 0, 0,
3936 0, 0, 0, 0, 0, 0, 0, 0,
3937 0, 0, 0, 0, 0, 0, 0, 0,
3938 0, 0, 0, 0, 0, 0, 0, 0,
3939 0, 0, 0, 0, 0, 0, 0, 0,
3940 0, 0, 0, 0, 0, 0, 0, 0,
3941 0, 0, 0, 0, 0, 0, 0, 0,
3942 0, 0, 0, 0, 0, 0, 0, 0,
3943 0, 0, 0, 0, 0, 0, 0, 0,
3944 0, 0, 0, 0, 0, 0, 0, 0,
3945 0, 0, 0, 0, 0, 0, 0, 0,
3946 0, 0, 0, 0, 0, 0, 0, 0,
3947 0, 0, 0, 0, 0, 0, 0, 0,
3948 0, 0, 0, 0, 0, 0, 0, 0,
3949 0, 0, 0, 0, 0, 0, 0, 0,
3950 0, 0, 0, 0, 0, 0, 0, 0,
3951 0, 0, 0, 0, 0, 0, 0, 0,
3952 0, 0, 0, 0, 0, 0, 0, 0,
3953 0, 0, 0, 0, 0, 0, 0, 0,
3954 0, 0, 0, 0, 0, 0, 0, 0,
3955 0, 0, 0, 0, 0, 0, 0, 0,
3956 0, 0, 0, 0, 0, 0, 0, 0,
3957 0, 0, 0, 0, 0, 0, 0, 0,
3958 0, 0, 0, 0, 0, 0, 0, 0,
3959 0, 0, 0, 0, 0, 0, 0, 0,
3960 0, 0, 0, 0, 0, 0, 0, 0,
3961 0, 0, 0, 0, 0, 0, 0, 0,
3962 0, 0, 0, 0, 0, 0, 0, 0,
3963 0, 0, 0, 0, 0, 0, 0, 0,
3964 0, 0, 0, 0, 0, 0, 0, 0,
3965 0, 0, 0, 0, 0, 0, 0, 0,
3966 0, 0, 0, 0, 0, 0, 0, 0,
3967 0, 0, 0, 0, 0, 0, 0, 0,
3968 0, 0, 0, 0, 0, 0, 0, 0,
3969 0, 0, 0, 0, 0, 0, 0, 0,
3970 0, 0, 0, 0, 0, 0, 0, 0,
3971 0, 0, 0, 0, 0, 0, 0, 0,
3972 0, 0, 0, 0, 0, 0, 0, 0,
3973 0, 0, 0, 0, 0, 0, 0, 0,
3974 0, 0, 0, 0, 0, 0, 0, 0,
3975 0, 0, 0, 0, 0, 0, 0, 0,
3976 0, 0, 0, 0, 0, 0, 0, 0,
3977 0, 0, 0, 0, 0, 0, 0, 0,
3978 0, 0, 0, 0, 0, 0, 0, 0,
3979 0, 0, 0, 0, 0, 0, 0, 0,
3980 0, 0, 0, 0, 0, 0, 0, 0,
3981 0, 0, 0, 0, 0, 0, 0, 0,
3982 0, 0, 0, 0, 0, 0, 0, 0,
3983 0, 0, 0, 0, 0, 0, 0, 0,
3984 0, 0, 0, 0, 0, 0, 0, 0,
3985 0, 0, 0, 0, 0, 0, 0, 0,
3986 0, 0, 0, 0, 0, 0, 0, 0,
3987 0, 0, 0, 0, 0, 0, 0, 0,
3988 0, 0, 0, 0, 0, 0, 0, 0,
3989 0, 0, 0, 0, 0, 0, 0, 0,
3990 0, 0, 0, 0, 0, 0, 0, 0,
3991 0, 0, 0, 0, 0, 0, 0, 0,
3992 0, 0, 0, 0, 0, 0, 0, 0,
3993 0, 0, 0, 0, 0, 0, 0, 0,
3994 0, 0, 0, 0, 0, 0, 0, 0,
3995 0, 0, 0, 0, 0, 0, 0, 0,
3996 0, 0, 0, 0, 0, 0, 0, 0,
3997 0, 0, 0, 0, 0, 0, 0, 0,
3998 0, 0, 0, 0, 0, 0, 0, 0,
3999 0, 0, 0, 0, 0, 0, 0, 0,
4000 0, 0, 0, 0, 0, 0, 0, 0,
4001 0, 0, 0, 0, 0, 0, 0, 0,
4002 0, 0, 0, 0, 0, 0, 0, 0,
4003 0, 0, 0, 0, 0, 0, 0, 0,
4004 0, 0, 0, 0, 0, 0, 0, 0,
4005 0, 0, 0, 0, 0, 0, 0, 0,
4006 0, 0, 0, 0, 0, 0, 0, 0,
4007 0, 0, 0, 0, 0, 0, 0, 0,
4008 0, 0, 0, 0, 0, 0, 0, 0,
4009 0, 0, 0, 0, 0, 0, 0, 0,
4010 0, 0, 0, 0, 0, 0, 0, 0,
4011 0, 0, 0, 0, 0, 0, 0, 0,
4012 0, 0, 0, 0, 0, 0, 0, 0,
4013 0, 0, 0, 0, 0, 0, 0, 0,
4014 0, 0, 0, 0, 0, 0, 0, 0,
4015 0, 0, 0, 0, 0, 0, 0, 0,
4016 0, 0, 0, 0, 0, 0, 0, 0,
4017 0, 0, 0, 0, 0, 0, 0, 0,
4018 0, 0, 0, 0, 0, 0, 0, 0,
4019 0, 0, 0, 0, 0, 0, 0, 0,
4020 0, 0, 0, 0, 0, 0, 0, 0,
4021 0, 0, 0, 0, 0, 0, 0, 0,
4022 0, 0, 0, 0, 0, 0, 0, 0,
4023 0, 0, 0, 0, 0, 0, 0, 0,
4024 0, 0, 0, 0, 0, 0, 0, 0,
4025 0, 0, 0, 0, 0, 0, 0, 0,
4026 0, 0, 0, 0, 0, 0, 0, 0,
4027 0, 0, 0, 0, 0, 0, 0, 0,
4028 0, 0, 0, 0, 0, 0, 0, 0,
4029 0, 0, 0, 0, 0, 0, 0, 0,
4030 0, 0, 0, 0, 0, 0, 0, 0,
4031 0, 0, 0, 0, 0, 0, 0, 0,
4032 0, 0, 0, 0, 0, 0, 0, 0,
4033 0, 0, 0, 0, 0, 0, 0, 0,
4034 0, 0, 0, 0, 0, 0, 0, 0,
4035 0, 0, 0, 0, 0, 0, 0, 0,
4036 0, 0, 0, 0, 0, 0, 0, 0,
4037 0, 0, 0, 0, 0, 0, 0, 0,
4038 0, 0, 0, 0, 0, 0, 0, 0,
4039 0, 0, 0, 0, 0, 0, 0, 0,
4040 0, 0, 0, 0, 0, 0, 0, 0,
4041 0, 0, 0, 0, 0, 0, 0, 0,
4042 0, 0, 0, 0, 0, 0, 0, 0,
4043 0, 0, 0, 0, 0, 0, 0, 0,
4044 0, 0, 0, 0, 0, 0, 0, 0,
4045 0, 0, 0, 0, 0, 0, 0, 0,
4046 0, 0, 0, 0, 0, 0, 0, 0,
4047 0, 0, 0, 0, 0, 0, 0, 0,
4048 0, 0, 0, 0, 0, 0, 0, 0,
4049 0, 0, 0, 0, 0, 0, 0, 0,
4050 0, 0, 0, 0, 0, 0, 0, 0,
4051 0, 0, 0, 0, 0, 0, 0, 0,
4052 0, 0, 0, 0, 0, 0, 0, 0,
4053 0, 0, 0, 0, 0, 0, 0, 0,
4054 0, 0, 0, 0, 0, 0, 0, 0,
4055 0, 0, 0, 0, 0, 0, 0, 0,
4056 0, 0, 0, 0, 0, 0, 0, 0,
4057 0, 0, 0, 0, 0, 0, 0, 0,
4058 0, 0, 0, 0, 0, 0, 0, 0,
4059 0, 0, 0, 0, 0, 0, 0, 0,
4060 0, 0, 0, 0, 0, 0, 0, 0,
4061 0, 0, 0, 0, 0, 0, 0, 0,
4062 0, 0, 0, 0, 0, 0, 0, 0,
4063 0, 0, 0, 0, 0, 0, 0, 0,
4064 0, 0, 0, 0, 0, 0, 0, 0,
4065 0, 0, 0, 0, 0, 0, 0, 0,
4066 0, 0, 0, 0, 0, 0, 0, 0,
4067 0, 0, 0, 0, 0, 0, 0, 0,
4068 0, 0, 0, 0, 0, 0, 0, 0,
4069 0, 0, 0, 0, 0, 0, 0, 0,
4070 0, 0, 0, 0, 0, 0, 0, 0,
4071 0, 0, 0, 0, 0, 0, 0, 0,
4072 0, 0, 0, 0, 0, 0, 0, 0,
4073 11, 0, 0, 0, 0, 0, 0, 0,
4074 0, 0, 0, 0, 0, 0, 0, 0,
4075 0, 0, 0, 0, 0, 0, 0, 0,
4076 0, 0, 0, 0, 0, 0, 0, 0,
4077 0, 0, 0, 0, 0, 0, 0, 0,
4078 0, 0, 0, 0, 0, 0, 0, 0,
4079 0, 0, 0, 11, 0, 0, 0, 0,
4080 0, 0, 0, 0, 0, 0, 0, 0,
4081 0, 0, 0, 0, 0, 0, 0, 0,
4082 0, 0, 0, 0, 0, 11, 0, 0,
4083 0, 0, 0, 0, 0, 0, 11, 0,
4084 0, 0, 0, 0, 0, 11, 0, 0,
4085 0, 0, 0, 0, 0, 0, 0, 0,
4086 0, 0, 0, 0, 0, 0, 0, 0,
4087 0, 0, 0, 0, 0, 0, 0, 0,
4088 0, 0, 0, 0, 0, 0, 0, 0,
4089 0, 0,
4090}
4091
4092var _hcltok_eof_trans []int16 = []int16{
4093 0, 1, 4, 1, 1, 9, 9, 9,
4094 4, 4, 4, 4, 4, 4, 4, 4,
4095 4, 4, 4, 4, 4, 4, 4, 4,
4096 4, 4, 4, 4, 4, 4, 4, 4,
4097 4, 4, 4, 4, 4, 4, 4, 4,
4098 4, 4, 4, 4, 4, 4, 4, 4,
4099 4, 4, 4, 4, 4, 4, 4, 4,
4100 4, 4, 4, 4, 4, 4, 4, 4,
4101 4, 4, 4, 4, 4, 4, 4, 4,
4102 4, 4, 4, 4, 4, 4, 4, 4,
4103 4, 4, 4, 4, 4, 4, 4, 4,
4104 4, 4, 4, 4, 4, 4, 4, 4,
4105 4, 4, 4, 4, 4, 4, 4, 4,
4106 4, 4, 4, 4, 4, 4, 4, 4,
4107 4, 4, 4, 4, 4, 4, 4, 4,
4108 4, 4, 4, 4, 4, 4, 4, 4,
4109 4, 4, 4, 4, 4, 4, 4, 4,
4110 4, 4, 4, 4, 4, 4, 4, 4,
4111 4, 4, 4, 4, 4, 4, 4, 4,
4112 4, 4, 4, 4, 4, 4, 4, 4,
4113 4, 4, 4, 4, 4, 4, 4, 4,
4114 4, 4, 4, 4, 4, 4, 4, 4,
4115 4, 4, 4, 4, 4, 4, 4, 4,
4116 4, 4, 4, 4, 4, 4, 4, 4,
4117 4, 4, 4, 4, 4, 4, 4, 4,
4118 4, 4, 4, 4, 4, 4, 4, 4,
4119 4, 4, 4, 4, 4, 4, 4, 4,
4120 4, 4, 4, 4, 4, 4, 4, 4,
4121 4, 4, 4, 4, 4, 4, 4, 4,
4122 4, 4, 4, 4, 4, 4, 4, 4,
4123 4, 4, 4, 4, 4, 4, 4, 4,
4124 4, 4, 4, 4, 4, 4, 4, 4,
4125 4, 4, 4, 4, 4, 4, 4, 4,
4126 4, 4, 4, 4, 4, 4, 4, 4,
4127 4, 4, 4, 4, 4, 4, 4, 4,
4128 4, 4, 4, 4, 4, 4, 4, 4,
4129 4, 4, 4, 4, 4, 4, 4, 4,
4130 4, 4, 4, 4, 4, 4, 4, 4,
4131 4, 4, 4, 4, 4, 4, 4, 4,
4132 4, 4, 4, 4, 4, 4, 4, 4,
4133 4, 4, 4, 4, 4, 4, 4, 4,
4134 4, 4, 4, 4, 4, 4, 4, 4,
4135 4, 4, 4, 4, 4, 4, 4, 4,
4136 4, 4, 4, 4, 4, 4, 4, 4,
4137 4, 4, 4, 4, 4, 4, 4, 4,
4138 4, 4, 4, 4, 4, 4, 4, 4,
4139 4, 4, 4, 4, 4, 4, 4, 4,
4140 4, 4, 4, 4, 4, 4, 4, 4,
4141 4, 4, 4, 4, 4, 4, 4, 4,
4142 4, 4, 4, 4, 4, 4, 4, 4,
4143 4, 4, 4, 4, 4, 4, 4, 4,
4144 4, 4, 4, 4, 4, 4, 4, 4,
4145 422, 422, 1, 422, 422, 422, 422, 422,
4146 422, 422, 422, 422, 422, 422, 422, 422,
4147 422, 422, 422, 422, 422, 422, 422, 422,
4148 422, 422, 422, 422, 422, 422, 422, 422,
4149 422, 422, 422, 422, 422, 422, 422, 422,
4150 422, 422, 422, 422, 422, 422, 422, 422,
4151 422, 422, 422, 422, 422, 422, 422, 422,
4152 422, 422, 422, 422, 422, 422, 422, 422,
4153 422, 422, 422, 422, 422, 422, 422, 422,
4154 422, 422, 422, 422, 422, 422, 422, 422,
4155 422, 422, 422, 422, 422, 422, 422, 422,
4156 422, 422, 422, 422, 422, 422, 422, 422,
4157 422, 422, 422, 422, 422, 422, 422, 422,
4158 422, 422, 422, 422, 422, 422, 422, 422,
4159 422, 422, 422, 422, 422, 422, 422, 422,
4160 422, 422, 422, 422, 422, 422, 422, 422,
4161 422, 422, 422, 422, 422, 422, 422, 422,
4162 422, 422, 422, 422, 422, 422, 422, 422,
4163 422, 422, 422, 422, 422, 422, 422, 422,
4164 422, 422, 422, 422, 422, 422, 422, 422,
4165 422, 422, 422, 422, 422, 422, 422, 422,
4166 422, 422, 422, 422, 422, 422, 422, 422,
4167 422, 422, 422, 422, 422, 422, 422, 422,
4168 422, 422, 422, 422, 422, 422, 422, 422,
4169 422, 422, 422, 422, 422, 422, 422, 422,
4170 422, 422, 422, 422, 422, 422, 422, 422,
4171 422, 422, 422, 422, 422, 422, 422, 422,
4172 422, 422, 422, 422, 422, 422, 422, 422,
4173 422, 422, 422, 422, 422, 422, 422, 422,
4174 422, 422, 422, 422, 422, 422, 422, 422,
4175 422, 422, 422, 422, 422, 422, 422, 422,
4176 422, 422, 422, 422, 422, 422, 422, 422,
4177 422, 422, 422, 422, 422, 422, 422, 422,
4178 422, 422, 422, 422, 422, 422, 422, 422,
4179 422, 422, 422, 672, 672, 672, 672, 672,
4180 672, 672, 672, 672, 672, 672, 672, 672,
4181 672, 672, 672, 672, 672, 672, 672, 672,
4182 672, 672, 672, 672, 672, 672, 672, 672,
4183 672, 672, 672, 672, 672, 672, 672, 672,
4184 672, 672, 672, 672, 672, 672, 672, 672,
4185 672, 672, 672, 672, 672, 672, 672, 672,
4186 672, 672, 672, 672, 672, 672, 672, 672,
4187 672, 672, 672, 672, 672, 672, 672, 672,
4188 672, 672, 672, 672, 672, 672, 672, 672,
4189 672, 672, 672, 672, 672, 672, 672, 672,
4190 672, 672, 672, 672, 672, 672, 672, 672,
4191 672, 672, 672, 672, 672, 672, 672, 672,
4192 672, 672, 672, 672, 672, 672, 672, 672,
4193 672, 672, 672, 672, 672, 672, 672, 672,
4194 672, 672, 672, 672, 672, 672, 672, 672,
4195 672, 672, 672, 672, 672, 672, 672, 672,
4196 672, 672, 672, 672, 672, 672, 672, 672,
4197 672, 672, 672, 672, 672, 672, 672, 672,
4198 672, 672, 672, 672, 672, 672, 672, 672,
4199 672, 672, 672, 672, 672, 672, 672, 672,
4200 672, 672, 672, 672, 672, 672, 672, 672,
4201 672, 672, 672, 672, 672, 672, 672, 672,
4202 672, 672, 672, 672, 672, 672, 672, 672,
4203 672, 672, 672, 672, 672, 672, 672, 672,
4204 672, 672, 672, 672, 672, 672, 672, 672,
4205 672, 672, 672, 672, 672, 672, 672, 672,
4206 672, 672, 672, 672, 672, 672, 672, 672,
4207 672, 672, 672, 672, 672, 672, 672, 672,
4208 672, 672, 672, 672, 672, 672, 672, 672,
4209 672, 769, 769, 769, 769, 769, 775, 775,
4210 777, 779, 779, 777, 777, 779, 0, 0,
4211 787, 789, 787, 787, 789, 0, 0, 795,
4212 795, 797, 795, 795, 795, 795, 795, 795,
4213 795, 795, 795, 795, 795, 795, 795, 795,
4214 795, 795, 795, 795, 795, 795, 795, 795,
4215 795, 795, 795, 795, 795, 795, 795, 795,
4216 795, 795, 795, 795, 795, 795, 795, 795,
4217 795, 795, 795, 795, 795, 795, 795, 795,
4218 795, 795, 795, 795, 795, 795, 795, 795,
4219 795, 795, 795, 795, 795, 795, 795, 795,
4220 795, 795, 795, 795, 795, 795, 795, 795,
4221 795, 795, 795, 795, 795, 795, 795, 795,
4222 795, 795, 795, 795, 795, 795, 795, 795,
4223 795, 795, 795, 795, 795, 795, 795, 795,
4224 795, 795, 795, 795, 795, 795, 795, 795,
4225 795, 795, 795, 795, 795, 795, 795, 795,
4226 795, 795, 795, 795, 795, 795, 795, 795,
4227 795, 795, 795, 795, 795, 795, 795, 795,
4228 795, 795, 795, 795, 795, 795, 795, 795,
4229 795, 795, 795, 795, 795, 795, 795, 795,
4230 795, 795, 795, 795, 795, 795, 795, 795,
4231 795, 795, 795, 795, 795, 795, 795, 795,
4232 795, 795, 795, 795, 795, 795, 795, 795,
4233 795, 795, 795, 795, 795, 795, 795, 795,
4234 795, 795, 795, 795, 795, 795, 795, 795,
4235 795, 795, 795, 795, 795, 795, 795, 795,
4236 795, 795, 795, 795, 795, 795, 795, 795,
4237 795, 795, 795, 795, 795, 795, 795, 795,
4238 795, 795, 795, 795, 795, 795, 795, 795,
4239 795, 795, 795, 795, 795, 795, 795, 795,
4240 795, 795, 795, 795, 795, 795, 795, 795,
4241 795, 795, 795, 795, 795, 795, 795, 795,
4242 795, 795, 795, 795, 795, 795, 795, 795,
4243 795, 795, 795, 795, 795, 795, 795, 795,
4244 795, 795, 795, 795, 795, 795, 795, 795,
4245 795, 795, 795, 795, 795, 795, 795, 795,
4246 795, 795, 1046, 1046, 1046, 1046, 1046, 1046,
4247 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4248 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4249 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4250 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4251 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4252 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4253 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4254 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4255 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4256 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4257 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4258 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4259 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4260 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4261 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4262 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4263 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4264 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4265 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4266 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4267 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4268 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4269 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4270 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4271 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4272 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4273 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4274 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4275 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4276 0, 1196, 1197, 1198, 1197, 1198, 1198, 1198,
4277 1202, 1203, 1198, 1198, 1198, 1209, 1198, 1198,
4278 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4279 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4280 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4281 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4282 1239, 1239, 1239, 0, 1392, 1396, 1404, 1392,
4283 1392, 1396, 1396, 1404, 1396, 1392, 1404, 1404,
4284 1404, 1404, 1404, 1396, 1396, 1396, 1458, 1460,
4285 1458, 1463, 1465, 1465, 1465, 0, 1474, 1478,
4286 1487, 1496, 1498, 1500, 1500, 1500, 0, 1508,
4287 1511, 1513, 1515, 1515, 1515, 0, 1552, 1580,
4288 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580,
4289 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580,
4290 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580,
4291 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580,
4292 1580, 1580,
4293}
4294
4295const hcltok_start int = 1464
4296const hcltok_first_final int = 1464
4297const hcltok_error int = 0
4298
4299const hcltok_en_stringTemplate int = 1515
4300const hcltok_en_heredocTemplate int = 1541
4301const hcltok_en_bareTemplate int = 1550
4302const hcltok_en_identOnly int = 1557
4303const hcltok_en_main int = 1464
4304
4305// line 16 "scan_tokens.rl"
4306
4307func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
4308 f := &tokenAccum{
4309 Filename: filename,
4310 Bytes: data,
4311 Pos: start,
4312 }
4313
4314 // line 294 "scan_tokens.rl"
4315
4316 // Ragel state
4317 p := 0 // "Pointer" into data
4318 pe := len(data) // End-of-data "pointer"
4319 ts := 0
4320 te := 0
4321 act := 0
4322 eof := pe
4323 var stack []int
4324 var top int
4325
4326 var cs int // current state
4327 switch mode {
4328 case scanNormal:
4329 cs = hcltok_en_main
4330 case scanTemplate:
4331 cs = hcltok_en_bareTemplate
4332 case scanIdentOnly:
4333 cs = hcltok_en_identOnly
4334 default:
4335 panic("invalid scanMode")
4336 }
4337
4338 braces := 0
4339 var retBraces []int // stack of brace levels that cause us to use fret
4340 var heredocs []heredocInProgress // stack of heredocs we're currently processing
4341
4342 // line 329 "scan_tokens.rl"
4343
4344 // Make Go compiler happy
4345 _ = ts
4346 _ = te
4347 _ = act
4348 _ = eof
4349
4350 token := func(ty TokenType) {
4351 f.emitToken(ty, ts, te)
4352 }
4353 selfToken := func() {
4354 b := data[ts:te]
4355 if len(b) != 1 {
4356 // should never happen
4357 panic("selfToken only works for single-character tokens")
4358 }
4359 f.emitToken(TokenType(b[0]), ts, te)
4360 }
4361
4362 // line 4372 "scan_tokens.go"
4363 {
4364 top = 0
4365 ts = 0
4366 te = 0
4367 act = 0
4368 }
4369
4370 // line 4380 "scan_tokens.go"
4371 {
4372 var _klen int
4373 var _trans int
4374 var _acts int
4375 var _nacts uint
4376 var _keys int
4377 if p == pe {
4378 goto _test_eof
4379 }
4380 if cs == 0 {
4381 goto _out
4382 }
4383 _resume:
4384 _acts = int(_hcltok_from_state_actions[cs])
4385 _nacts = uint(_hcltok_actions[_acts])
4386 _acts++
4387 for ; _nacts > 0; _nacts-- {
4388 _acts++
4389 switch _hcltok_actions[_acts-1] {
4390 case 6:
4391 // line 1 "NONE"
4392
4393 ts = p
4394
4395 // line 4404 "scan_tokens.go"
4396 }
4397 }
4398
4399 _keys = int(_hcltok_key_offsets[cs])
4400 _trans = int(_hcltok_index_offsets[cs])
4401
4402 _klen = int(_hcltok_single_lengths[cs])
4403 if _klen > 0 {
4404 _lower := int(_keys)
4405 var _mid int
4406 _upper := int(_keys + _klen - 1)
4407 for {
4408 if _upper < _lower {
4409 break
4410 }
4411
4412 _mid = _lower + ((_upper - _lower) >> 1)
4413 switch {
4414 case data[p] < _hcltok_trans_keys[_mid]:
4415 _upper = _mid - 1
4416 case data[p] > _hcltok_trans_keys[_mid]:
4417 _lower = _mid + 1
4418 default:
4419 _trans += int(_mid - int(_keys))
4420 goto _match
4421 }
4422 }
4423 _keys += _klen
4424 _trans += _klen
4425 }
4426
4427 _klen = int(_hcltok_range_lengths[cs])
4428 if _klen > 0 {
4429 _lower := int(_keys)
4430 var _mid int
4431 _upper := int(_keys + (_klen << 1) - 2)
4432 for {
4433 if _upper < _lower {
4434 break
4435 }
4436
4437 _mid = _lower + (((_upper - _lower) >> 1) & ^1)
4438 switch {
4439 case data[p] < _hcltok_trans_keys[_mid]:
4440 _upper = _mid - 2
4441 case data[p] > _hcltok_trans_keys[_mid+1]:
4442 _lower = _mid + 2
4443 default:
4444 _trans += int((_mid - int(_keys)) >> 1)
4445 goto _match
4446 }
4447 }
4448 _trans += _klen
4449 }
4450
4451 _match:
4452 _trans = int(_hcltok_indicies[_trans])
4453 _eof_trans:
4454 cs = int(_hcltok_trans_targs[_trans])
4455
4456 if _hcltok_trans_actions[_trans] == 0 {
4457 goto _again
4458 }
4459
4460 _acts = int(_hcltok_trans_actions[_trans])
4461 _nacts = uint(_hcltok_actions[_acts])
4462 _acts++
4463 for ; _nacts > 0; _nacts-- {
4464 _acts++
4465 switch _hcltok_actions[_acts-1] {
4466 case 0:
4467 // line 218 "scan_tokens.rl"
4468
4469 p--
4470
4471 case 1:
4472 // line 219 "scan_tokens.rl"
4473
4474 p--
4475
4476 case 2:
4477 // line 224 "scan_tokens.rl"
4478
4479 p--
4480
4481 case 3:
4482 // line 225 "scan_tokens.rl"
4483
4484 p--
4485
4486 case 7:
4487 // line 1 "NONE"
4488
4489 te = p + 1
4490
4491 case 8:
4492 // line 155 "scan_tokens.rl"
4493
4494 te = p + 1
4495 {
4496 token(TokenTemplateInterp)
4497 braces++
4498 retBraces = append(retBraces, braces)
4499 if len(heredocs) > 0 {
4500 heredocs[len(heredocs)-1].StartOfLine = false
4501 }
4502 {
4503 stack = append(stack, 0)
4504 stack[top] = cs
4505 top++
4506 cs = 1464
4507 goto _again
4508 }
4509 }
4510 case 9:
4511 // line 165 "scan_tokens.rl"
4512
4513 te = p + 1
4514 {
4515 token(TokenTemplateControl)
4516 braces++
4517 retBraces = append(retBraces, braces)
4518 if len(heredocs) > 0 {
4519 heredocs[len(heredocs)-1].StartOfLine = false
4520 }
4521 {
4522 stack = append(stack, 0)
4523 stack[top] = cs
4524 top++
4525 cs = 1464
4526 goto _again
4527 }
4528 }
4529 case 10:
4530 // line 79 "scan_tokens.rl"
4531
4532 te = p + 1
4533 {
4534 token(TokenCQuote)
4535 top--
4536 cs = stack[top]
4537 {
4538 stack = stack[:len(stack)-1]
4539 }
4540 goto _again
4541
4542 }
4543 case 11:
4544 // line 239 "scan_tokens.rl"
4545
4546 te = p + 1
4547 {
4548 token(TokenInvalid)
4549 }
4550 case 12:
4551 // line 240 "scan_tokens.rl"
4552
4553 te = p + 1
4554 {
4555 token(TokenBadUTF8)
4556 }
4557 case 13:
4558 // line 155 "scan_tokens.rl"
4559
4560 te = p
4561 p--
4562 {
4563 token(TokenTemplateInterp)
4564 braces++
4565 retBraces = append(retBraces, braces)
4566 if len(heredocs) > 0 {
4567 heredocs[len(heredocs)-1].StartOfLine = false
4568 }
4569 {
4570 stack = append(stack, 0)
4571 stack[top] = cs
4572 top++
4573 cs = 1464
4574 goto _again
4575 }
4576 }
4577 case 14:
4578 // line 165 "scan_tokens.rl"
4579
4580 te = p
4581 p--
4582 {
4583 token(TokenTemplateControl)
4584 braces++
4585 retBraces = append(retBraces, braces)
4586 if len(heredocs) > 0 {
4587 heredocs[len(heredocs)-1].StartOfLine = false
4588 }
4589 {
4590 stack = append(stack, 0)
4591 stack[top] = cs
4592 top++
4593 cs = 1464
4594 goto _again
4595 }
4596 }
4597 case 15:
4598 // line 238 "scan_tokens.rl"
4599
4600 te = p
4601 p--
4602 {
4603 token(TokenQuotedLit)
4604 }
4605 case 16:
4606 // line 239 "scan_tokens.rl"
4607
4608 te = p
4609 p--
4610 {
4611 token(TokenInvalid)
4612 }
4613 case 17:
4614 // line 240 "scan_tokens.rl"
4615
4616 te = p
4617 p--
4618 {
4619 token(TokenBadUTF8)
4620 }
4621 case 18:
4622 // line 238 "scan_tokens.rl"
4623
4624 p = (te) - 1
4625 {
4626 token(TokenQuotedLit)
4627 }
4628 case 19:
4629 // line 240 "scan_tokens.rl"
4630
4631 p = (te) - 1
4632 {
4633 token(TokenBadUTF8)
4634 }
4635 case 20:
4636 // line 143 "scan_tokens.rl"
4637
4638 act = 10
4639 case 21:
4640 // line 248 "scan_tokens.rl"
4641
4642 act = 11
4643 case 22:
4644 // line 155 "scan_tokens.rl"
4645
4646 te = p + 1
4647 {
4648 token(TokenTemplateInterp)
4649 braces++
4650 retBraces = append(retBraces, braces)
4651 if len(heredocs) > 0 {
4652 heredocs[len(heredocs)-1].StartOfLine = false
4653 }
4654 {
4655 stack = append(stack, 0)
4656 stack[top] = cs
4657 top++
4658 cs = 1464
4659 goto _again
4660 }
4661 }
4662 case 23:
4663 // line 165 "scan_tokens.rl"
4664
4665 te = p + 1
4666 {
4667 token(TokenTemplateControl)
4668 braces++
4669 retBraces = append(retBraces, braces)
4670 if len(heredocs) > 0 {
4671 heredocs[len(heredocs)-1].StartOfLine = false
4672 }
4673 {
4674 stack = append(stack, 0)
4675 stack[top] = cs
4676 top++
4677 cs = 1464
4678 goto _again
4679 }
4680 }
4681 case 24:
4682 // line 106 "scan_tokens.rl"
4683
4684 te = p + 1
4685 {
 4686							// This action is called specifically when a heredoc literal
4687 // ends with a newline character.
4688
4689 // This might actually be our end marker.
4690 topdoc := &heredocs[len(heredocs)-1]
4691 if topdoc.StartOfLine {
4692 maybeMarker := bytes.TrimSpace(data[ts:te])
4693 if bytes.Equal(maybeMarker, topdoc.Marker) {
4694 // We actually emit two tokens here: the end-of-heredoc
4695 // marker first, and then separately the newline that
4696 // follows it. This then avoids issues with the closing
4697 // marker consuming a newline that would normally be used
4698 // to mark the end of an attribute definition.
4699 // We might have either a \n sequence or an \r\n sequence
4700 // here, so we must handle both.
4701 nls := te - 1
4702 nle := te
4703 te--
4704 if data[te-1] == '\r' {
4705 // back up one more byte
4706 nls--
4707 te--
4708 }
4709 token(TokenCHeredoc)
4710 ts = nls
4711 te = nle
4712 token(TokenNewline)
4713 heredocs = heredocs[:len(heredocs)-1]
4714 top--
4715 cs = stack[top]
4716 {
4717 stack = stack[:len(stack)-1]
4718 }
4719 goto _again
4720
4721 }
4722 }
4723
4724 topdoc.StartOfLine = true
4725 token(TokenStringLit)
4726 }
4727 case 25:
4728 // line 248 "scan_tokens.rl"
4729
4730 te = p + 1
4731 {
4732 token(TokenBadUTF8)
4733 }
4734 case 26:
4735 // line 155 "scan_tokens.rl"
4736
4737 te = p
4738 p--
4739 {
4740 token(TokenTemplateInterp)
4741 braces++
4742 retBraces = append(retBraces, braces)
4743 if len(heredocs) > 0 {
4744 heredocs[len(heredocs)-1].StartOfLine = false
4745 }
4746 {
4747 stack = append(stack, 0)
4748 stack[top] = cs
4749 top++
4750 cs = 1464
4751 goto _again
4752 }
4753 }
4754 case 27:
4755 // line 165 "scan_tokens.rl"
4756
4757 te = p
4758 p--
4759 {
4760 token(TokenTemplateControl)
4761 braces++
4762 retBraces = append(retBraces, braces)
4763 if len(heredocs) > 0 {
4764 heredocs[len(heredocs)-1].StartOfLine = false
4765 }
4766 {
4767 stack = append(stack, 0)
4768 stack[top] = cs
4769 top++
4770 cs = 1464
4771 goto _again
4772 }
4773 }
4774 case 28:
4775 // line 143 "scan_tokens.rl"
4776
4777 te = p
4778 p--
4779 {
4780 // This action is called when a heredoc literal _doesn't_ end
4781 // with a newline character, e.g. because we're about to enter
4782 // an interpolation sequence.
4783 heredocs[len(heredocs)-1].StartOfLine = false
4784 token(TokenStringLit)
4785 }
4786 case 29:
4787 // line 248 "scan_tokens.rl"
4788
4789 te = p
4790 p--
4791 {
4792 token(TokenBadUTF8)
4793 }
4794 case 30:
4795 // line 143 "scan_tokens.rl"
4796
4797 p = (te) - 1
4798 {
4799 // This action is called when a heredoc literal _doesn't_ end
4800 // with a newline character, e.g. because we're about to enter
4801 // an interpolation sequence.
4802 heredocs[len(heredocs)-1].StartOfLine = false
4803 token(TokenStringLit)
4804 }
4805 case 31:
4806 // line 1 "NONE"
4807
4808 switch act {
4809 case 0:
4810 {
4811 cs = 0
4812 goto _again
4813 }
4814 case 10:
4815 {
4816 p = (te) - 1
4817
4818 // This action is called when a heredoc literal _doesn't_ end
4819 // with a newline character, e.g. because we're about to enter
4820 // an interpolation sequence.
4821 heredocs[len(heredocs)-1].StartOfLine = false
4822 token(TokenStringLit)
4823 }
4824 case 11:
4825 {
4826 p = (te) - 1
4827 token(TokenBadUTF8)
4828 }
4829 }
4830
4831 case 32:
4832 // line 151 "scan_tokens.rl"
4833
4834 act = 14
4835 case 33:
4836 // line 255 "scan_tokens.rl"
4837
4838 act = 15
4839 case 34:
4840 // line 155 "scan_tokens.rl"
4841
4842 te = p + 1
4843 {
4844 token(TokenTemplateInterp)
4845 braces++
4846 retBraces = append(retBraces, braces)
4847 if len(heredocs) > 0 {
4848 heredocs[len(heredocs)-1].StartOfLine = false
4849 }
4850 {
4851 stack = append(stack, 0)
4852 stack[top] = cs
4853 top++
4854 cs = 1464
4855 goto _again
4856 }
4857 }
4858 case 35:
4859 // line 165 "scan_tokens.rl"
4860
4861 te = p + 1
4862 {
4863 token(TokenTemplateControl)
4864 braces++
4865 retBraces = append(retBraces, braces)
4866 if len(heredocs) > 0 {
4867 heredocs[len(heredocs)-1].StartOfLine = false
4868 }
4869 {
4870 stack = append(stack, 0)
4871 stack[top] = cs
4872 top++
4873 cs = 1464
4874 goto _again
4875 }
4876 }
4877 case 36:
4878 // line 151 "scan_tokens.rl"
4879
4880 te = p + 1
4881 {
4882 token(TokenStringLit)
4883 }
4884 case 37:
4885 // line 255 "scan_tokens.rl"
4886
4887 te = p + 1
4888 {
4889 token(TokenBadUTF8)
4890 }
4891 case 38:
4892 // line 155 "scan_tokens.rl"
4893
4894 te = p
4895 p--
4896 {
4897 token(TokenTemplateInterp)
4898 braces++
4899 retBraces = append(retBraces, braces)
4900 if len(heredocs) > 0 {
4901 heredocs[len(heredocs)-1].StartOfLine = false
4902 }
4903 {
4904 stack = append(stack, 0)
4905 stack[top] = cs
4906 top++
4907 cs = 1464
4908 goto _again
4909 }
4910 }
4911 case 39:
4912 // line 165 "scan_tokens.rl"
4913
4914 te = p
4915 p--
4916 {
4917 token(TokenTemplateControl)
4918 braces++
4919 retBraces = append(retBraces, braces)
4920 if len(heredocs) > 0 {
4921 heredocs[len(heredocs)-1].StartOfLine = false
4922 }
4923 {
4924 stack = append(stack, 0)
4925 stack[top] = cs
4926 top++
4927 cs = 1464
4928 goto _again
4929 }
4930 }
4931 case 40:
4932 // line 151 "scan_tokens.rl"
4933
4934 te = p
4935 p--
4936 {
4937 token(TokenStringLit)
4938 }
4939 case 41:
4940 // line 255 "scan_tokens.rl"
4941
4942 te = p
4943 p--
4944 {
4945 token(TokenBadUTF8)
4946 }
4947 case 42:
4948 // line 151 "scan_tokens.rl"
4949
4950 p = (te) - 1
4951 {
4952 token(TokenStringLit)
4953 }
4954 case 43:
4955 // line 1 "NONE"
4956
4957 switch act {
4958 case 0:
4959 {
4960 cs = 0
4961 goto _again
4962 }
4963 case 14:
4964 {
4965 p = (te) - 1
4966
4967 token(TokenStringLit)
4968 }
4969 case 15:
4970 {
4971 p = (te) - 1
4972 token(TokenBadUTF8)
4973 }
4974 }
4975
4976 case 44:
4977 // line 259 "scan_tokens.rl"
4978
4979 act = 16
4980 case 45:
4981 // line 260 "scan_tokens.rl"
4982
4983 act = 17
4984 case 46:
4985 // line 260 "scan_tokens.rl"
4986
4987 te = p + 1
4988 {
4989 token(TokenBadUTF8)
4990 }
4991 case 47:
4992 // line 261 "scan_tokens.rl"
4993
4994 te = p + 1
4995 {
4996 token(TokenInvalid)
4997 }
4998 case 48:
4999 // line 259 "scan_tokens.rl"
5000
5001 te = p
5002 p--
5003 {
5004 token(TokenIdent)
5005 }
5006 case 49:
5007 // line 260 "scan_tokens.rl"
5008
5009 te = p
5010 p--
5011 {
5012 token(TokenBadUTF8)
5013 }
5014 case 50:
5015 // line 259 "scan_tokens.rl"
5016
5017 p = (te) - 1
5018 {
5019 token(TokenIdent)
5020 }
5021 case 51:
5022 // line 260 "scan_tokens.rl"
5023
5024 p = (te) - 1
5025 {
5026 token(TokenBadUTF8)
5027 }
5028 case 52:
5029 // line 1 "NONE"
5030
5031 switch act {
5032 case 16:
5033 {
5034 p = (te) - 1
5035 token(TokenIdent)
5036 }
5037 case 17:
5038 {
5039 p = (te) - 1
5040 token(TokenBadUTF8)
5041 }
5042 }
5043
5044 case 53:
5045 // line 267 "scan_tokens.rl"
5046
5047 act = 21
5048 case 54:
5049 // line 269 "scan_tokens.rl"
5050
5051 act = 22
5052 case 55:
5053 // line 280 "scan_tokens.rl"
5054
5055 act = 32
5056 case 56:
5057 // line 290 "scan_tokens.rl"
5058
5059 act = 38
5060 case 57:
5061 // line 291 "scan_tokens.rl"
5062
5063 act = 39
5064 case 58:
5065 // line 269 "scan_tokens.rl"
5066
5067 te = p + 1
5068 {
5069 token(TokenComment)
5070 }
5071 case 59:
5072 // line 270 "scan_tokens.rl"
5073
5074 te = p + 1
5075 {
5076 token(TokenNewline)
5077 }
5078 case 60:
5079 // line 272 "scan_tokens.rl"
5080
5081 te = p + 1
5082 {
5083 token(TokenEqualOp)
5084 }
5085 case 61:
5086 // line 273 "scan_tokens.rl"
5087
5088 te = p + 1
5089 {
5090 token(TokenNotEqual)
5091 }
5092 case 62:
5093 // line 274 "scan_tokens.rl"
5094
5095 te = p + 1
5096 {
5097 token(TokenGreaterThanEq)
5098 }
5099 case 63:
5100 // line 275 "scan_tokens.rl"
5101
5102 te = p + 1
5103 {
5104 token(TokenLessThanEq)
5105 }
5106 case 64:
5107 // line 276 "scan_tokens.rl"
5108
5109 te = p + 1
5110 {
5111 token(TokenAnd)
5112 }
5113 case 65:
5114 // line 277 "scan_tokens.rl"
5115
5116 te = p + 1
5117 {
5118 token(TokenOr)
5119 }
5120 case 66:
5121 // line 278 "scan_tokens.rl"
5122
5123 te = p + 1
5124 {
5125 token(TokenEllipsis)
5126 }
5127 case 67:
5128 // line 279 "scan_tokens.rl"
5129
5130 te = p + 1
5131 {
5132 token(TokenFatArrow)
5133 }
5134 case 68:
5135 // line 280 "scan_tokens.rl"
5136
5137 te = p + 1
5138 {
5139 selfToken()
5140 }
5141 case 69:
5142 // line 175 "scan_tokens.rl"
5143
5144 te = p + 1
5145 {
5146 token(TokenOBrace)
5147 braces++
5148 }
5149 case 70:
5150 // line 180 "scan_tokens.rl"
5151
5152 te = p + 1
5153 {
5154 if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
5155 token(TokenTemplateSeqEnd)
5156 braces--
5157 retBraces = retBraces[0 : len(retBraces)-1]
5158 top--
5159 cs = stack[top]
5160 {
5161 stack = stack[:len(stack)-1]
5162 }
5163 goto _again
5164
5165 } else {
5166 token(TokenCBrace)
5167 braces--
5168 }
5169 }
5170 case 71:
5171 // line 192 "scan_tokens.rl"
5172
5173 te = p + 1
5174 {
5175 // Only consume from the retBraces stack and return if we are at
5176 // a suitable brace nesting level, otherwise things will get
5177 // confused. (Not entering this branch indicates a syntax error,
5178 // which we will catch in the parser.)
5179 if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
5180 token(TokenTemplateSeqEnd)
5181 braces--
5182 retBraces = retBraces[0 : len(retBraces)-1]
5183 top--
5184 cs = stack[top]
5185 {
5186 stack = stack[:len(stack)-1]
5187 }
5188 goto _again
5189
5190 } else {
5191 // We intentionally generate a TokenTemplateSeqEnd here,
5192 // even though the user apparently wanted a brace, because
5193 // we want to allow the parser to catch the incorrect use
5194 // of a ~} to balance a generic opening brace, rather than
5195 // a template sequence.
5196 token(TokenTemplateSeqEnd)
5197 braces--
5198 }
5199 }
5200 case 72:
5201 // line 74 "scan_tokens.rl"
5202
5203 te = p + 1
5204 {
5205 token(TokenOQuote)
5206 {
5207 stack = append(stack, 0)
5208 stack[top] = cs
5209 top++
5210 cs = 1515
5211 goto _again
5212 }
5213 }
5214 case 73:
5215 // line 84 "scan_tokens.rl"
5216
5217 te = p + 1
5218 {
5219 token(TokenOHeredoc)
5220 // the token is currently the whole heredoc introducer, like
5221 // <<EOT or <<-EOT, followed by a newline. We want to extract
5222 // just the "EOT" portion that we'll use as the closing marker.
5223
5224 marker := data[ts+2 : te-1]
5225 if marker[0] == '-' {
5226 marker = marker[1:]
5227 }
5228 if marker[len(marker)-1] == '\r' {
5229 marker = marker[:len(marker)-1]
5230 }
5231
5232 heredocs = append(heredocs, heredocInProgress{
5233 Marker: marker,
5234 StartOfLine: true,
5235 })
5236
5237 {
5238 stack = append(stack, 0)
5239 stack[top] = cs
5240 top++
5241 cs = 1541
5242 goto _again
5243 }
5244 }
5245 case 74:
5246 // line 290 "scan_tokens.rl"
5247
5248 te = p + 1
5249 {
5250 token(TokenBadUTF8)
5251 }
5252 case 75:
5253 // line 291 "scan_tokens.rl"
5254
5255 te = p + 1
5256 {
5257 token(TokenInvalid)
5258 }
5259 case 76:
5260 // line 265 "scan_tokens.rl"
5261
5262 te = p
5263 p--
5264
5265 case 77:
5266 // line 266 "scan_tokens.rl"
5267
5268 te = p
5269 p--
5270 {
5271 token(TokenNumberLit)
5272 }
5273 case 78:
5274 // line 267 "scan_tokens.rl"
5275
5276 te = p
5277 p--
5278 {
5279 token(TokenIdent)
5280 }
5281 case 79:
5282 // line 269 "scan_tokens.rl"
5283
5284 te = p
5285 p--
5286 {
5287 token(TokenComment)
5288 }
5289 case 80:
5290 // line 280 "scan_tokens.rl"
5291
5292 te = p
5293 p--
5294 {
5295 selfToken()
5296 }
5297 case 81:
5298 // line 290 "scan_tokens.rl"
5299
5300 te = p
5301 p--
5302 {
5303 token(TokenBadUTF8)
5304 }
5305 case 82:
5306 // line 291 "scan_tokens.rl"
5307
5308 te = p
5309 p--
5310 {
5311 token(TokenInvalid)
5312 }
5313 case 83:
5314 // line 266 "scan_tokens.rl"
5315
5316 p = (te) - 1
5317 {
5318 token(TokenNumberLit)
5319 }
5320 case 84:
5321 // line 267 "scan_tokens.rl"
5322
5323 p = (te) - 1
5324 {
5325 token(TokenIdent)
5326 }
5327 case 85:
5328 // line 280 "scan_tokens.rl"
5329
5330 p = (te) - 1
5331 {
5332 selfToken()
5333 }
5334 case 86:
5335 // line 290 "scan_tokens.rl"
5336
5337 p = (te) - 1
5338 {
5339 token(TokenBadUTF8)
5340 }
5341 case 87:
5342 // line 1 "NONE"
5343
5344 switch act {
5345 case 21:
5346 {
5347 p = (te) - 1
5348 token(TokenIdent)
5349 }
5350 case 22:
5351 {
5352 p = (te) - 1
5353 token(TokenComment)
5354 }
5355 case 32:
5356 {
5357 p = (te) - 1
5358 selfToken()
5359 }
5360 case 38:
5361 {
5362 p = (te) - 1
5363 token(TokenBadUTF8)
5364 }
5365 case 39:
5366 {
5367 p = (te) - 1
5368 token(TokenInvalid)
5369 }
5370 }
5371
5372 // line 5232 "scan_tokens.go"
5373 }
5374 }
5375
5376 _again:
5377 _acts = int(_hcltok_to_state_actions[cs])
5378 _nacts = uint(_hcltok_actions[_acts])
5379 _acts++
5380 for ; _nacts > 0; _nacts-- {
5381 _acts++
5382 switch _hcltok_actions[_acts-1] {
5383 case 4:
5384 // line 1 "NONE"
5385
5386 ts = 0
5387
5388 case 5:
5389 // line 1 "NONE"
5390
5391 act = 0
5392
5393 // line 5252 "scan_tokens.go"
5394 }
5395 }
5396
5397 if cs == 0 {
5398 goto _out
5399 }
5400 p++
5401 if p != pe {
5402 goto _resume
5403 }
5404 _test_eof:
5405 {
5406 }
5407 if p == eof {
5408 if _hcltok_eof_trans[cs] > 0 {
5409 _trans = int(_hcltok_eof_trans[cs] - 1)
5410 goto _eof_trans
5411 }
5412 }
5413
5414 _out:
5415 {
5416 }
5417 }
5418
5419 // line 352 "scan_tokens.rl"
5420
5421 // If we fall out here without being in a final state then we've
5422 // encountered something that the scanner can't match, which we'll
5423 // deal with as an invalid.
5424 if cs < hcltok_first_final {
5425 if mode == scanTemplate && len(stack) == 0 {
5426 // If we're scanning a bare template then any straggling
5427 // top-level stuff is actually literal string, rather than
5428 // invalid. This handles the case where the template ends
5429 // with a single "$" or "%", which trips us up because we
5430 // want to see another character to decide if it's a sequence
5431 // or an escape.
5432 f.emitToken(TokenStringLit, ts, len(data))
5433 } else {
5434 f.emitToken(TokenInvalid, ts, len(data))
5435 }
5436 }
5437
5438 // We always emit a synthetic EOF token at the end, since it gives the
5439 // parser position information for an "unexpected EOF" diagnostic.
5440 f.emitToken(TokenEOF, len(data), len(data))
5441
5442 return f.Tokens
5443}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
new file mode 100644
index 0000000..83ef65b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
@@ -0,0 +1,376 @@
1
2package hclsyntax
3
4import (
5 "bytes"
6
7 "github.com/hashicorp/hcl2/hcl"
8)
9
10// This file is generated from scan_tokens.rl. DO NOT EDIT.
11%%{
12 # (except you are actually in scan_tokens.rl here, so edit away!)
13
14 machine hcltok;
15 write data;
16}%%
17
18func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
19 f := &tokenAccum{
20 Filename: filename,
21 Bytes: data,
22 Pos: start,
23 }
24
25 %%{
26 include UnicodeDerived "unicode_derived.rl";
27
28 UTF8Cont = 0x80 .. 0xBF;
29 AnyUTF8 = (
30 0x00..0x7F |
31 0xC0..0xDF . UTF8Cont |
32 0xE0..0xEF . UTF8Cont . UTF8Cont |
33 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
34 );
35 BrokenUTF8 = any - AnyUTF8;
36
37 NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit);
38 NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.')));
39 Ident = (ID_Start | '_') (ID_Continue | '-')*;
40
41 # Symbols that just represent themselves are handled as a single rule.
42 SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`";
43
44 EqualOp = "==";
45 NotEqual = "!=";
46 GreaterThanEqual = ">=";
47 LessThanEqual = "<=";
48 LogicalAnd = "&&";
49 LogicalOr = "||";
50
51 Ellipsis = "...";
52 FatArrow = "=>";
53
54 Newline = '\r' ? '\n';
55 EndOfLine = Newline;
56
57 BeginStringTmpl = '"';
58 BeginHeredocTmpl = '<<' ('-')? Ident Newline;
59
60 Comment = (
61 ("#" (any - EndOfLine)* EndOfLine) |
62 ("//" (any - EndOfLine)* EndOfLine) |
63 ("/*" any* "*/")
64 );
65
66 # Note: hclwrite assumes that only ASCII spaces appear between tokens,
67 # and uses this assumption to recreate the spaces between tokens by
68 # looking at byte offset differences. This means it will produce
 69	    # looking at byte offset differences. This means it will produce
 70	    # incorrect results in the presence of tabs, but that's acceptable
 71	    # because the canonical style (which hclwrite itself can impose
 72	    # automatically) is to never use tabs.
73
74 action beginStringTemplate {
75 token(TokenOQuote);
76 fcall stringTemplate;
77 }
78
79 action endStringTemplate {
80 token(TokenCQuote);
81 fret;
82 }
83
84 action beginHeredocTemplate {
85 token(TokenOHeredoc);
86 // the token is currently the whole heredoc introducer, like
87 // <<EOT or <<-EOT, followed by a newline. We want to extract
88 // just the "EOT" portion that we'll use as the closing marker.
89
90 marker := data[ts+2:te-1]
91 if marker[0] == '-' {
92 marker = marker[1:]
93 }
94 if marker[len(marker)-1] == '\r' {
95 marker = marker[:len(marker)-1]
96 }
97
98 heredocs = append(heredocs, heredocInProgress{
99 Marker: marker,
100 StartOfLine: true,
101 })
102
103 fcall heredocTemplate;
104 }
105
106 action heredocLiteralEOL {
 107	        // This action is called specifically when a heredoc literal
108 // ends with a newline character.
109
110 // This might actually be our end marker.
111 topdoc := &heredocs[len(heredocs)-1]
112 if topdoc.StartOfLine {
113 maybeMarker := bytes.TrimSpace(data[ts:te])
114 if bytes.Equal(maybeMarker, topdoc.Marker) {
115 // We actually emit two tokens here: the end-of-heredoc
116 // marker first, and then separately the newline that
117 // follows it. This then avoids issues with the closing
118 // marker consuming a newline that would normally be used
119 // to mark the end of an attribute definition.
120 // We might have either a \n sequence or an \r\n sequence
121 // here, so we must handle both.
122 nls := te-1
123 nle := te
124 te--
125 if data[te-1] == '\r' {
126 // back up one more byte
127 nls--
128 te--
129 }
130 token(TokenCHeredoc);
131 ts = nls
132 te = nle
133 token(TokenNewline);
134 heredocs = heredocs[:len(heredocs)-1]
135 fret;
136 }
137 }
138
139 topdoc.StartOfLine = true;
140 token(TokenStringLit);
141 }
142
143 action heredocLiteralMidline {
144 // This action is called when a heredoc literal _doesn't_ end
145 // with a newline character, e.g. because we're about to enter
146 // an interpolation sequence.
147 heredocs[len(heredocs)-1].StartOfLine = false;
148 token(TokenStringLit);
149 }
150
151 action bareTemplateLiteral {
152 token(TokenStringLit);
153 }
154
155 action beginTemplateInterp {
156 token(TokenTemplateInterp);
157 braces++;
158 retBraces = append(retBraces, braces);
159 if len(heredocs) > 0 {
160 heredocs[len(heredocs)-1].StartOfLine = false;
161 }
162 fcall main;
163 }
164
165 action beginTemplateControl {
166 token(TokenTemplateControl);
167 braces++;
168 retBraces = append(retBraces, braces);
169 if len(heredocs) > 0 {
170 heredocs[len(heredocs)-1].StartOfLine = false;
171 }
172 fcall main;
173 }
174
175 action openBrace {
176 token(TokenOBrace);
177 braces++;
178 }
179
180 action closeBrace {
181 if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
182 token(TokenTemplateSeqEnd);
183 braces--;
184 retBraces = retBraces[0:len(retBraces)-1]
185 fret;
186 } else {
187 token(TokenCBrace);
188 braces--;
189 }
190 }
191
192 action closeTemplateSeqEatWhitespace {
193 // Only consume from the retBraces stack and return if we are at
194 // a suitable brace nesting level, otherwise things will get
195 // confused. (Not entering this branch indicates a syntax error,
196 // which we will catch in the parser.)
197 if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
198 token(TokenTemplateSeqEnd);
199 braces--;
200 retBraces = retBraces[0:len(retBraces)-1]
201 fret;
202 } else {
203 // We intentionally generate a TokenTemplateSeqEnd here,
204 // even though the user apparently wanted a brace, because
205 // we want to allow the parser to catch the incorrect use
206 // of a ~} to balance a generic opening brace, rather than
207 // a template sequence.
208 token(TokenTemplateSeqEnd);
209 braces--;
210 }
211 }
212
213 TemplateInterp = "${" ("~")?;
214 TemplateControl = "%{" ("~")?;
215 EndStringTmpl = '"';
216 StringLiteralChars = (AnyUTF8 - ("\r"|"\n"));
217 TemplateStringLiteral = (
218 ('$' ^'{' %{ fhold; }) |
219 ('%' ^'{' %{ fhold; }) |
220 ('\\' StringLiteralChars) |
221 (StringLiteralChars - ("$" | '%' | '"'))
222 )+;
223 HeredocStringLiteral = (
224 ('$' ^'{' %{ fhold; }) |
225 ('%' ^'{' %{ fhold; }) |
226 (StringLiteralChars - ("$" | '%'))
227 )*;
228 BareStringLiteral = (
229 ('$' ^'{') |
230 ('%' ^'{') |
231 (StringLiteralChars - ("$" | '%'))
232 )* Newline?;
233
234 stringTemplate := |*
235 TemplateInterp => beginTemplateInterp;
236 TemplateControl => beginTemplateControl;
237 EndStringTmpl => endStringTemplate;
238 TemplateStringLiteral => { token(TokenQuotedLit); };
239 AnyUTF8 => { token(TokenInvalid); };
240 BrokenUTF8 => { token(TokenBadUTF8); };
241 *|;
242
243 heredocTemplate := |*
244 TemplateInterp => beginTemplateInterp;
245 TemplateControl => beginTemplateControl;
246 HeredocStringLiteral EndOfLine => heredocLiteralEOL;
247 HeredocStringLiteral => heredocLiteralMidline;
248 BrokenUTF8 => { token(TokenBadUTF8); };
249 *|;
250
251 bareTemplate := |*
252 TemplateInterp => beginTemplateInterp;
253 TemplateControl => beginTemplateControl;
254 BareStringLiteral => bareTemplateLiteral;
255 BrokenUTF8 => { token(TokenBadUTF8); };
256 *|;
257
258 identOnly := |*
259 Ident => { token(TokenIdent) };
260 BrokenUTF8 => { token(TokenBadUTF8) };
261 AnyUTF8 => { token(TokenInvalid) };
262 *|;
263
264 main := |*
265 Spaces => {};
266 NumberLit => { token(TokenNumberLit) };
267 Ident => { token(TokenIdent) };
268
269 Comment => { token(TokenComment) };
270 Newline => { token(TokenNewline) };
271
272 EqualOp => { token(TokenEqualOp); };
273 NotEqual => { token(TokenNotEqual); };
274 GreaterThanEqual => { token(TokenGreaterThanEq); };
275 LessThanEqual => { token(TokenLessThanEq); };
276 LogicalAnd => { token(TokenAnd); };
277 LogicalOr => { token(TokenOr); };
278 Ellipsis => { token(TokenEllipsis); };
279 FatArrow => { token(TokenFatArrow); };
280 SelfToken => { selfToken() };
281
282 "{" => openBrace;
283 "}" => closeBrace;
284
285 "~}" => closeTemplateSeqEatWhitespace;
286
287 BeginStringTmpl => beginStringTemplate;
288 BeginHeredocTmpl => beginHeredocTemplate;
289
290 BrokenUTF8 => { token(TokenBadUTF8) };
291 AnyUTF8 => { token(TokenInvalid) };
292 *|;
293
294 }%%
295
296 // Ragel state
297 p := 0 // "Pointer" into data
298 pe := len(data) // End-of-data "pointer"
299 ts := 0
300 te := 0
301 act := 0
302 eof := pe
303 var stack []int
304 var top int
305
306 var cs int // current state
307 switch mode {
308 case scanNormal:
309 cs = hcltok_en_main
310 case scanTemplate:
311 cs = hcltok_en_bareTemplate
312 case scanIdentOnly:
313 cs = hcltok_en_identOnly
314 default:
315 panic("invalid scanMode")
316 }
317
318 braces := 0
319 var retBraces []int // stack of brace levels that cause us to use fret
320 var heredocs []heredocInProgress // stack of heredocs we're currently processing
321
322 %%{
323 prepush {
324 stack = append(stack, 0);
325 }
326 postpop {
327 stack = stack[:len(stack)-1];
328 }
329 }%%
330
331 // Make Go compiler happy
332 _ = ts
333 _ = te
334 _ = act
335 _ = eof
336
337 token := func (ty TokenType) {
338 f.emitToken(ty, ts, te)
339 }
340 selfToken := func () {
341 b := data[ts:te]
342 if len(b) != 1 {
343 // should never happen
344 panic("selfToken only works for single-character tokens")
345 }
346 f.emitToken(TokenType(b[0]), ts, te)
347 }
348
349 %%{
350 write init nocs;
351 write exec;
352 }%%
353
354 // If we fall out here without being in a final state then we've
355 // encountered something that the scanner can't match, which we'll
356 // deal with as an invalid.
357 if cs < hcltok_first_final {
358 if mode == scanTemplate && len(stack) == 0 {
359 // If we're scanning a bare template then any straggling
360 // top-level stuff is actually literal string, rather than
361 // invalid. This handles the case where the template ends
362 // with a single "$" or "%", which trips us up because we
363 // want to see another character to decide if it's a sequence
364 // or an escape.
365 f.emitToken(TokenStringLit, ts, len(data))
366 } else {
367 f.emitToken(TokenInvalid, ts, len(data))
368 }
369 }
370
371 // We always emit a synthetic EOF token at the end, since it gives the
372 // parser position information for an "unexpected EOF" diagnostic.
373 f.emitToken(TokenEOF, len(data), len(data))
374
375 return f.Tokens
376}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
new file mode 100644
index 0000000..49b9a3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
@@ -0,0 +1,923 @@
1# HCL Native Syntax Specification
2
3This is the specification of the syntax and semantics of the native syntax
4for HCL. HCL is a system for defining configuration languages for applications.
5The HCL information model is designed to support multiple concrete syntaxes
6for configuration, but this native syntax is considered the primary format
 7and is optimized for human authoring and maintenance, as opposed to machine
8generation of configuration.
9
10The language consists of three integrated sub-languages:
11
 12* The _structural_ language defines the overall hierarchical configuration
13 structure, and is a serialization of HCL bodies, blocks and attributes.
14
15* The _expression_ language is used to express attribute values, either as
16 literals or as derivations of other values.
17
18* The _template_ language is used to compose values together into strings,
19 as one of several types of expression in the expression language.
20
21In normal use these three sub-languages are used together within configuration
22files to describe an overall configuration, with the structural language
23being used at the top level. The expression and template languages can also
24be used in isolation, to implement features such as REPLs, debuggers, and
25integration into more limited HCL syntaxes such as the JSON profile.
26
27## Syntax Notation
28
29Within this specification a semi-formal notation is used to illustrate the
30details of syntax. This notation is intended for human consumption rather
31than machine consumption, with the following conventions:
32
33* A naked name starting with an uppercase letter is a global production,
34 common to all of the syntax specifications in this document.
35* A naked name starting with a lowercase letter is a local production,
36 meaningful only within the specification where it is defined.
37* Double and single quotes (`"` and `'`) are used to mark literal character
38 sequences, which may be either punctuation markers or keywords.
39* The default operator for combining items, which has no punctuation,
40 is concatenation.
41* The symbol `|` indicates that any one of its left and right operands may
42 be present.
43* The `*` symbol indicates zero or more repetitions of the item to its left.
44* The `?` symbol indicates zero or one of the item to its left.
45* Parentheses (`(` and `)`) are used to group items together to apply
46 the `|`, `*` and `?` operators to them collectively.
47
48The grammar notation does not fully describe the language. The prose may
49augment or conflict with the illustrated grammar. In case of conflict, prose
50has priority.
51
52## Source Code Representation
53
54Source code is unicode text expressed in the UTF-8 encoding. The language
55itself does not perform unicode normalization, so syntax features such as
56identifiers are sequences of unicode code points and so e.g. a precombined
57accented character is distinct from a letter associated with a combining
58accent. (String literals have some special handling with regard to Unicode
59normalization which will be covered later in the relevant section.)
60
61UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
62non-normalized UTF-8 encoding is always a parse error.
63
64## Lexical Elements
65
66### Comments and Whitespace
67
68Comments and Whitespace are recognized as lexical elements but are ignored
69except as described below.
70
71Whitespace is defined as a sequence of zero or more space characters
72(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
73are _not_ considered whitespace but are ignored as such in certain contexts.
74
75Horizontal tab characters (U+0009) are not considered to be whitespace and
76are not valid within HCL native syntax.
77
78Comments serve as program documentation and come in two forms:
79
80* _Line comments_ start with either the `//` or `#` sequences and end with
 81  the next newline sequence. A line comment is considered equivalent to a
82 newline sequence.
83
84* _Inline comments_ start with the `/*` sequence and end with the `*/`
85 sequence, and may have any characters within except the ending sequence.
 86  An inline comment is considered equivalent to a whitespace sequence.
87
 88Comments and whitespace cannot begin within other comments, or within
89template literals except inside an interpolation sequence or template directive.
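
To illustrate the two comment forms described above, here is a small non-normative sketch; the attribute name and value are hypothetical:

```hcl
# A line comment introduced by "#"
// A line comment introduced by "//"
io_mode = "async" /* an inline comment, equivalent to whitespace */
```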
90
91### Identifiers
92
93Identifiers name entities such as blocks, attributes and expression variables.
94Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically,
95their syntax is defined in terms of the `ID_Start` and `ID_Continue`
96character properties as follows:
97
98```ebnf
99Identifier = ID_Start (ID_Continue | '-')*;
100```
101
102The Unicode specification provides the normative requirements for identifier
103parsing. Non-normatively, the spirit of this specification is that `ID_Start`
 104consists of Unicode letters and certain unambiguous punctuation tokens, while
105`ID_Continue` augments that set with Unicode digits, combining marks, etc.
106
107The dash character `-` is additionally allowed in identifiers, even though
 108that is not part of the Unicode `ID_Continue` definition. This is to allow
109attribute names and block type names to contain dashes, although underscores
110as word separators are considered the idiomatic usage.
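
As a non-normative illustration, both of the following hypothetical attribute names are valid identifiers under this definition:

```hcl
instance_count = 2
instance-count = 2  # dashes are permitted, though underscores are idiomatic
```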
111
112[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
113
114### Keywords
115
116There are no globally-reserved words, but in some contexts certain identifiers
117are reserved to function as keywords. These are discussed further in the
118relevant documentation sections that follow. In such situations, the
119identifier's role as a keyword supersedes any other valid interpretation that
120may be possible. Outside of these specific situations, the keywords have no
121special meaning and are interpreted as regular identifiers.
122
123### Operators and Delimiters
124
125The following character sequences represent operators, delimiters, and other
126special tokens:
127
128```
129+ && == < : { [ ( ${
130- || != > ? } ] ) %{
131* ! <= = .
132/ >= => ,
133% ...
134```
135
136### Numeric Literals
137
138A numeric literal is a decimal representation of a
 139real number. It has an integer part, an optional fractional part,
 140and an optional exponent part.
141
142```ebnf
143NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
144decimal = '0' .. '9';
145expmark = ('e' | 'E') ("+" | "-")?;
146```
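
For example, the following are all valid numeric literals under this grammar; the attribute names are hypothetical and serve only to give the literals a home:

```hcl
count = 42        # integer part only
ratio = 0.25      # integer and fractional parts
large = 6.02e23   # with an exponent part
small = 1.5E-9    # exponent marker may be uppercase and carry a sign
```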
147
148## Structural Elements
149
150The structural language consists of syntax representing the following
151constructs:
152
153* _Attributes_, which assign a value to a specified name.
154* _Blocks_, which create a child body annotated by a type and optional labels.
155* _Body Content_, which consists of a collection of attributes and blocks.
156
157These constructs correspond to the similarly-named concepts in the
158language-agnostic HCL information model.
159
160```ebnf
161ConfigFile = Body;
162Body = (Attribute | Block)*;
163Attribute = Identifier "=" Expression Newline;
164Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
165```
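
The following non-normative example (all names are arbitrary) shows a body combining attribute definitions with nested, labeled blocks:

```
io_mode = "async"

service "http" "web_proxy" {
  listen_addr = "127.0.0.1:8080"

  process "main" {
    command = ["/usr/local/bin/awesome-app", "server"]
  }
}
```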
166
167### Configuration Files
168
169A _configuration file_ is a sequence of characters whose top-level is
170interpreted as a Body.
171
172### Bodies
173
174A _body_ is a collection of associated attributes and blocks. The meaning of
175this association is defined by the calling application.
176
177### Attribute Definitions
178
179An _attribute definition_ assigns a value to a particular attribute name within
180a body. Each distinct attribute name may be defined no more than once within a
181single body.
182
183The attribute value is given as an expression, which is retained literally
184for later evaluation by the calling application.
185
186### Blocks
187
188A _block_ creates a child body that is annotated with a block _type_ and
189zero or more block _labels_. Blocks create a structural hierarchy which can be
190interpreted by the calling application.
191
192Block labels can either be quoted literal strings or naked identifiers.
193
194## Expressions
195
196The expression sub-language is used within attribute definitions to specify
197values.
198
199```ebnf
200Expression = (
201 ExprTerm |
202 Operation |
203 Conditional
204);
205```
206
207### Types
208
209The value types used within the expression language are those defined by the
210syntax-agnostic HCL information model. An expression may return any valid
211type, but only a subset of the available types have first-class syntax.
212A calling application may make other types available via _variables_ and
213_functions_.
214
215### Expression Terms
216
217Expression _terms_ are the operands for unary and binary expressions, as well
218as acting as expressions in their own right.
219
220```ebnf
221ExprTerm = (
222 LiteralValue |
223 CollectionValue |
224 TemplateExpr |
225 VariableExpr |
226 FunctionCall |
227 ForExpr |
228 ExprTerm Index |
229 ExprTerm GetAttr |
230 ExprTerm Splat |
231 "(" Expression ")"
232);
233```
234
235The productions for these different term types are given in their corresponding
236sections.
237
238Between the `(` and `)` characters denoting a sub-expression, newline
239characters are ignored as whitespace.
240
241### Literal Values
242
243A _literal value_ immediately represents a particular value of a primitive
244type.
245
246```ebnf
247LiteralValue = (
248 NumericLit |
249 "true" |
250 "false" |
251 "null"
252);
253```
254
255* Numeric literals represent values of type _number_.
256* The `true` and `false` keywords represent values of type _bool_.
257* The `null` keyword represents a null value of the dynamic pseudo-type.
258
259String literals are not directly available in the expression sub-language, but
260are available via the template sub-language, which can in turn be incorporated
261via _template expressions_.
262
263### Collection Values
264
265A _collection value_ combines zero or more other expressions to produce a
266new collection.
267
268```ebnf
269CollectionValue = tuple | object;
270tuple = "[" (
271 (Expression ("," Expression)* ","?)?
272) "]";
273object = "{" (
274 (objectelem ("," objectelem)* ","?)?
275) "}";
276objectelem = (Identifier | Expression) "=" Expression;
277```
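
For illustration, and assuming a variable named `env` is in scope, the following attribute definitions construct a tuple and an object; the parenthesized key is populated from the variable, as described below:

```
ports = [8080, 8443]

tags = {
  name  = "example",
  (env) = "true",
}
```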
278
279Only tuple and object values can be directly constructed via native syntax.
280Tuple and object values can in turn be converted to list, set and map values
281with other operations, which behave as defined by the syntax-agnostic HCL
282information model.
283
284When specifying an object element, an identifier is interpreted as a literal
285attribute name as opposed to a variable reference. To populate an item key
286from a variable, use parentheses to disambiguate:
287
288* `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
289* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
290 from the variable named `foo`.
291
292Between the open and closing delimiters of these sequences, newline sequences
293are ignored as whitespace.
294
295There is a syntax ambiguity between _for expressions_ and collection values
296whose first element is a reference to a variable named `for`. The
297_for expression_ interpretation has priority, so to produce a tuple whose
298first element is the value of a variable named `for`, or an object with a
299key named `for`, use parentheses to disambiguate:
300
301* `[for, foo, baz]` is a syntax error.
302* `[(for), foo, baz]` is a tuple whose first element is the value of variable
303 `for`.
304* `{for: 1, baz: 2}` is a syntax error.
305* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
306* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
307 ambiguity by reordering.
308
309### Template Expressions
310
311A _template expression_ embeds a program written in the template sub-language
312as an expression. Template expressions come in two forms:
313
314* A _quoted_ template expression is delimited by quote characters (`"`) and
315 defines a template as a single-line expression with escape characters.
316* A _heredoc_ template expression is introduced by a `<<` sequence and
317 defines a template via a multi-line sequence terminated by a user-chosen
318 delimiter.
319
320In both cases the template interpolation and directive syntax is available for
321use within the delimiters, and any text outside of these special sequences is
322interpreted as a literal string.
323
324In _quoted_ template expressions any literal string sequences within the
325template behave in a special way: literal newline sequences are not permitted
326and instead _escape sequences_ can be included, starting with the
327backslash `\`:
328
329```
330 \n Unicode newline control character
331 \r Unicode carriage return control character
332 \t Unicode tab control character
333 \" Literal quote mark, used to prevent interpretation as end of string
334 \\ Literal backslash, used to prevent interpretation as escape sequence
335 \uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits)
336 \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits)
337```
338
339The _heredoc_ template expression type is introduced by either `<<` or `<<-`,
340followed by an identifier. The template expression ends when the given
341identifier subsequently appears again on a line of its own.
342
343If a heredoc template is introduced with the `<<-` symbol, any literal string
344at the start of each line is analyzed to find the minimum number of leading
345spaces, and then that number of prefix spaces is removed from all line-leading
346literal strings. The final closing marker may also have an arbitrary number
347of spaces preceding it on its line.
348
349```ebnf
350TemplateExpr = quotedTemplate | heredocTemplate;
351quotedTemplate = (as defined in prose above);
352heredocTemplate = (
353 ("<<" | "<<-") Identifier Newline
354 (content as defined in prose above)
355 Identifier Newline
356);
357```
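
The following non-normative example shows both template expression forms, assuming a string variable `name` is in scope. The `<<-` variant strips the two leading spaces from each line of the heredoc body:

```
greeting = "Hello, \"${name}\"!\n"

message = <<-EOT
  Hello, ${name}.
  Goodbye.
EOT
```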
358
359A quoted template expression containing only a single literal string serves
360as a syntax for defining literal string _expressions_. In certain contexts
361the template syntax is restricted in this manner:
362
363```ebnf
364StringLit = '"' (quoted literals as defined in prose above) '"';
365```
366
367The `StringLit` production permits the escape sequences discussed for quoted
368template expressions as above, but does _not_ permit template interpolation
369or directive sequences.
370
371### Variables and Variable Expressions
372
373A _variable_ is a value that has been assigned a symbolic name. Variables are
374made available for use in expressions by the calling application, by populating
375the _global scope_ used for expression evaluation.
376
377Variables can also be created by expressions themselves, each of which creates
378a _child scope_ that incorporates the variables from its parent scope but
379(re-)defines zero or more names with new values.
380
381The value of a variable is accessed using a _variable expression_, which is
382a standalone `Identifier` whose name corresponds to a defined variable:
383
384```ebnf
385VariableExpr = Identifier;
386```
387
388Variables in a particular scope are immutable, but child scopes may _hide_
389a variable from an ancestor scope by defining a new variable of the same name.
390When looking up variables, the most locally-defined variable of the given name
391is used, and ancestor-scoped variables of the same name cannot be accessed.
392
393No direct syntax is provided for declaring or assigning variables, but other
394expression constructs implicitly create child scopes and define variables as
395part of their evaluation.
396
397### Functions and Function Calls
398
399A _function_ is an operation that has been assigned a symbolic name. Functions
400are made available for use in expressions by the calling application, by
401populating the _function table_ used for expression evaluation.
402
403The namespace of functions is distinct from the namespace of variables. A
404function and a variable may share the same name with no implication that they
405are in any way related.
406
407A function can be executed via a _function call_ expression:
408
409```ebnf
410FunctionCall = Identifier "(" arguments ")";
411arguments = (
412 () |
413 (Expression ("," Expression)* ("," | "...")?)
414);
415```
416
417The definition of functions and the semantics of calling them are defined by
418the language-agnostic HCL information model. The given arguments are mapped
419onto the function's _parameters_ and the result of a function call expression
420is the return value of the named function when given those arguments.
421
422If the final argument expression is followed by the ellipsis symbol (`...`),
423the final argument expression must evaluate to either a list or tuple value.
424The elements of the value are each mapped to a single parameter of the
425named function, beginning at the first parameter remaining after all other
426argument expressions have been mapped.
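
For example, assuming the calling application provides a function named `min` and a variable `nums` holding a list of numbers, the second call below expands the list elements into individual arguments:

```
a = min(1, 5, 9)
b = min(nums...)
```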
427
428Within the parentheses that delimit the function arguments, newline sequences
429are ignored as whitespace.
430
431### For Expressions
432
433A _for expression_ constructs a new collection by projecting
434the items from another collection.
435
436```ebnf
437ForExpr = forTupleExpr | forObjectExpr;
438forTupleExpr = "[" forIntro Expression forCond? "]";
439forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
440forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
441forCond = "if" Expression;
442```
443
444The punctuation used to delimit a for expression decides whether it will produce
445a tuple value (`[` and `]`) or an object value (`{` and `}`).
446
447The "introduction" is equivalent in both cases: the keyword `for` followed by
448either one or two identifiers separated by a comma which define the temporary
449variable names used for iteration, followed by the keyword `in` and then
450an expression that must evaluate to a value that can be iterated. The
451introduction is then terminated by the colon (`:`) symbol.
452
453If only one identifier is provided, it is the name of a variable that will
454be temporarily assigned the value of each element during iteration. If both
455are provided, the first is the key and the second is the value.
456
457Tuple, object, list, map, and set types are iterable. The type of collection
458used defines how the key and value variables are populated:
459
460* For tuple and list types, the _key_ is the zero-based index into the
461 sequence for each element, and the _value_ is the element value. The
462 elements are visited in index order.
463* For object and map types, the _key_ is the string attribute name or element
464 key, and the _value_ is the attribute or element value. The elements are
465 visited in the order defined by a lexicographic sort of the attribute names
466 or keys.
467* For set types, the _key_ and _value_ are both the element value. The elements
468 are visited in an undefined but consistent order.
469
470The expression after the colon and (in the case of object `for`) the expression
471after the `=>` are both evaluated once for each element of the source
472collection, in a local scope that defines the key and value variable names
473specified.
474
475The results of evaluating these expressions for each input element are used
476to populate an element in the new collection. In the case of tuple `for`, the
477single expression becomes an element, appending values to the tuple in visit
478order. In the case of object `for`, the pair of expressions is used as an
479attribute name and value respectively, creating an element in the resulting
480object.
481
482In the case of object `for`, it is an error if two input elements produce
483the same result from the attribute name expression, since duplicate
484attributes are not possible. If the ellipsis symbol (`...`) appears
485immediately after the value expression, this activates the grouping mode in
486which each value in the resulting object is a _tuple_ of all of the values
487that were produced against each distinct key.
488
489* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
490* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
491* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
492* `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
493 `a` is defined twice.
494* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
495
496If the `if` keyword is used after the element expression(s), it applies an
497additional predicate that can be used to conditionally filter elements from
498the source collection from consideration. The expression following `if` is
499evaluated once for each source element, in the same scope used for the
500element expression(s). It must evaluate to a boolean value; if `true`, the
501element will be evaluated as normal, while if `false` the element will be
502skipped.
503
504* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
505
506If the collection value, element expression(s) or condition expression return
507unknown values that are otherwise type-valid, the result is a value of the
508dynamic pseudo-type.
509
510### Index Operator
511
512The _index_ operator returns the value of a single element of a collection
513value. It is a postfix operator and can be applied to any value that has
514a tuple, object, map, or list type.
515
516```ebnf
517Index = "[" Expression "]";
518```
519
520The expression delimited by the brackets is the _key_ by which an element
521will be looked up.
522
523If the index operator is applied to a value of tuple or list type, the
524key expression must be a non-negative integer number representing the
525zero-based element index to access. If applied to a value of object or map
526type, the key expression must be a string representing the attribute name
527or element key. If the given key value is not of the appropriate type, a
528conversion is attempted using the conversion rules from the HCL
529syntax-agnostic information model.
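
For example, assuming `names` is a tuple of strings and `settings` is an object value:

```
first   = names[0]
second  = names["1"]          # "1" is converted to the number 1
timeout = settings["timeout"]
```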
530
531An error is produced if the given key expression does not correspond to
532an element in the collection, either because it is of an unconvertible type,
533because it is outside the range of elements for a tuple or list, or because
534the given attribute or key does not exist.
535
536If either the collection or the key are an unknown value of an
537otherwise-suitable type, the return value is an unknown value whose type
538matches what type would be returned given known values, or a value of the
539dynamic pseudo-type if type information alone cannot determine a suitable
540return type.
541
542Within the brackets that delimit the index key, newline sequences are ignored
543as whitespace.
544
545### Attribute Access Operator
546
547The _attribute access_ operator returns the value of a single attribute in
548an object value. It is a postfix operator and can be applied to any value
549that has an object type.
550
551```ebnf
552GetAttr = "." Identifier;
553```
554
555The given identifier is interpreted as the name of the attribute to access.
556An error is produced if the object to which the operator is applied does not
557have an attribute with the given name.
558
559If the object is an unknown value of a type that has the attribute named, the
560result is an unknown value of the attribute's type.
561
562### Splat Operators
563
564The _splat operators_ allow convenient access to attributes or elements of
565elements in a tuple, list, or set value.
566
567There are two kinds of "splat" operator:
568
569* The _attribute-only_ splat operator supports only attribute lookups into
570 the elements from a list, but supports an arbitrary number of them.
571
572* The _full_ splat operator additionally supports indexing into the elements
573 from a list, and allows any combination of attribute access and index
574 operations.
575
576```ebnf
577Splat = attrSplat | fullSplat;
578attrSplat = "." "*" GetAttr*;
579fullSplat = "[" "*" "]" (GetAttr | Index)*;
580```
581
582The splat operators can be thought of as shorthands for common operations that
583could otherwise be performed using _for expressions_:
584
585* `tuple.*.foo.bar[0]` is approximately equivalent to
586 `[for v in tuple: v.foo.bar][0]`.
587* `tuple[*].foo.bar[0]` is approximately equivalent to
588 `[for v in tuple: v.foo.bar[0]]`
589
590Note the difference in how the trailing index operator is interpreted in
591each case. This different interpretation is the key difference between the
592_attribute-only_ and _full_ splat operators.
593
594Splat operators have one additional behavior compared to the equivalent
595_for expressions_ shown above: if a splat operator is applied to a value that
596is _not_ of tuple, list, or set type, the value is coerced automatically into
597a single-value list of the value type:
598
599* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
600 is a single object.
601* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
602 is a single number.
603
604If the left operand of a splat operator is an unknown value of any type, the
605result is a value of the dynamic pseudo-type.
606
607### Operations
608
609Operations apply a particular operator to either one or two expression terms.
610
611```ebnf
612Operation = unaryOp | binaryOp;
613unaryOp = ("-" | "!") ExprTerm;
614binaryOp = ExprTerm binaryOperator ExprTerm;
615binaryOperator = compareOperator | arithmeticOperator | logicOperator;
616compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
617arithmeticOperator = "+" | "-" | "*" | "/" | "%";
618logicOperator = "&&" | "||" | "!";
619```
620
621The unary operators have the highest precedence.
622
623The binary operators are grouped into the following precedence levels:
624
625```
626Level Operators
627 6 * / %
628 5 + -
629 4 > >= < <=
630 3 == !=
631 2 &&
632 1 ||
633```
634
635Higher values of "level" bind tighter. Operators within the same precedence
636level have left-to-right associativity. For example, `x / y * z` is equivalent
637to `(x / y) * z`.
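
For example (non-normative):

```
a = 2 + 3 * 4              # equivalent to 2 + (3 * 4), giving 14
b = (2 + 3) * 4            # parentheses override precedence, giving 20
c = true || false && false # equivalent to true || (false && false), giving true
```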
638
639### Comparison Operators
640
641Comparison operators always produce boolean values, as a result of testing
642the relationship between two values.
643
644The two equality operators apply to values of any type:
645
646```
647a == b equal
648a != b not equal
649```
650
651Two values are equal if they are of identical types and their values are
652equal as defined in the HCL syntax-agnostic information model. The equality
653operators are commutative and opposite, such that `(a == b) == !(a != b)`
654and `(a == b) == (b == a)` for all values `a` and `b`.
655
656The four numeric comparison operators apply only to numbers:
657
658```
659a < b less than
660a <= b less than or equal to
661a > b greater than
662a >= b greater than or equal to
663```
664
665If either operand of a comparison operator is a correctly-typed unknown value
666or a value of the dynamic pseudo-type, the result is an unknown boolean.
667
668### Arithmetic Operators
669
670Arithmetic operators apply only to number values and always produce number
671values as results.
672
673```
674a + b sum (addition)
675a - b difference (subtraction)
676a * b product (multiplication)
677a / b quotient (division)
678a % b remainder (modulo)
679-a negation
680```
681
682Arithmetic operations are considered to be performed in an arbitrary-precision
683number space.
684
685If either operand of an arithmetic operator is an unknown number or a value
686of the dynamic pseudo-type, the result is an unknown number.
687
688### Logic Operators
689
690Logic operators apply only to boolean values and always produce boolean values
691as results.
692
693```
694a && b logical AND
695a || b logical OR
696!a logical NOT
697```
698
699If either operand of a logic operator is an unknown bool value or a value
700of the dynamic pseudo-type, the result is an unknown bool value.
701
702### Conditional Operator
703
704The conditional operator allows selecting from one of two expressions based on
705the outcome of a boolean expression.
706
707```ebnf
708Conditional = Expression "?" Expression ":" Expression;
709```
710
711The first expression is the _predicate_, which is evaluated and must produce
712a boolean result. If the predicate value is `true`, the result of the second
713expression is the result of the conditional. If the predicate value is
714`false`, the result of the third expression is the result of the conditional.
715
716The second and third expressions must be of the same type or must be able to
717unify into a common type using the type unification rules defined in the
718HCL syntax-agnostic information model. This unified type is the result type
719of the conditional, with both expressions converted as necessary to the
720unified type.
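
For example, assuming a boolean variable `use_tls` is in scope; in the second definition the number branch is converted because string is the unified type of the two branches:

```
port  = use_tls ? 443 : 80          # both branches are numbers; result is a number
label = use_tls ? "secure" : 8080   # unified type is string; 8080 becomes "8080"
```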
721
722If the predicate is an unknown boolean value or a value of the dynamic
723pseudo-type then the result is an unknown value of the unified type of the
724other two expressions.
725
726If either the second or third expressions produce errors when evaluated,
727these errors are passed through only if the erroneous expression is selected.
728This allows for expressions such as
729`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length`
730function) without producing an error when the predicate is `false`.
731
732## Templates
733
734The template sub-language is used within template expressions to concisely
735combine strings and other values to produce other strings. It can also be
736used in isolation as a standalone template language.
737
738```ebnf
739Template = (
740 TemplateLiteral |
741 TemplateInterpolation |
742 TemplateDirective
743)*
744TemplateDirective = TemplateIf | TemplateFor;
745```
746
747A template behaves like an expression that always returns a string value.
748The different elements of the template are evaluated and combined into a
749single string to return. If any of the elements produce an unknown string
750or a value of the dynamic pseudo-type, the result is an unknown string.
751
752An important use-case for standalone templates is to enable the use of
753expressions in alternative HCL syntaxes where a native expression grammar is
754not available. For example, the HCL JSON profile treats the values of JSON
755strings as standalone templates when attributes are evaluated in expression
756mode.
757
758### Template Literals
759
760A template literal is a literal sequence of characters to include in the
761resulting string. When the template sub-language is used standalone, a
762template literal can contain any Unicode character, with the exception
763of the sequences that introduce interpolations and directives, and for the
764sequences that escape those introductions.
765
766The interpolation and directive introductions are escaped by doubling their
767leading characters. The `${` sequence is escaped as `$${` and the `%{`
768sequence is escaped as `%%{`.
769
770When the template sub-language is embedded in the expression language via
771_template expressions_, additional constraints and transforms are applied to
772template literals as described in the definition of template expressions.
773
774The value of a template literal can be modified by _strip markers_ in any
775interpolations or directives that are adjacent to it. A strip marker is
776a tilde (`~`) placed immediately after the opening `{` or before the closing
777`}` of a template sequence:
778
779* `hello ${~ "world" }` produces `"helloworld"`.
780* `%{ if true ~} hello %{~ endif }` produces `"hello"`.
781
782When a strip marker is present, any spaces adjacent to it in the corresponding
783string literal (if any) are removed before producing the final value. Space
784characters are interpreted as per Unicode's definition.
785
786Stripping is done at syntax level rather than value level. Values returned
787by interpolations or directives are not subject to stripping:
788
789* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
790 because the space is not in a template literal directly adjacent to the
791 strip marker.
792
793### Template Interpolations
794
795An _interpolation sequence_ evaluates an expression (written in the
796expression sub-language), converts the result to a string value, and
797replaces itself with the resulting string.
798
799```ebnf
800TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
801```
802
803If the expression result cannot be converted to a string, an error is
804produced.
805
806### Template If Directive
807
808The template `if` directive is the template equivalent of the
809_conditional expression_, allowing selection of one of two sub-templates based
810on the value of a predicate expression.
811
812```ebnf
813TemplateIf = (
814 ("%{" | "%{~") "if" Expression ("}" | "~}")
815 Template
816 (
817 ("%{" | "%{~") "else" ("}" | "~}")
818 Template
819 )?
820 ("%{" | "%{~") "endif" ("}" | "~}")
821);
822```
823
824The evaluation of the `if` directive is equivalent to the conditional
825expression, with the following exceptions:
826
827* The two sub-templates always produce strings, and thus the result value is
828 also always a string.
829* The `else` clause may be omitted, in which case the conditional's third
830 expression result is implied to be the empty string.
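
A non-normative example, assuming a boolean variable `maintenance` is in scope:

```
status = "%{ if maintenance }down for maintenance%{ else }operational%{ endif }"
```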
831
832### Template For Directive
833
834The template `for` directive is the template equivalent of the _for expression_,
835producing zero or more copies of its sub-template based on the elements of
836a collection.
837
838```ebnf
839TemplateFor = (
840 ("%{" | "%{~") "for" Identifier ("," Identifier)? "in" Expression ("}" | "~}")
841 Template
842 ("%{" | "%{~") "endfor" ("}" | "~}")
843);
844```
845
846The evaluation of the `for` directive is equivalent to the _for expression_
847when producing a tuple, with the following exceptions:
848
849* The sub-template always produces a string.
850* There is no equivalent of the "if" clause on the for expression.
851* The elements of the resulting tuple are all converted to strings and
852 concatenated to produce a flat string result.
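
A non-normative example, assuming a variable `hosts` holds a list of strings:

```
motd = "%{ for h in hosts }Hello, ${h}! %{ endfor }"
```

If `hosts` is `["a", "b"]`, the result is the string `"Hello, a! Hello, b! "`, including the trailing space from the literal inside the loop.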
853
854### Template Interpolation Unwrapping
855
856As a special case, a template that consists only of a single interpolation,
857with no surrounding literals, directives or other interpolations, is
858"unwrapped". In this case, the result of the interpolation expression is
859returned verbatim, without conversion to string.
860
861This special case exists primarily to enable the native template language
862to be used inside strings in alternative HCL syntaxes that lack a first-class
863template or expression syntax. Unwrapping allows arbitrary expressions to be
864used to populate attributes when strings in such languages are interpreted
865as templates.
866
867* `${true}` produces the boolean value `true`
868* `${"${true}"}` produces the boolean value `true`, because both the inner
869 and outer interpolations are subject to unwrapping.
870* `hello ${true}` produces the string `"hello true"`
871* `${""}${true}` produces the string `"true"` because there are two
872 interpolation sequences, even though one produces an empty result.
873* `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because
874 the presence of the `for` directive circumvents the unwrapping even though
875 the final result is a single value.
876
877In some contexts this unwrapping behavior may be circumvented by the calling
878application, by converting the final template result to string. This is
879necessary, for example, if a standalone template is being used to produce
880the direct contents of a file, since the result in that case must always be a
881string.
882
883## Static Analysis
884
885The HCL static analysis operations are implemented for some expression types
886in the native syntax, as described in the following sections.
887
888A goal for static analysis of the native syntax is for the interpretation to
889be as consistent as possible with the dynamic evaluation interpretation of
890the given expression, though some deviations are intentionally made in order
891to maximize the potential for analysis.
892
893### Static List
894
895The tuple construction syntax can be interpreted as a static list. All of
896the expression elements given are returned as the static list elements,
897with no further interpretation.
898
899### Static Map
900
901The object construction syntax can be interpreted as a static map. All of the
902key/value pairs given are returned as the static pairs, with no further
903interpretation.
904
905The usual requirement that an attribute name be interpretable as a string
906does not apply to this static analysis, allowing callers to provide map-like
907constructs with different key types by building on the map syntax.
908
909### Static Call
910
911The function call syntax can be interpreted as a static call. The called
912function name is returned verbatim and the given argument expressions are
913returned as the static arguments, with no further interpretation.
914
915### Static Traversal
916
917A variable expression and any attached attribute access operations and
918constant index operations can be interpreted as a static traversal.
919
920The keywords `true`, `false` and `null` can also be interpreted as
921static traversals, behaving as if they were references to variables of those
922names, to allow callers to redefine the meaning of those keywords in certain
923contexts.
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
new file mode 100644
index 0000000..d69f65b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -0,0 +1,379 @@
1package hclsyntax
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/hcl2/hcl"
8)
9
10// AsHCLBlock returns the block data expressed as a *hcl.Block.
11func (b *Block) AsHCLBlock() *hcl.Block {
12 lastHeaderRange := b.TypeRange
13 if len(b.LabelRanges) > 0 {
14 lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
15 }
16
17 return &hcl.Block{
18 Type: b.Type,
19 Labels: b.Labels,
20 Body: b.Body,
21
22 DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange),
23 TypeRange: b.TypeRange,
24 LabelRanges: b.LabelRanges,
25 }
26}
27
28// Body is the implementation of hcl.Body for the HCL native syntax.
29type Body struct {
30 Attributes Attributes
31 Blocks Blocks
32
33 // These are used with PartialContent to produce a "remaining items"
34 // body to return. They are nil on all bodies fresh out of the parser.
35 hiddenAttrs map[string]struct{}
36 hiddenBlocks map[string]struct{}
37
38 SrcRange hcl.Range
39 EndRange hcl.Range // Final token of the body, for reporting missing items
40}
41
42// Assert that *Body implements hcl.Body
43var assertBodyImplBody hcl.Body = &Body{}
44
45func (b *Body) walkChildNodes(w internalWalkFunc) {
46 b.Attributes = w(b.Attributes).(Attributes)
47 b.Blocks = w(b.Blocks).(Blocks)
48}
49
50func (b *Body) Range() hcl.Range {
51 return b.SrcRange
52}
53
54func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
55 content, remainHCL, diags := b.PartialContent(schema)
56
57 // Now we'll see if anything actually remains, to produce errors about
58 // extraneous items.
59 remain := remainHCL.(*Body)
60
61 for name, attr := range b.Attributes {
62 if _, hidden := remain.hiddenAttrs[name]; !hidden {
63 var suggestions []string
64 for _, attrS := range schema.Attributes {
65 if _, defined := content.Attributes[attrS.Name]; defined {
66 continue
67 }
68 suggestions = append(suggestions, attrS.Name)
69 }
70 suggestion := nameSuggestion(name, suggestions)
71 if suggestion != "" {
72 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
73 } else {
74 // Is there a block of the same name?
75 for _, blockS := range schema.Blocks {
76 if blockS.Type == name {
77 suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
78 break
79 }
80 }
81 }
82
83 diags = append(diags, &hcl.Diagnostic{
84 Severity: hcl.DiagError,
85 Summary: "Unsupported attribute",
86 Detail: fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion),
87 Subject: &attr.NameRange,
88 })
89 }
90 }
91
92 for _, block := range b.Blocks {
93 blockTy := block.Type
94 if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
95 var suggestions []string
96 for _, blockS := range schema.Blocks {
97 suggestions = append(suggestions, blockS.Type)
98 }
99 suggestion := nameSuggestion(blockTy, suggestions)
100 if suggestion != "" {
101 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
102 } else {
103 // Is there an attribute of the same name?
104 for _, attrS := range schema.Attributes {
105 if attrS.Name == blockTy {
106 suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy)
107 break
108 }
109 }
110 }
111
112 diags = append(diags, &hcl.Diagnostic{
113 Severity: hcl.DiagError,
114 Summary: "Unsupported block type",
115 Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion),
116 Subject: &block.TypeRange,
117 })
118 }
119 }
120
121 return content, diags
122}
123
124func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
125 attrs := make(hcl.Attributes)
126 var blocks hcl.Blocks
127 var diags hcl.Diagnostics
128 hiddenAttrs := make(map[string]struct{})
129 hiddenBlocks := make(map[string]struct{})
130
131 if b.hiddenAttrs != nil {
132 for k, v := range b.hiddenAttrs {
133 hiddenAttrs[k] = v
134 }
135 }
136 if b.hiddenBlocks != nil {
137 for k, v := range b.hiddenBlocks {
138 hiddenBlocks[k] = v
139 }
140 }
141
142 for _, attrS := range schema.Attributes {
143 name := attrS.Name
144 attr, exists := b.Attributes[name]
145 _, hidden := hiddenAttrs[name]
146 if hidden || !exists {
147 if attrS.Required {
148 diags = append(diags, &hcl.Diagnostic{
149 Severity: hcl.DiagError,
150 Summary: "Missing required attribute",
151 Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name),
152 Subject: b.MissingItemRange().Ptr(),
153 })
154 }
155 continue
156 }
157
158 hiddenAttrs[name] = struct{}{}
159 attrs[name] = attr.AsHCLAttribute()
160 }
161
162 blocksWanted := make(map[string]hcl.BlockHeaderSchema)
163 for _, blockS := range schema.Blocks {
164 blocksWanted[blockS.Type] = blockS
165 }
166
167 for _, block := range b.Blocks {
168 if _, hidden := hiddenBlocks[block.Type]; hidden {
169 continue
170 }
171 blockS, wanted := blocksWanted[block.Type]
172 if !wanted {
173 continue
174 }
175
176 if len(block.Labels) > len(blockS.LabelNames) {
177 name := block.Type
178 if len(blockS.LabelNames) == 0 {
179 diags = append(diags, &hcl.Diagnostic{
180 Severity: hcl.DiagError,
181 Summary: fmt.Sprintf("Extraneous label for %s", name),
182 Detail: fmt.Sprintf(
183 "No labels are expected for %s blocks.", name,
184 ),
185 Subject: block.LabelRanges[0].Ptr(),
186 Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
187 })
188 } else {
189 diags = append(diags, &hcl.Diagnostic{
190 Severity: hcl.DiagError,
191 Summary: fmt.Sprintf("Extraneous label for %s", name),
192 Detail: fmt.Sprintf(
193 "Only %d labels (%s) are expected for %s blocks.",
194 len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name,
195 ),
196 Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(),
197 Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
198 })
199 }
200 continue
201 }
202
203 if len(block.Labels) < len(blockS.LabelNames) {
204 name := block.Type
205 diags = append(diags, &hcl.Diagnostic{
206 Severity: hcl.DiagError,
207 Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name),
208 Detail: fmt.Sprintf(
209 "All %s blocks must have %d labels (%s).",
210 name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "),
211 ),
212 Subject: &block.OpenBraceRange,
213 Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
214 })
215 continue
216 }
217
218 blocks = append(blocks, block.AsHCLBlock())
219 }
220
221 // We hide blocks only after we've processed all of them, since otherwise
222 // we can't process more than one of the same type.
223 for _, blockS := range schema.Blocks {
224 hiddenBlocks[blockS.Type] = struct{}{}
225 }
226
227 remain := &Body{
228 Attributes: b.Attributes,
229 Blocks: b.Blocks,
230
231 hiddenAttrs: hiddenAttrs,
232 hiddenBlocks: hiddenBlocks,
233
234 SrcRange: b.SrcRange,
235 EndRange: b.EndRange,
236 }
237
238 return &hcl.BodyContent{
239 Attributes: attrs,
240 Blocks: blocks,
241
242 MissingItemRange: b.MissingItemRange(),
243 }, remain, diags
244}
245
246func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
247 attrs := make(hcl.Attributes)
248 var diags hcl.Diagnostics
249
250 if len(b.Blocks) > 0 {
251 example := b.Blocks[0]
252 diags = append(diags, &hcl.Diagnostic{
253 Severity: hcl.DiagError,
254 Summary: fmt.Sprintf("Unexpected %s block", example.Type),
255 Detail: "Blocks are not allowed here.",
256 Context: &example.TypeRange,
257 })
258 // we will continue processing anyway, and return the attributes
259 // we are able to find so that certain analyses can still be done
260 // in the face of errors.
261 }
262
263 if b.Attributes == nil {
264 return attrs, diags
265 }
266
267 for name, attr := range b.Attributes {
268 if _, hidden := b.hiddenAttrs[name]; hidden {
269 continue
270 }
271 attrs[name] = attr.AsHCLAttribute()
272 }
273
274 return attrs, diags
275}
276
277func (b *Body) MissingItemRange() hcl.Range {
278 return b.EndRange
279}
280
281// Attributes is the collection of attribute definitions within a body.
282type Attributes map[string]*Attribute
283
284func (a Attributes) walkChildNodes(w internalWalkFunc) {
285 for k, attr := range a {
286 a[k] = w(attr).(*Attribute)
287 }
288}
289
290// Range returns the range of some arbitrary point within the set of
291// attributes, or an invalid range if there are no attributes.
292//
293// This is provided only to complete the Node interface, but has no practical
294// use.
295func (a Attributes) Range() hcl.Range {
296 // An Attributes doesn't really have a useful range to report, since
297 // it's just a grouping construct. So we'll arbitrarily take the
298 // range of one of the attributes, or produce an invalid range if we have
299 // none. In practice, there's little reason to ask for the range of
300 // an Attributes.
301 for _, attr := range a {
302 return attr.Range()
303 }
304 return hcl.Range{
305 Filename: "<unknown>",
306 }
307}
308
309// Attribute represents a single attribute definition within a body.
310type Attribute struct {
311 Name string
312 Expr Expression
313
314 SrcRange hcl.Range
315 NameRange hcl.Range
316 EqualsRange hcl.Range
317}
318
319func (a *Attribute) walkChildNodes(w internalWalkFunc) {
320 a.Expr = w(a.Expr).(Expression)
321}
322
323func (a *Attribute) Range() hcl.Range {
324 return a.SrcRange
325}
326
327// AsHCLAttribute returns the attribute data expressed as a *hcl.Attribute.
328func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
329 return &hcl.Attribute{
330 Name: a.Name,
331 Expr: a.Expr,
332
333 Range: a.SrcRange,
334 NameRange: a.NameRange,
335 }
336}
337
338// Blocks is the list of nested blocks within a body.
339type Blocks []*Block
340
341func (bs Blocks) walkChildNodes(w internalWalkFunc) {
342 for i, block := range bs {
343 bs[i] = w(block).(*Block)
344 }
345}
346
347// Range returns the range of some arbitrary point within the list of
348// blocks, or an invalid range if there are no blocks.
349//
350// This is provided only to complete the Node interface, but has no practical
351// use.
352func (bs Blocks) Range() hcl.Range {
353 if len(bs) > 0 {
354 return bs[0].Range()
355 }
356 return hcl.Range{
357 Filename: "<unknown>",
358 }
359}
360
361// Block represents a nested block structure
362type Block struct {
363 Type string
364 Labels []string
365 Body *Body
366
367 TypeRange hcl.Range
368 LabelRanges []hcl.Range
369 OpenBraceRange hcl.Range
370 CloseBraceRange hcl.Range
371}
372
373func (b *Block) walkChildNodes(w internalWalkFunc) {
374 b.Body = w(b.Body).(*Body)
375}
376
377func (b *Block) Range() hcl.Range {
378 return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange)
379}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
new file mode 100644
index 0000000..bcaa15f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
@@ -0,0 +1,272 @@
1package hclsyntax
2
3import (
4 "fmt"
5
6 "github.com/apparentlymart/go-textseg/textseg"
7 "github.com/hashicorp/hcl2/hcl"
8)
9
10// Token represents a sequence of bytes from some HCL code that has been
11// tagged with a type and its range within the source file.
12type Token struct {
13 Type TokenType
14 Bytes []byte
15 Range hcl.Range
16}
17
18// Tokens is a slice of Token.
19type Tokens []Token
20
21// TokenType is an enumeration used for the Type field on Token.
22type TokenType rune
23
24const (
25 // Single-character tokens are represented by their own character, for
26 // convenience in producing these within the scanner. However, the values
27 // are otherwise arbitrary and just intended to be mnemonic for humans
28 // who might see them in debug output.
29
30 TokenOBrace TokenType = '{'
31 TokenCBrace TokenType = '}'
32 TokenOBrack TokenType = '['
33 TokenCBrack TokenType = ']'
34 TokenOParen TokenType = '('
35 TokenCParen TokenType = ')'
36 TokenOQuote TokenType = '«'
37 TokenCQuote TokenType = '»'
38 TokenOHeredoc TokenType = 'H'
39 TokenCHeredoc TokenType = 'h'
40
41 TokenStar TokenType = '*'
42 TokenSlash TokenType = '/'
43 TokenPlus TokenType = '+'
44 TokenMinus TokenType = '-'
45 TokenPercent TokenType = '%'
46
47 TokenEqual TokenType = '='
48 TokenEqualOp TokenType = '≔'
49 TokenNotEqual TokenType = '≠'
50 TokenLessThan TokenType = '<'
51 TokenLessThanEq TokenType = '≤'
52 TokenGreaterThan TokenType = '>'
53 TokenGreaterThanEq TokenType = '≥'
54
55 TokenAnd TokenType = '∧'
56 TokenOr TokenType = '∨'
57 TokenBang TokenType = '!'
58
59 TokenDot TokenType = '.'
60 TokenComma TokenType = ','
61
62 TokenEllipsis TokenType = '…'
63 TokenFatArrow TokenType = '⇒'
64
65 TokenQuestion TokenType = '?'
66 TokenColon TokenType = ':'
67
68 TokenTemplateInterp TokenType = '∫'
69 TokenTemplateControl TokenType = 'λ'
70 TokenTemplateSeqEnd TokenType = '∎'
71
72 TokenQuotedLit TokenType = 'Q' // might contain backslash escapes
73 TokenStringLit TokenType = 'S' // cannot contain backslash escapes
74 TokenNumberLit TokenType = 'N'
75 TokenIdent TokenType = 'I'
76
77 TokenComment TokenType = 'C'
78
79 TokenNewline TokenType = '\n'
80 TokenEOF TokenType = '␄'
81
82 // The rest are not used in the language but recognized by the scanner so
83 // we can generate good diagnostics in the parser when users try to write
84 // things that might work in other languages they are familiar with, or
85 // simply make incorrect assumptions about the HCL language.
86
87 TokenBitwiseAnd TokenType = '&'
88 TokenBitwiseOr TokenType = '|'
89 TokenBitwiseNot TokenType = '~'
90 TokenBitwiseXor TokenType = '^'
91 TokenStarStar TokenType = '➚'
92 TokenBacktick TokenType = '`'
93 TokenSemicolon TokenType = ';'
94 TokenTabs TokenType = '␉'
95 TokenInvalid TokenType = '�'
96 TokenBadUTF8 TokenType = '💩'
97
98 // TokenNil is a placeholder for when a token is required but none is
99 // available, e.g. when reporting errors. The scanner will never produce
100 // this as part of a token stream.
101 TokenNil TokenType = '\x00'
102)
103
104func (t TokenType) GoString() string {
105 return fmt.Sprintf("hclsyntax.%s", t.String())
106}
107
108type scanMode int
109
110const (
111 scanNormal scanMode = iota
112 scanTemplate
113 scanIdentOnly
114)
115
116type tokenAccum struct {
117 Filename string
118 Bytes []byte
119 Pos hcl.Pos
120 Tokens []Token
121}
122
123func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
124 // Walk through our buffer to figure out how much we need to adjust
125 // the start pos to get our end pos.
126
127 start := f.Pos
128 start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
129 start.Byte = startOfs
130
131 end := start
132 end.Byte = endOfs
133 b := f.Bytes[startOfs:endOfs]
134 for len(b) > 0 {
135 advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
136 if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
137 end.Line++
138 end.Column = 1
139 } else {
140 end.Column++
141 }
142 b = b[advance:]
143 }
144
145 f.Pos = end
146
147 f.Tokens = append(f.Tokens, Token{
148 Type: ty,
149 Bytes: f.Bytes[startOfs:endOfs],
150 Range: hcl.Range{
151 Filename: f.Filename,
152 Start: start,
153 End: end,
154 },
155 })
156}
157
158type heredocInProgress struct {
159 Marker []byte
160 StartOfLine bool
161}
162
163// checkInvalidTokens does a simple pass across the given tokens and generates
164// diagnostics for tokens that should _never_ appear in HCL source. This
165// is intended to avoid the need for the parser to have special support
166// for them all over.
167//
168// Returns a diagnostics with no errors if everything seems acceptable.
169// Otherwise, returns zero or more error diagnostics, though tries to limit
170// repetition of the same information.
171func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
172 var diags hcl.Diagnostics
173
174 toldBitwise := 0
175 toldExponent := 0
176 toldBacktick := 0
177 toldSemicolon := 0
178 toldTabs := 0
179 toldBadUTF8 := 0
180
181 for _, tok := range tokens {
182 switch tok.Type {
183 case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
184 if toldBitwise < 4 {
185 var suggestion string
186 switch tok.Type {
187 case TokenBitwiseAnd:
188 suggestion = " Did you mean boolean AND (\"&&\")?"
189 case TokenBitwiseOr:
190 suggestion = " Did you mean boolean OR (\"||\")?"
191 case TokenBitwiseNot:
192 suggestion = " Did you mean boolean NOT (\"!\")?"
193 }
194
195 diags = append(diags, &hcl.Diagnostic{
196 Severity: hcl.DiagError,
197 Summary: "Unsupported operator",
198 Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
199 Subject: &tok.Range,
200 })
201 toldBitwise++
202 }
203 case TokenStarStar:
204 if toldExponent < 1 {
205 diags = append(diags, &hcl.Diagnostic{
206 Severity: hcl.DiagError,
207 Summary: "Unsupported operator",
208 Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
209 Subject: &tok.Range,
210 })
211
212 toldExponent++
213 }
214 case TokenBacktick:
215 // Only report for alternating (even) backticks, so we won't report both start and ends of the same
216 // backtick-quoted string.
217 if toldBacktick < 4 && (toldBacktick%2) == 0 {
218 diags = append(diags, &hcl.Diagnostic{
219 Severity: hcl.DiagError,
220 Summary: "Invalid character",
221 Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".",
222 Subject: &tok.Range,
223 })
224
225 toldBacktick++
226 }
227 case TokenSemicolon:
228 if toldSemicolon < 1 {
229 diags = append(diags, &hcl.Diagnostic{
230 Severity: hcl.DiagError,
231 Summary: "Invalid character",
232 Detail: "The \";\" character is not valid. Use newlines to separate attributes and blocks, and commas to separate items in collection values.",
233 Subject: &tok.Range,
234 })
235
236 toldSemicolon++
237 }
238 case TokenTabs:
239 if toldTabs < 1 {
240 diags = append(diags, &hcl.Diagnostic{
241 Severity: hcl.DiagError,
242 Summary: "Invalid character",
243 Detail: "Tab characters may not be used. The recommended indentation style is two spaces per indent.",
244 Subject: &tok.Range,
245 })
246
247 toldTabs++
248 }
249 case TokenBadUTF8:
250 if toldBadUTF8 < 1 {
251 diags = append(diags, &hcl.Diagnostic{
252 Severity: hcl.DiagError,
253 Summary: "Invalid character encoding",
254 Detail: "All input files must be UTF-8 encoded. Ensure that UTF-8 encoding is selected in your editor.",
255 Subject: &tok.Range,
256 })
257
258 toldBadUTF8++
259 }
260 case TokenInvalid:
261 diags = append(diags, &hcl.Diagnostic{
262 Severity: hcl.DiagError,
263 Summary: "Invalid character",
264 Detail: "This character is not used within the language.",
265 Subject: &tok.Range,
266 })
267
268 toldTabs++
269 }
270 }
271 return diags
272}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
new file mode 100644
index 0000000..93de7ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
@@ -0,0 +1,69 @@
1// Code generated by "stringer -type TokenType -output token_type_string.go"; DO NOT EDIT.
2
3package hclsyntax
4
5import "strconv"
6
7const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenStarStarTokenInvalidTokenBadUTF8"
8
9var _TokenType_map = map[TokenType]string{
10 0: _TokenType_name[0:8],
11 10: _TokenType_name[8:20],
12 33: _TokenType_name[20:29],
13 37: _TokenType_name[29:41],
14 38: _TokenType_name[41:56],
15 40: _TokenType_name[56:67],
16 41: _TokenType_name[67:78],
17 42: _TokenType_name[78:87],
18 43: _TokenType_name[87:96],
19 44: _TokenType_name[96:106],
20 45: _TokenType_name[106:116],
21 46: _TokenType_name[116:124],
22 47: _TokenType_name[124:134],
23 58: _TokenType_name[134:144],
24 59: _TokenType_name[144:158],
25 60: _TokenType_name[158:171],
26 61: _TokenType_name[171:181],
27 62: _TokenType_name[181:197],
28 63: _TokenType_name[197:210],
29 67: _TokenType_name[210:222],
30 72: _TokenType_name[222:235],
31 73: _TokenType_name[235:245],
32 78: _TokenType_name[245:259],
33 81: _TokenType_name[259:273],
34 83: _TokenType_name[273:287],
35 91: _TokenType_name[287:298],
36 93: _TokenType_name[298:309],
37 94: _TokenType_name[309:324],
38 96: _TokenType_name[324:337],
39 104: _TokenType_name[337:350],
40 123: _TokenType_name[350:361],
41 124: _TokenType_name[361:375],
42 125: _TokenType_name[375:386],
43 126: _TokenType_name[386:401],
44 171: _TokenType_name[401:412],
45 187: _TokenType_name[412:423],
46 955: _TokenType_name[423:443],
47 8230: _TokenType_name[443:456],
48 8658: _TokenType_name[456:469],
49 8718: _TokenType_name[469:488],
50 8743: _TokenType_name[488:496],
51 8744: _TokenType_name[496:503],
52 8747: _TokenType_name[503:522],
53 8788: _TokenType_name[522:534],
54 8800: _TokenType_name[534:547],
55 8804: _TokenType_name[547:562],
56 8805: _TokenType_name[562:580],
57 9220: _TokenType_name[580:588],
58 9225: _TokenType_name[588:597],
59 10138: _TokenType_name[597:610],
60 65533: _TokenType_name[610:622],
61 128169: _TokenType_name[622:634],
62}
63
64func (i TokenType) String() string {
65 if str, ok := _TokenType_map[i]; ok {
66 return str
67 }
68 return "TokenType(" + strconv.FormatInt(int64(i), 10) + ")"
69}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb
new file mode 100644
index 0000000..422e4e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb
@@ -0,0 +1,335 @@
1#!/usr/bin/env ruby
2#
3# This script has been updated to accept more command-line arguments:
4#
5# -u, --url URL to process
6# -m, --machine Machine name
7# -p, --properties Properties to add to the machine
8# -o, --output Write output to file
9#
10# Updated by: Marty Schoch <marty.schoch@gmail.com>
11#
12# This script uses the unicode spec to generate a Ragel state machine
13# that recognizes unicode alphanumeric characters. It generates 5
14# character classes: uupper, ulower, ualpha, udigit, and ualnum.
15# Currently supported encodings are UTF-8 [default] and UCS-4.
16#
17# Usage: unicode2ragel.rb [options]
18# -e, --encoding [ucs4 | utf8] Data encoding
19# -h, --help Show this message
20#
21# This script was originally written as part of the Ferret search
22# engine library.
23#
24# Author: Rakan El-Khalil <rakan@well.com>
25
26require 'optparse'
27require 'open-uri'
28
29ENCODINGS = [ :utf8, :ucs4 ]
30ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
31DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
32DEFAULT_MACHINE_NAME= "WChar"
33
34###
35# Display vars & default option
36
37TOTAL_WIDTH = 80
38RANGE_WIDTH = 23
39@encoding = :utf8
40@chart_url = DEFAULT_CHART_URL
41machine_name = DEFAULT_MACHINE_NAME
42properties = []
43@output = $stdout
44
45###
46# Option parsing
47
48cli_opts = OptionParser.new do |opts|
49 opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
50 @encoding = o.downcase.to_sym
51 end
52 opts.on("-h", "--help", "Show this message") do
53 puts opts
54 exit
55 end
56 opts.on("-u", "--url URL", "URL to process") do |o|
57 @chart_url = o
58 end
59 opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
60 machine_name = o
61 end
62 opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
63 properties = o
64 end
65 opts.on("-o", "--output FILE", "output file") do |o|
66 @output = File.new(o, "w+")
67 end
68end
69
70cli_opts.parse(ARGV)
71unless ENCODINGS.member? @encoding
72 puts "Invalid encoding: #{@encoding}"
73 puts cli_opts
74 exit
75end
76
77##
78# Downloads the document at url and yields every alpha line's hex
79# range and description.
80
81def each_alpha( url, property )
82 open( url ) do |file|
83 file.each_line do |line|
84 next if line =~ /^#/;
85 next if line !~ /; #{property} #/;
86
87 range, description = line.split(/;/)
88 range.strip!
89 description.gsub!(/.*#/, '').strip!
90
91 if range =~ /\.\./
92 start, stop = range.split '..'
93 else start = stop = range
94 end
95
96 yield start.hex .. stop.hex, description
97 end
98 end
99end
100
101###
102# Formats to hex at minimum width
103
104def to_hex( n )
105 r = "%0X" % n
106 r = "0#{r}" unless (r.length % 2).zero?
107 r
108end
109
110###
111# UCS4 is just a straight hex conversion of the unicode codepoint.
112
113def to_ucs4( range )
114 rangestr = "0x" + to_hex(range.begin)
115 rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
116 [ rangestr ]
117end
118
119##
120# 0x00 - 0x7f -> 0zzzzzzz[7]
121# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
122# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
123# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
124
125UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
126
127def to_utf8_enc( n )
128 r = 0
129 if n <= 0x7f
130 r = n
131 elsif n <= 0x7ff
132 y = 0xc0 | (n >> 6)
133 z = 0x80 | (n & 0x3f)
134 r = y << 8 | z
135 elsif n <= 0xffff
136 x = 0xe0 | (n >> 12)
137 y = 0x80 | (n >> 6) & 0x3f
138 z = 0x80 | n & 0x3f
139 r = x << 16 | y << 8 | z
140 elsif n <= 0x10ffff
141 w = 0xf0 | (n >> 18)
142 x = 0x80 | (n >> 12) & 0x3f
143 y = 0x80 | (n >> 6) & 0x3f
144 z = 0x80 | n & 0x3f
145 r = w << 24 | x << 16 | y << 8 | z
146 end
147
148 to_hex(r)
149end
150
151def from_utf8_enc( n )
152 n = n.hex
153 r = 0
154 if n <= 0x7f
155 r = n
156 elsif n <= 0xdfff
157 y = (n >> 8) & 0x1f
158 z = n & 0x3f
159 r = y << 6 | z
160 elsif n <= 0xefffff
161 x = (n >> 16) & 0x0f
162 y = (n >> 8) & 0x3f
163 z = n & 0x3f
164 r = x << 10 | y << 6 | z
165 elsif n <= 0xf7ffffff
166 w = (n >> 24) & 0x07
167 x = (n >> 16) & 0x3f
168 y = (n >> 8) & 0x3f
169 z = n & 0x3f
170 r = w << 18 | x << 12 | y << 6 | z
171 end
172 r
173end
174
175###
176# Given a range, splits it up into ranges that can be continuously
177# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
178# This is not strictly needed since the current [5.1] unicode standard
179# doesn't have ranges that straddle utf8 boundaries. This is included
180# for completeness as there is no telling if that will ever change.
181
182def utf8_ranges( range )
183 ranges = []
184 UTF8_BOUNDARIES.each do |max|
185 if range.begin <= max
186 if range.end <= max
187 ranges << range
188 return ranges
189 end
190
191 ranges << (range.begin .. max)
192 range = (max + 1) .. range.end
193 end
194 end
195 ranges
196end
197
198def build_range( start, stop )
199 size = start.size/2
200 left = size - 1
201 return [""] if size < 1
202
203 a = start[0..1]
204 b = stop[0..1]
205
206 ###
207 # Shared prefix
208
209 if a == b
210 return build_range(start[2..-1], stop[2..-1]).map do |elt|
211 "0x#{a} " + elt
212 end
213 end
214
215 ###
216 # Unshared prefix, end of run
217
218 return ["0x#{a}..0x#{b} "] if left.zero?
219
220 ###
221 # Unshared prefix, not end of run
222 # Range can be 0x123456..0x56789A
223 # Which is equivalent to:
224 # 0x123456 .. 0x12FFFF
225 # 0x130000 .. 0x55FFFF
226 # 0x560000 .. 0x56789A
227
228 ret = []
229 ret << build_range(start, a + "FF" * left)
230
231 ###
232 # Only generate middle range if need be.
233
234 if a.hex+1 != b.hex
235 max = to_hex(b.hex - 1)
236 max = "FF" if b == "FF"
237 ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
238 end
239
240 ###
241 # Don't generate last range if it is covered by first range
242
243 ret << build_range(b + "00" * left, stop) unless b == "FF"
244 ret.flatten!
245end
246
247def to_utf8( range )
248 utf8_ranges( range ).map do |r|
249 begin_enc = to_utf8_enc(r.begin)
250 end_enc = to_utf8_enc(r.end)
251 build_range begin_enc, end_enc
252 end.flatten!
253end
254
255##
256# Perform a 3-way comparison of the number of codepoints advertised by
257# the unicode spec for the given range, the originally parsed range,
258# and the resulting utf8 encoded range.
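#
# As a worked example, count_codepoints("0xC3 0x80..0x96 ") leaves the fixed
# lead byte 0xC3 alone (factor 1) and decodes the trailing range with
# from_utf8_enc, giving 0x16 - 0x00 + 1 = 23 codepoints, which matches the
# [23] advertised for LATIN CAPITAL LETTER A WITH GRAVE..O WITH DIAERESIS.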
259
260def count_codepoints( code )
261 code.split(' ').inject(1) do |acc, elt|
262 if elt =~ /0x(.+)\.\.0x(.+)/
263 if @encoding == :utf8
264 acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
265 else
266 acc * ($2.hex - $1.hex + 1)
267 end
268 else
269 acc
270 end
271 end
272end
273
274def is_valid?( range, desc, codes )
275 spec_count = 1
276 spec_count = $1.to_i if desc =~ /\[(\d+)\]/
277 range_count = range.end - range.begin + 1
278
279 sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
280 sum == spec_count and sum == range_count
281end
282
283##
284# Generate the state machine to @output
285
286def generate_machine( name, property )
287 pipe = " "
288 @output.puts " #{name} = "
289 each_alpha( @chart_url, property ) do |range, desc|
290
291 codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
292
293 #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
294 # is_valid? range, desc, codes
295
296 range_width = codes.map { |a| a.size }.max
297 range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
298
299 desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
300 desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
301
302 if desc.size > desc_width
303 desc = desc[0..desc_width - 4] + "..."
304 end
305
306 codes.each_with_index do |r, idx|
307 desc = "" unless idx.zero?
308 code = "%-#{range_width}s" % r
309 @output.puts " #{pipe} #{code} ##{desc}"
310 pipe = "|"
311 end
312 end
313 @output.puts " ;"
314 @output.puts ""
315end
316
317@output.puts <<EOF
318# The following Ragel file was autogenerated with #{$0}
319# from: #{@chart_url}
320#
321# It defines #{properties}.
322#
323# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
324# and that your input is in #{@encoding}.
325
326%%{
327 machine #{machine_name};
328
329EOF
330
331properties.each { |x| generate_machine( x, x ) }
332
333@output.puts <<EOF
334}%%
335EOF
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode_derived.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode_derived.rl
new file mode 100644
index 0000000..612ad62
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode_derived.rl
@@ -0,0 +1,2135 @@
1# The following Ragel file was autogenerated with unicode2ragel.rb
2# from: http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt
3#
4# It defines ["ID_Start", "ID_Continue"].
5#
6# To use this, make sure that your alphtype is set to byte,
7# and that your input is in utf8.
8
9%%{
10 machine UnicodeDerived;
11
12 ID_Start =
13 0x41..0x5A #L& [26] LATIN CAPITAL LETTER A..LATIN CAPI...
14 | 0x61..0x7A #L& [26] LATIN SMALL LETTER A..LATIN SMALL ...
15 | 0xC2 0xAA #Lo FEMININE ORDINAL INDICATOR
16 | 0xC2 0xB5 #L& MICRO SIGN
17 | 0xC2 0xBA #Lo MASCULINE ORDINAL INDICATOR
18 | 0xC3 0x80..0x96 #L& [23] LATIN CAPITAL LETTER A WITH GRAVE....
19 | 0xC3 0x98..0xB6 #L& [31] LATIN CAPITAL LETTER O WITH STROKE...
20 | 0xC3 0xB8..0xFF #L& [195] LATIN SMALL LETTER O WITH STROKE.....
21 | 0xC4..0xC5 0x00..0xFF #
22 | 0xC6 0x00..0xBA #
23 | 0xC6 0xBB #Lo LATIN LETTER TWO WITH STROKE
24 | 0xC6 0xBC..0xBF #L& [4] LATIN CAPITAL LETTER TONE FIVE..LA...
25 | 0xC7 0x80..0x83 #Lo [4] LATIN LETTER DENTAL CLICK..LATIN L...
26 | 0xC7 0x84..0xFF #L& [208] LATIN CAPITAL LETTER DZ WITH CARON...
27 | 0xC8..0xC9 0x00..0xFF #
28 | 0xCA 0x00..0x93 #
29 | 0xCA 0x94 #Lo LATIN LETTER GLOTTAL STOP
30 | 0xCA 0x95..0xAF #L& [27] LATIN LETTER PHARYNGEAL VOICED FRI...
31 | 0xCA 0xB0..0xFF #Lm [18] MODIFIER LETTER SMALL H..MODIFIER ...
32 | 0xCB 0x00..0x81 #
33 | 0xCB 0x86..0x91 #Lm [12] MODIFIER LETTER CIRCUMFLEX ACCENT....
34 | 0xCB 0xA0..0xA4 #Lm [5] MODIFIER LETTER SMALL GAMMA..MODIF...
35 | 0xCB 0xAC #Lm MODIFIER LETTER VOICING
36 | 0xCB 0xAE #Lm MODIFIER LETTER DOUBLE APOSTROPHE
37 | 0xCD 0xB0..0xB3 #L& [4] GREEK CAPITAL LETTER HETA..GREEK S...
38 | 0xCD 0xB4 #Lm GREEK NUMERAL SIGN
39 | 0xCD 0xB6..0xB7 #L& [2] GREEK CAPITAL LETTER PAMPHYLIAN DI...
40 | 0xCD 0xBA #Lm GREEK YPOGEGRAMMENI
41 | 0xCD 0xBB..0xBD #L& [3] GREEK SMALL REVERSED LUNATE SIGMA ...
42 | 0xCD 0xBF #L& GREEK CAPITAL LETTER YOT
43 | 0xCE 0x86 #L& GREEK CAPITAL LETTER ALPHA WITH TONOS
44 | 0xCE 0x88..0x8A #L& [3] GREEK CAPITAL LETTER EPSILON WITH ...
45 | 0xCE 0x8C #L& GREEK CAPITAL LETTER OMICRON WITH ...
46 | 0xCE 0x8E..0xA1 #L& [20] GREEK CAPITAL LETTER UPSILON WITH ...
47 | 0xCE 0xA3..0xFF #L& [83] GREEK CAPITAL LETTER SIGMA..GREEK ...
48 | 0xCF 0x00..0xB5 #
49 | 0xCF 0xB7..0xFF #L& [139] GREEK CAPITAL LETTER SHO..CYRILLIC...
50 | 0xD0..0xD1 0x00..0xFF #
51 | 0xD2 0x00..0x81 #
52 | 0xD2 0x8A..0xFF #L& [166] CYRILLIC CAPITAL LETTER SHORT I WI...
53 | 0xD3..0xD3 0x00..0xFF #
54 | 0xD4 0x00..0xAF #
55 | 0xD4 0xB1..0xFF #L& [38] ARMENIAN CAPITAL LETTER AYB..ARMEN...
56 | 0xD5 0x00..0x96 #
57 | 0xD5 0x99 #Lm ARMENIAN MODIFIER LETTER LEFT HALF...
58 | 0xD5 0xA1..0xFF #L& [39] ARMENIAN SMALL LETTER AYB..ARMENIA...
59 | 0xD6 0x00..0x87 #
60 | 0xD7 0x90..0xAA #Lo [27] HEBREW LETTER ALEF..HEBREW LETTER TAV
61 | 0xD7 0xB0..0xB2 #Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV...
62 | 0xD8 0xA0..0xBF #Lo [32] ARABIC LETTER KASHMIRI YEH..ARABIC...
63 | 0xD9 0x80 #Lm ARABIC TATWEEL
64 | 0xD9 0x81..0x8A #Lo [10] ARABIC LETTER FEH..ARABIC LETTER YEH
65 | 0xD9 0xAE..0xAF #Lo [2] ARABIC LETTER DOTLESS BEH..ARABIC ...
66 | 0xD9 0xB1..0xFF #Lo [99] ARABIC LETTER ALEF WASLA..ARABIC L...
67 | 0xDA..0xDA 0x00..0xFF #
68 | 0xDB 0x00..0x93 #
69 | 0xDB 0x95 #Lo ARABIC LETTER AE
70 | 0xDB 0xA5..0xA6 #Lm [2] ARABIC SMALL WAW..ARABIC SMALL YEH
71 | 0xDB 0xAE..0xAF #Lo [2] ARABIC LETTER DAL WITH INVERTED V....
72 | 0xDB 0xBA..0xBC #Lo [3] ARABIC LETTER SHEEN WITH DOT BELOW...
73 | 0xDB 0xBF #Lo ARABIC LETTER HEH WITH INVERTED V
74 | 0xDC 0x90 #Lo SYRIAC LETTER ALAPH
75 | 0xDC 0x92..0xAF #Lo [30] SYRIAC LETTER BETH..SYRIAC LETTER ...
76 | 0xDD 0x8D..0xFF #Lo [89] SYRIAC LETTER SOGDIAN ZHAIN..THAAN...
77 | 0xDE 0x00..0xA5 #
78 | 0xDE 0xB1 #Lo THAANA LETTER NAA
79 | 0xDF 0x8A..0xAA #Lo [33] NKO LETTER A..NKO LETTER JONA RA
80 | 0xDF 0xB4..0xB5 #Lm [2] NKO HIGH TONE APOSTROPHE..NKO LOW ...
81 | 0xDF 0xBA #Lm NKO LAJANYALAN
82 | 0xE0 0xA0 0x80..0x95 #Lo [22] SAMARITAN LETTER ALAF..SAMARITAN L...
83 | 0xE0 0xA0 0x9A #Lm SAMARITAN MODIFIER LETTER EPENTHET...
84 | 0xE0 0xA0 0xA4 #Lm SAMARITAN MODIFIER LETTER SHORT A
85 | 0xE0 0xA0 0xA8 #Lm SAMARITAN MODIFIER LETTER I
86 | 0xE0 0xA1 0x80..0x98 #Lo [25] MANDAIC LETTER HALQA..MANDAIC LETT...
87 | 0xE0 0xA2 0xA0..0xB4 #Lo [21] ARABIC LETTER BEH WITH SMALL V BEL...
88 | 0xE0 0xA2 0xB6..0xBD #Lo [8] ARABIC LETTER BEH WITH SMALL MEEM ...
89 | 0xE0 0xA4 0x84..0xB9 #Lo [54] DEVANAGARI LETTER SHORT A..DEVANAG...
90 | 0xE0 0xA4 0xBD #Lo DEVANAGARI SIGN AVAGRAHA
91 | 0xE0 0xA5 0x90 #Lo DEVANAGARI OM
92 | 0xE0 0xA5 0x98..0xA1 #Lo [10] DEVANAGARI LETTER QA..DEVANAGARI L...
93 | 0xE0 0xA5 0xB1 #Lm DEVANAGARI SIGN HIGH SPACING DOT
94 | 0xE0 0xA5 0xB2..0xFF #Lo [15] DEVANAGARI LETTER CANDRA A..BENGAL...
95 | 0xE0 0xA6 0x00..0x80 #
96 | 0xE0 0xA6 0x85..0x8C #Lo [8] BENGALI LETTER A..BENGALI LETTER V...
97 | 0xE0 0xA6 0x8F..0x90 #Lo [2] BENGALI LETTER E..BENGALI LETTER AI
98 | 0xE0 0xA6 0x93..0xA8 #Lo [22] BENGALI LETTER O..BENGALI LETTER NA
99 | 0xE0 0xA6 0xAA..0xB0 #Lo [7] BENGALI LETTER PA..BENGALI LETTER RA
100 | 0xE0 0xA6 0xB2 #Lo BENGALI LETTER LA
101 | 0xE0 0xA6 0xB6..0xB9 #Lo [4] BENGALI LETTER SHA..BENGALI LETTER HA
102 | 0xE0 0xA6 0xBD #Lo BENGALI SIGN AVAGRAHA
103 | 0xE0 0xA7 0x8E #Lo BENGALI LETTER KHANDA TA
104 | 0xE0 0xA7 0x9C..0x9D #Lo [2] BENGALI LETTER RRA..BENGALI LETTER...
105 | 0xE0 0xA7 0x9F..0xA1 #Lo [3] BENGALI LETTER YYA..BENGALI LETTER...
106 | 0xE0 0xA7 0xB0..0xB1 #Lo [2] BENGALI LETTER RA WITH MIDDLE DIAG...
107 | 0xE0 0xA8 0x85..0x8A #Lo [6] GURMUKHI LETTER A..GURMUKHI LETTER UU
108 | 0xE0 0xA8 0x8F..0x90 #Lo [2] GURMUKHI LETTER EE..GURMUKHI LETTE...
109 | 0xE0 0xA8 0x93..0xA8 #Lo [22] GURMUKHI LETTER OO..GURMUKHI LETTE...
110 | 0xE0 0xA8 0xAA..0xB0 #Lo [7] GURMUKHI LETTER PA..GURMUKHI LETTE...
111 | 0xE0 0xA8 0xB2..0xB3 #Lo [2] GURMUKHI LETTER LA..GURMUKHI LETTE...
112 | 0xE0 0xA8 0xB5..0xB6 #Lo [2] GURMUKHI LETTER VA..GURMUKHI LETTE...
113 | 0xE0 0xA8 0xB8..0xB9 #Lo [2] GURMUKHI LETTER SA..GURMUKHI LETTE...
114 | 0xE0 0xA9 0x99..0x9C #Lo [4] GURMUKHI LETTER KHHA..GURMUKHI LET...
115 | 0xE0 0xA9 0x9E #Lo GURMUKHI LETTER FA
116 | 0xE0 0xA9 0xB2..0xB4 #Lo [3] GURMUKHI IRI..GURMUKHI EK ONKAR
117 | 0xE0 0xAA 0x85..0x8D #Lo [9] GUJARATI LETTER A..GUJARATI VOWEL ...
118 | 0xE0 0xAA 0x8F..0x91 #Lo [3] GUJARATI LETTER E..GUJARATI VOWEL ...
119 | 0xE0 0xAA 0x93..0xA8 #Lo [22] GUJARATI LETTER O..GUJARATI LETTER NA
120 | 0xE0 0xAA 0xAA..0xB0 #Lo [7] GUJARATI LETTER PA..GUJARATI LETTE...
121 | 0xE0 0xAA 0xB2..0xB3 #Lo [2] GUJARATI LETTER LA..GUJARATI LETTE...
122 | 0xE0 0xAA 0xB5..0xB9 #Lo [5] GUJARATI LETTER VA..GUJARATI LETTE...
123 | 0xE0 0xAA 0xBD #Lo GUJARATI SIGN AVAGRAHA
124 | 0xE0 0xAB 0x90 #Lo GUJARATI OM
125 | 0xE0 0xAB 0xA0..0xA1 #Lo [2] GUJARATI LETTER VOCALIC RR..GUJARA...
126 | 0xE0 0xAB 0xB9 #Lo GUJARATI LETTER ZHA
127 | 0xE0 0xAC 0x85..0x8C #Lo [8] ORIYA LETTER A..ORIYA LETTER VOCAL...
128 | 0xE0 0xAC 0x8F..0x90 #Lo [2] ORIYA LETTER E..ORIYA LETTER AI
129 | 0xE0 0xAC 0x93..0xA8 #Lo [22] ORIYA LETTER O..ORIYA LETTER NA
130 | 0xE0 0xAC 0xAA..0xB0 #Lo [7] ORIYA LETTER PA..ORIYA LETTER RA
131 | 0xE0 0xAC 0xB2..0xB3 #Lo [2] ORIYA LETTER LA..ORIYA LETTER LLA
132 | 0xE0 0xAC 0xB5..0xB9 #Lo [5] ORIYA LETTER VA..ORIYA LETTER HA
133 | 0xE0 0xAC 0xBD #Lo ORIYA SIGN AVAGRAHA
134 | 0xE0 0xAD 0x9C..0x9D #Lo [2] ORIYA LETTER RRA..ORIYA LETTER RHA
135 | 0xE0 0xAD 0x9F..0xA1 #Lo [3] ORIYA LETTER YYA..ORIYA LETTER VOC...
136 | 0xE0 0xAD 0xB1 #Lo ORIYA LETTER WA
137 | 0xE0 0xAE 0x83 #Lo TAMIL SIGN VISARGA
138 | 0xE0 0xAE 0x85..0x8A #Lo [6] TAMIL LETTER A..TAMIL LETTER UU
139 | 0xE0 0xAE 0x8E..0x90 #Lo [3] TAMIL LETTER E..TAMIL LETTER AI
140 | 0xE0 0xAE 0x92..0x95 #Lo [4] TAMIL LETTER O..TAMIL LETTER KA
141 | 0xE0 0xAE 0x99..0x9A #Lo [2] TAMIL LETTER NGA..TAMIL LETTER CA
142 | 0xE0 0xAE 0x9C #Lo TAMIL LETTER JA
143 | 0xE0 0xAE 0x9E..0x9F #Lo [2] TAMIL LETTER NYA..TAMIL LETTER TTA
144 | 0xE0 0xAE 0xA3..0xA4 #Lo [2] TAMIL LETTER NNA..TAMIL LETTER TA
145 | 0xE0 0xAE 0xA8..0xAA #Lo [3] TAMIL LETTER NA..TAMIL LETTER PA
146 | 0xE0 0xAE 0xAE..0xB9 #Lo [12] TAMIL LETTER MA..TAMIL LETTER HA
147 | 0xE0 0xAF 0x90 #Lo TAMIL OM
148 | 0xE0 0xB0 0x85..0x8C #Lo [8] TELUGU LETTER A..TELUGU LETTER VOC...
149 | 0xE0 0xB0 0x8E..0x90 #Lo [3] TELUGU LETTER E..TELUGU LETTER AI
150 | 0xE0 0xB0 0x92..0xA8 #Lo [23] TELUGU LETTER O..TELUGU LETTER NA
151 | 0xE0 0xB0 0xAA..0xB9 #Lo [16] TELUGU LETTER PA..TELUGU LETTER HA
152 | 0xE0 0xB0 0xBD #Lo TELUGU SIGN AVAGRAHA
153 | 0xE0 0xB1 0x98..0x9A #Lo [3] TELUGU LETTER TSA..TELUGU LETTER RRRA
154 | 0xE0 0xB1 0xA0..0xA1 #Lo [2] TELUGU LETTER VOCALIC RR..TELUGU L...
155 | 0xE0 0xB2 0x80 #Lo KANNADA SIGN SPACING CANDRABINDU
156 | 0xE0 0xB2 0x85..0x8C #Lo [8] KANNADA LETTER A..KANNADA LETTER V...
157 | 0xE0 0xB2 0x8E..0x90 #Lo [3] KANNADA LETTER E..KANNADA LETTER AI
158 | 0xE0 0xB2 0x92..0xA8 #Lo [23] KANNADA LETTER O..KANNADA LETTER NA
159 | 0xE0 0xB2 0xAA..0xB3 #Lo [10] KANNADA LETTER PA..KANNADA LETTER LLA
160 | 0xE0 0xB2 0xB5..0xB9 #Lo [5] KANNADA LETTER VA..KANNADA LETTER HA
161 | 0xE0 0xB2 0xBD #Lo KANNADA SIGN AVAGRAHA
162 | 0xE0 0xB3 0x9E #Lo KANNADA LETTER FA
163 | 0xE0 0xB3 0xA0..0xA1 #Lo [2] KANNADA LETTER VOCALIC RR..KANNADA...
164 | 0xE0 0xB3 0xB1..0xB2 #Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA ...
165 | 0xE0 0xB4 0x85..0x8C #Lo [8] MALAYALAM LETTER A..MALAYALAM LETT...
166 | 0xE0 0xB4 0x8E..0x90 #Lo [3] MALAYALAM LETTER E..MALAYALAM LETT...
167 | 0xE0 0xB4 0x92..0xBA #Lo [41] MALAYALAM LETTER O..MALAYALAM LETT...
168 | 0xE0 0xB4 0xBD #Lo MALAYALAM SIGN AVAGRAHA
169 | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH
170 | 0xE0 0xB5 0x94..0x96 #Lo [3] MALAYALAM LETTER CHILLU M..MALAYAL...
171 | 0xE0 0xB5 0x9F..0xA1 #Lo [3] MALAYALAM LETTER ARCHAIC II..MALAY...
172 | 0xE0 0xB5 0xBA..0xBF #Lo [6] MALAYALAM LETTER CHILLU NN..MALAYA...
173 | 0xE0 0xB6 0x85..0x96 #Lo [18] SINHALA LETTER AYANNA..SINHALA LET...
174 | 0xE0 0xB6 0x9A..0xB1 #Lo [24] SINHALA LETTER ALPAPRAANA KAYANNA....
175 | 0xE0 0xB6 0xB3..0xBB #Lo [9] SINHALA LETTER SANYAKA DAYANNA..SI...
176 | 0xE0 0xB6 0xBD #Lo SINHALA LETTER DANTAJA LAYANNA
177 | 0xE0 0xB7 0x80..0x86 #Lo [7] SINHALA LETTER VAYANNA..SINHALA LE...
178 | 0xE0 0xB8 0x81..0xB0 #Lo [48] THAI CHARACTER KO KAI..THAI CHARAC...
179 | 0xE0 0xB8 0xB2..0xB3 #Lo [2] THAI CHARACTER SARA AA..THAI CHARA...
180 | 0xE0 0xB9 0x80..0x85 #Lo [6] THAI CHARACTER SARA E..THAI CHARAC...
181 | 0xE0 0xB9 0x86 #Lm THAI CHARACTER MAIYAMOK
182 | 0xE0 0xBA 0x81..0x82 #Lo [2] LAO LETTER KO..LAO LETTER KHO SUNG
183 | 0xE0 0xBA 0x84 #Lo LAO LETTER KHO TAM
184 | 0xE0 0xBA 0x87..0x88 #Lo [2] LAO LETTER NGO..LAO LETTER CO
185 | 0xE0 0xBA 0x8A #Lo LAO LETTER SO TAM
186 | 0xE0 0xBA 0x8D #Lo LAO LETTER NYO
187 | 0xE0 0xBA 0x94..0x97 #Lo [4] LAO LETTER DO..LAO LETTER THO TAM
188 | 0xE0 0xBA 0x99..0x9F #Lo [7] LAO LETTER NO..LAO LETTER FO SUNG
189 | 0xE0 0xBA 0xA1..0xA3 #Lo [3] LAO LETTER MO..LAO LETTER LO LING
190 | 0xE0 0xBA 0xA5 #Lo LAO LETTER LO LOOT
191 | 0xE0 0xBA 0xA7 #Lo LAO LETTER WO
192 | 0xE0 0xBA 0xAA..0xAB #Lo [2] LAO LETTER SO SUNG..LAO LETTER HO ...
193 | 0xE0 0xBA 0xAD..0xB0 #Lo [4] LAO LETTER O..LAO VOWEL SIGN A
194 | 0xE0 0xBA 0xB2..0xB3 #Lo [2] LAO VOWEL SIGN AA..LAO VOWEL SIGN AM
195 | 0xE0 0xBA 0xBD #Lo LAO SEMIVOWEL SIGN NYO
196 | 0xE0 0xBB 0x80..0x84 #Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI
197 | 0xE0 0xBB 0x86 #Lm LAO KO LA
198 | 0xE0 0xBB 0x9C..0x9F #Lo [4] LAO HO NO..LAO LETTER KHMU NYO
199 | 0xE0 0xBC 0x80 #Lo TIBETAN SYLLABLE OM
200 | 0xE0 0xBD 0x80..0x87 #Lo [8] TIBETAN LETTER KA..TIBETAN LETTER JA
201 | 0xE0 0xBD 0x89..0xAC #Lo [36] TIBETAN LETTER NYA..TIBETAN LETTER...
202 | 0xE0 0xBE 0x88..0x8C #Lo [5] TIBETAN SIGN LCE TSA CAN..TIBETAN ...
203 | 0xE1 0x80 0x80..0xAA #Lo [43] MYANMAR LETTER KA..MYANMAR LETTER AU
204 | 0xE1 0x80 0xBF #Lo MYANMAR LETTER GREAT SA
205 | 0xE1 0x81 0x90..0x95 #Lo [6] MYANMAR LETTER SHA..MYANMAR LETTER...
206 | 0xE1 0x81 0x9A..0x9D #Lo [4] MYANMAR LETTER MON NGA..MYANMAR LE...
207 | 0xE1 0x81 0xA1 #Lo MYANMAR LETTER SGAW KAREN SHA
208 | 0xE1 0x81 0xA5..0xA6 #Lo [2] MYANMAR LETTER WESTERN PWO KAREN T...
209 | 0xE1 0x81 0xAE..0xB0 #Lo [3] MYANMAR LETTER EASTERN PWO KAREN N...
210 | 0xE1 0x81 0xB5..0xFF #Lo [13] MYANMAR LETTER SHAN KA..MYANMAR LE...
211 | 0xE1 0x82 0x00..0x81 #
212 | 0xE1 0x82 0x8E #Lo MYANMAR LETTER RUMAI PALAUNG FA
213 | 0xE1 0x82 0xA0..0xFF #L& [38] GEORGIAN CAPITAL LETTER AN..GEORGI...
214 | 0xE1 0x83 0x00..0x85 #
215 | 0xE1 0x83 0x87 #L& GEORGIAN CAPITAL LETTER YN
216 | 0xE1 0x83 0x8D #L& GEORGIAN CAPITAL LETTER AEN
217 | 0xE1 0x83 0x90..0xBA #Lo [43] GEORGIAN LETTER AN..GEORGIAN LETTE...
218 | 0xE1 0x83 0xBC #Lm MODIFIER LETTER GEORGIAN NAR
219 | 0xE1 0x83 0xBD..0xFF #Lo [332] GEORGIAN LETTER AEN..ETHIOPIC ...
220 | 0xE1 0x84..0x88 0x00..0xFF #
221 | 0xE1 0x89 0x00..0x88 #
222 | 0xE1 0x89 0x8A..0x8D #Lo [4] ETHIOPIC SYLLABLE QWI..ETHIOPIC SY...
223 | 0xE1 0x89 0x90..0x96 #Lo [7] ETHIOPIC SYLLABLE QHA..ETHIOPIC SY...
224 | 0xE1 0x89 0x98 #Lo ETHIOPIC SYLLABLE QHWA
225 | 0xE1 0x89 0x9A..0x9D #Lo [4] ETHIOPIC SYLLABLE QHWI..ETHIOPIC S...
226 | 0xE1 0x89 0xA0..0xFF #Lo [41] ETHIOPIC SYLLABLE BA..ETHIOPIC SYL...
227 | 0xE1 0x8A 0x00..0x88 #
228 | 0xE1 0x8A 0x8A..0x8D #Lo [4] ETHIOPIC SYLLABLE XWI..ETHIOPIC SY...
229 | 0xE1 0x8A 0x90..0xB0 #Lo [33] ETHIOPIC SYLLABLE NA..ETHIOPIC SYL...
230 | 0xE1 0x8A 0xB2..0xB5 #Lo [4] ETHIOPIC SYLLABLE KWI..ETHIOPIC SY...
231 | 0xE1 0x8A 0xB8..0xBE #Lo [7] ETHIOPIC SYLLABLE KXA..ETHIOPIC SY...
232 | 0xE1 0x8B 0x80 #Lo ETHIOPIC SYLLABLE KXWA
233 | 0xE1 0x8B 0x82..0x85 #Lo [4] ETHIOPIC SYLLABLE KXWI..ETHIOPIC S...
234 | 0xE1 0x8B 0x88..0x96 #Lo [15] ETHIOPIC SYLLABLE WA..ETHIOPIC SYL...
235 | 0xE1 0x8B 0x98..0xFF #Lo [57] ETHIOPIC SYLLABLE ZA..ETHIOPIC SYL...
236 | 0xE1 0x8C 0x00..0x90 #
237 | 0xE1 0x8C 0x92..0x95 #Lo [4] ETHIOPIC SYLLABLE GWI..ETHIOPIC SY...
238 | 0xE1 0x8C 0x98..0xFF #Lo [67] ETHIOPIC SYLLABLE GGA..ETHIOPIC SY...
239 | 0xE1 0x8D 0x00..0x9A #
240 | 0xE1 0x8E 0x80..0x8F #Lo [16] ETHIOPIC SYLLABLE SEBATBEIT MWA..E...
241 | 0xE1 0x8E 0xA0..0xFF #L& [86] CHEROKEE LETTER A..CHEROKEE LETTER MV
242 | 0xE1 0x8F 0x00..0xB5 #
243 | 0xE1 0x8F 0xB8..0xBD #L& [6] CHEROKEE SMALL LETTER YE..CHEROKEE...
244 | 0xE1 0x90 0x81..0xFF #Lo [620] CANADIAN SYLLABICS E..CANADIAN...
245 | 0xE1 0x91..0x98 0x00..0xFF #
246 | 0xE1 0x99 0x00..0xAC #
247 | 0xE1 0x99 0xAF..0xBF #Lo [17] CANADIAN SYLLABICS QAI..CANADIAN S...
248 | 0xE1 0x9A 0x81..0x9A #Lo [26] OGHAM LETTER BEITH..OGHAM LETTER P...
249 | 0xE1 0x9A 0xA0..0xFF #Lo [75] RUNIC LETTER FEHU FEOH FE F..RUNIC...
250 | 0xE1 0x9B 0x00..0xAA #
251 | 0xE1 0x9B 0xAE..0xB0 #Nl [3] RUNIC ARLAUG SYMBOL..RUNIC BELGTHO...
252 | 0xE1 0x9B 0xB1..0xB8 #Lo [8] RUNIC LETTER K..RUNIC LETTER FRANK...
253 | 0xE1 0x9C 0x80..0x8C #Lo [13] TAGALOG LETTER A..TAGALOG LETTER YA
254 | 0xE1 0x9C 0x8E..0x91 #Lo [4] TAGALOG LETTER LA..TAGALOG LETTER HA
255 | 0xE1 0x9C 0xA0..0xB1 #Lo [18] HANUNOO LETTER A..HANUNOO LETTER HA
256 | 0xE1 0x9D 0x80..0x91 #Lo [18] BUHID LETTER A..BUHID LETTER HA
257 | 0xE1 0x9D 0xA0..0xAC #Lo [13] TAGBANWA LETTER A..TAGBANWA LETTER YA
258 | 0xE1 0x9D 0xAE..0xB0 #Lo [3] TAGBANWA LETTER LA..TAGBANWA LETTE...
259 | 0xE1 0x9E 0x80..0xB3 #Lo [52] KHMER LETTER KA..KHMER INDEPENDENT...
260 | 0xE1 0x9F 0x97 #Lm KHMER SIGN LEK TOO
261 | 0xE1 0x9F 0x9C #Lo KHMER SIGN AVAKRAHASANYA
262 | 0xE1 0xA0 0xA0..0xFF #Lo [35] MONGOLIAN LETTER A..MONGOLIAN LETT...
263 | 0xE1 0xA1 0x00..0x82 #
264 | 0xE1 0xA1 0x83 #Lm MONGOLIAN LETTER TODO LONG VOWEL SIGN
265 | 0xE1 0xA1 0x84..0xB7 #Lo [52] MONGOLIAN LETTER TODO E..MONGOLIAN...
266 | 0xE1 0xA2 0x80..0x84 #Lo [5] MONGOLIAN LETTER ALI GALI ANUSVARA...
267 | 0xE1 0xA2 0x85..0x86 #Mn [2] MONGOLIAN LETTER ALI GALI BALUDA.....
268 | 0xE1 0xA2 0x87..0xA8 #Lo [34] MONGOLIAN LETTER ALI GALI A..MONGO...
269 | 0xE1 0xA2 0xAA #Lo MONGOLIAN LETTER MANCHU ALI GALI LHA
270 | 0xE1 0xA2 0xB0..0xFF #Lo [70] CANADIAN SYLLABICS OY..CANADIAN SY...
271 | 0xE1 0xA3 0x00..0xB5 #
272 | 0xE1 0xA4 0x80..0x9E #Lo [31] LIMBU VOWEL-CARRIER LETTER..LIMBU ...
273 | 0xE1 0xA5 0x90..0xAD #Lo [30] TAI LE LETTER KA..TAI LE LETTER AI
274 | 0xE1 0xA5 0xB0..0xB4 #Lo [5] TAI LE LETTER TONE-2..TAI LE LETTE...
275 | 0xE1 0xA6 0x80..0xAB #Lo [44] NEW TAI LUE LETTER HIGH QA..NEW TA...
276 | 0xE1 0xA6 0xB0..0xFF #Lo [26] NEW TAI LUE VOWEL SIGN VOWEL SHORT...
277 | 0xE1 0xA7 0x00..0x89 #
278 | 0xE1 0xA8 0x80..0x96 #Lo [23] BUGINESE LETTER KA..BUGINESE LETTE...
279 | 0xE1 0xA8 0xA0..0xFF #Lo [53] TAI THAM LETTER HIGH KA..TAI THAM ...
280 | 0xE1 0xA9 0x00..0x94 #
281 | 0xE1 0xAA 0xA7 #Lm TAI THAM SIGN MAI YAMOK
282 | 0xE1 0xAC 0x85..0xB3 #Lo [47] BALINESE LETTER AKARA..BALINESE LE...
283 | 0xE1 0xAD 0x85..0x8B #Lo [7] BALINESE LETTER KAF SASAK..BALINES...
284 | 0xE1 0xAE 0x83..0xA0 #Lo [30] SUNDANESE LETTER A..SUNDANESE LETT...
285 | 0xE1 0xAE 0xAE..0xAF #Lo [2] SUNDANESE LETTER KHA..SUNDANESE LE...
286 | 0xE1 0xAE 0xBA..0xFF #Lo [44] SUNDANESE AVAGRAHA..BATAK LETTER U
287 | 0xE1 0xAF 0x00..0xA5 #
288 | 0xE1 0xB0 0x80..0xA3 #Lo [36] LEPCHA LETTER KA..LEPCHA LETTER A
289 | 0xE1 0xB1 0x8D..0x8F #Lo [3] LEPCHA LETTER TTA..LEPCHA LETTER DDA
290 | 0xE1 0xB1 0x9A..0xB7 #Lo [30] OL CHIKI LETTER LA..OL CHIKI LETTE...
291 | 0xE1 0xB1 0xB8..0xBD #Lm [6] OL CHIKI MU TTUDDAG..OL CHIKI AHAD
292 | 0xE1 0xB2 0x80..0x88 #L& [9] CYRILLIC SMALL LETTER ROUNDED VE.....
293 | 0xE1 0xB3 0xA9..0xAC #Lo [4] VEDIC SIGN ANUSVARA ANTARGOMUKHA.....
294 | 0xE1 0xB3 0xAE..0xB1 #Lo [4] VEDIC SIGN HEXIFORM LONG ANUSVARA....
295 | 0xE1 0xB3 0xB5..0xB6 #Lo [2] VEDIC SIGN JIHVAMULIYA..VEDIC SIGN...
296 | 0xE1 0xB4 0x80..0xAB #L& [44] LATIN LETTER SMALL CAPITAL A..CYRI...
297 | 0xE1 0xB4 0xAC..0xFF #Lm [63] MODIFIER LETTER CAPITAL A..GREEK S...
298 | 0xE1 0xB5 0x00..0xAA #
299 | 0xE1 0xB5 0xAB..0xB7 #L& [13] LATIN SMALL LETTER UE..LATIN SMALL...
300 | 0xE1 0xB5 0xB8 #Lm MODIFIER LETTER CYRILLIC EN
301 | 0xE1 0xB5 0xB9..0xFF #L& [34] LATIN SMALL LETTER INSULAR G..LATI...
302 | 0xE1 0xB6 0x00..0x9A #
303 | 0xE1 0xB6 0x9B..0xBF #Lm [37] MODIFIER LETTER SMALL TURNED ALPHA...
304 | 0xE1 0xB8 0x80..0xFF #L& [278] LATIN CAPITAL LETTER A WITH RI...
305 | 0xE1 0xB9..0xBB 0x00..0xFF #
306 | 0xE1 0xBC 0x00..0x95 #
307 | 0xE1 0xBC 0x98..0x9D #L& [6] GREEK CAPITAL LETTER EPSILON WITH ...
308 | 0xE1 0xBC 0xA0..0xFF #L& [38] GREEK SMALL LETTER ETA WITH PSILI....
309 | 0xE1 0xBD 0x00..0x85 #
310 | 0xE1 0xBD 0x88..0x8D #L& [6] GREEK CAPITAL LETTER OMICRON WITH ...
311 | 0xE1 0xBD 0x90..0x97 #L& [8] GREEK SMALL LETTER UPSILON WITH PS...
312 | 0xE1 0xBD 0x99 #L& GREEK CAPITAL LETTER UPSILON WITH ...
313 | 0xE1 0xBD 0x9B #L& GREEK CAPITAL LETTER UPSILON WITH ...
314 | 0xE1 0xBD 0x9D #L& GREEK CAPITAL LETTER UPSILON WITH ...
315 | 0xE1 0xBD 0x9F..0xBD #L& [31] GREEK CAPITAL LETTER UPSILON WITH ...
316 | 0xE1 0xBE 0x80..0xB4 #L& [53] GREEK SMALL LETTER ALPHA WITH PSIL...
317 | 0xE1 0xBE 0xB6..0xBC #L& [7] GREEK SMALL LETTER ALPHA WITH PERI...
318 | 0xE1 0xBE 0xBE #L& GREEK PROSGEGRAMMENI
319 | 0xE1 0xBF 0x82..0x84 #L& [3] GREEK SMALL LETTER ETA WITH VARIA ...
320 | 0xE1 0xBF 0x86..0x8C #L& [7] GREEK SMALL LETTER ETA WITH PERISP...
321 | 0xE1 0xBF 0x90..0x93 #L& [4] GREEK SMALL LETTER IOTA WITH VRACH...
322 | 0xE1 0xBF 0x96..0x9B #L& [6] GREEK SMALL LETTER IOTA WITH PERIS...
323 | 0xE1 0xBF 0xA0..0xAC #L& [13] GREEK SMALL LETTER UPSILON WITH VR...
324 | 0xE1 0xBF 0xB2..0xB4 #L& [3] GREEK SMALL LETTER OMEGA WITH VARI...
325 | 0xE1 0xBF 0xB6..0xBC #L& [7] GREEK SMALL LETTER OMEGA WITH PERI...
326 | 0xE2 0x81 0xB1 #Lm SUPERSCRIPT LATIN SMALL LETTER I
327 | 0xE2 0x81 0xBF #Lm SUPERSCRIPT LATIN SMALL LETTER N
328 | 0xE2 0x82 0x90..0x9C #Lm [13] LATIN SUBSCRIPT SMALL LETTER A..LA...
329 | 0xE2 0x84 0x82 #L& DOUBLE-STRUCK CAPITAL C
330 | 0xE2 0x84 0x87 #L& EULER CONSTANT
331 | 0xE2 0x84 0x8A..0x93 #L& [10] SCRIPT SMALL G..SCRIPT SMALL L
332 | 0xE2 0x84 0x95 #L& DOUBLE-STRUCK CAPITAL N
333 | 0xE2 0x84 0x98 #Sm SCRIPT CAPITAL P
334 | 0xE2 0x84 0x99..0x9D #L& [5] DOUBLE-STRUCK CAPITAL P..DOUBLE-ST...
335 | 0xE2 0x84 0xA4 #L& DOUBLE-STRUCK CAPITAL Z
336 | 0xE2 0x84 0xA6 #L& OHM SIGN
337 | 0xE2 0x84 0xA8 #L& BLACK-LETTER CAPITAL Z
338 | 0xE2 0x84 0xAA..0xAD #L& [4] KELVIN SIGN..BLACK-LETTER CAPITAL C
339 | 0xE2 0x84 0xAE #So ESTIMATED SYMBOL
340 | 0xE2 0x84 0xAF..0xB4 #L& [6] SCRIPT SMALL E..SCRIPT SMALL O
341 | 0xE2 0x84 0xB5..0xB8 #Lo [4] ALEF SYMBOL..DALET SYMBOL
342 | 0xE2 0x84 0xB9 #L& INFORMATION SOURCE
343 | 0xE2 0x84 0xBC..0xBF #L& [4] DOUBLE-STRUCK SMALL PI..DOUBLE-STR...
344 | 0xE2 0x85 0x85..0x89 #L& [5] DOUBLE-STRUCK ITALIC CAPITAL D..DO...
345 | 0xE2 0x85 0x8E #L& TURNED SMALL F
346 | 0xE2 0x85 0xA0..0xFF #Nl [35] ROMAN NUMERAL ONE..ROMAN NUMERAL T...
347 | 0xE2 0x86 0x00..0x82 #
348 | 0xE2 0x86 0x83..0x84 #L& [2] ROMAN NUMERAL REVERSED ONE HUNDRED...
349 | 0xE2 0x86 0x85..0x88 #Nl [4] ROMAN NUMERAL SIX LATE FORM..ROMAN...
350 | 0xE2 0xB0 0x80..0xAE #L& [47] GLAGOLITIC CAPITAL LETTER AZU..GLA...
351 | 0xE2 0xB0 0xB0..0xFF #L& [47] GLAGOLITIC SMALL LETTER AZU..GLAGO...
352 | 0xE2 0xB1 0x00..0x9E #
353 | 0xE2 0xB1 0xA0..0xBB #L& [28] LATIN CAPITAL LETTER L WITH DOUBLE...
354 | 0xE2 0xB1 0xBC..0xBD #Lm [2] LATIN SUBSCRIPT SMALL LETTER J..MO...
355 | 0xE2 0xB1 0xBE..0xFF #L& [103] LATIN CAPITAL LETTER S WITH SW...
356 | 0xE2 0xB2..0xB2 0x00..0xFF #
357 | 0xE2 0xB3 0x00..0xA4 #
358 | 0xE2 0xB3 0xAB..0xAE #L& [4] COPTIC CAPITAL LETTER CRYPTOGRAMMI...
359 | 0xE2 0xB3 0xB2..0xB3 #L& [2] COPTIC CAPITAL LETTER BOHAIRIC KHE...
360 | 0xE2 0xB4 0x80..0xA5 #L& [38] GEORGIAN SMALL LETTER AN..GEORGIAN...
361 | 0xE2 0xB4 0xA7 #L& GEORGIAN SMALL LETTER YN
362 | 0xE2 0xB4 0xAD #L& GEORGIAN SMALL LETTER AEN
363 | 0xE2 0xB4 0xB0..0xFF #Lo [56] TIFINAGH LETTER YA..TIFINAGH LETTE...
364 | 0xE2 0xB5 0x00..0xA7 #
365 | 0xE2 0xB5 0xAF #Lm TIFINAGH MODIFIER LETTER LABIALIZA...
366 | 0xE2 0xB6 0x80..0x96 #Lo [23] ETHIOPIC SYLLABLE LOA..ETHIOPIC SY...
367 | 0xE2 0xB6 0xA0..0xA6 #Lo [7] ETHIOPIC SYLLABLE SSA..ETHIOPIC SY...
368 | 0xE2 0xB6 0xA8..0xAE #Lo [7] ETHIOPIC SYLLABLE CCA..ETHIOPIC SY...
369 | 0xE2 0xB6 0xB0..0xB6 #Lo [7] ETHIOPIC SYLLABLE ZZA..ETHIOPIC SY...
370 | 0xE2 0xB6 0xB8..0xBE #Lo [7] ETHIOPIC SYLLABLE CCHA..ETHIOPIC S...
371 | 0xE2 0xB7 0x80..0x86 #Lo [7] ETHIOPIC SYLLABLE QYA..ETHIOPIC SY...
372 | 0xE2 0xB7 0x88..0x8E #Lo [7] ETHIOPIC SYLLABLE KYA..ETHIOPIC SY...
373 | 0xE2 0xB7 0x90..0x96 #Lo [7] ETHIOPIC SYLLABLE XYA..ETHIOPIC SY...
374 | 0xE2 0xB7 0x98..0x9E #Lo [7] ETHIOPIC SYLLABLE GYA..ETHIOPIC SY...
375 | 0xE3 0x80 0x85 #Lm IDEOGRAPHIC ITERATION MARK
376 | 0xE3 0x80 0x86 #Lo IDEOGRAPHIC CLOSING MARK
377 | 0xE3 0x80 0x87 #Nl IDEOGRAPHIC NUMBER ZERO
378 | 0xE3 0x80 0xA1..0xA9 #Nl [9] HANGZHOU NUMERAL ONE..HANGZHOU NUM...
379 | 0xE3 0x80 0xB1..0xB5 #Lm [5] VERTICAL KANA REPEAT MARK..VERTICA...
380 | 0xE3 0x80 0xB8..0xBA #Nl [3] HANGZHOU NUMERAL TEN..HANGZHOU NUM...
381 | 0xE3 0x80 0xBB #Lm VERTICAL IDEOGRAPHIC ITERATION MARK
382 | 0xE3 0x80 0xBC #Lo MASU MARK
383 | 0xE3 0x81 0x81..0xFF #Lo [86] HIRAGANA LETTER SMALL A..HIRAGANA ...
384 | 0xE3 0x82 0x00..0x96 #
385 | 0xE3 0x82 0x9B..0x9C #Sk [2] KATAKANA-HIRAGANA VOICED SOUND MAR...
386 | 0xE3 0x82 0x9D..0x9E #Lm [2] HIRAGANA ITERATION MARK..HIRAGANA ...
387 | 0xE3 0x82 0x9F #Lo HIRAGANA DIGRAPH YORI
388 | 0xE3 0x82 0xA1..0xFF #Lo [90] KATAKANA LETTER SMALL A..KATAKANA ...
389 | 0xE3 0x83 0x00..0xBA #
390 | 0xE3 0x83 0xBC..0xBE #Lm [3] KATAKANA-HIRAGANA PROLONGED SOUND ...
391 | 0xE3 0x83 0xBF #Lo KATAKANA DIGRAPH KOTO
392 | 0xE3 0x84 0x85..0xAD #Lo [41] BOPOMOFO LETTER B..BOPOMOFO LETTER IH
393 | 0xE3 0x84 0xB1..0xFF #Lo [94] HANGUL LETTER KIYEOK..HANGUL L...
394 | 0xE3 0x85..0x85 0x00..0xFF #
395 | 0xE3 0x86 0x00..0x8E #
396 | 0xE3 0x86 0xA0..0xBA #Lo [27] BOPOMOFO LETTER BU..BOPOMOFO LETTE...
397 | 0xE3 0x87 0xB0..0xBF #Lo [16] KATAKANA LETTER SMALL KU..KATAKANA...
398 | 0xE3 0x90 0x80..0xFF #Lo [6582] CJK UNIFIED IDEOGRAPH-3400..C...
399 | 0xE3 0x91..0xFF 0x00..0xFF #
400 | 0xE4 0x00 0x00..0xFF #
401 | 0xE4 0x01..0xB5 0x00..0xFF #
402 | 0xE4 0xB6 0x00..0xB5 #
403 | 0xE4 0xB8 0x80..0xFF #Lo [20950] CJK UNIFIED IDEOGRAPH-...
404 | 0xE4 0xB9..0xFF 0x00..0xFF #
405 | 0xE5..0xE8 0x00..0xFF 0x00..0xFF #
406 | 0xE9 0x00 0x00..0xFF #
407 | 0xE9 0x01..0xBE 0x00..0xFF #
408 | 0xE9 0xBF 0x00..0x95 #
409 | 0xEA 0x80 0x80..0x94 #Lo [21] YI SYLLABLE IT..YI SYLLABLE E
410 | 0xEA 0x80 0x95 #Lm YI SYLLABLE WU
411 | 0xEA 0x80 0x96..0xFF #Lo [1143] YI SYLLABLE BIT..YI SYLLABLE YYR
412 | 0xEA 0x81..0x91 0x00..0xFF #
413 | 0xEA 0x92 0x00..0x8C #
414 | 0xEA 0x93 0x90..0xB7 #Lo [40] LISU LETTER BA..LISU LETTER OE
415 | 0xEA 0x93 0xB8..0xBD #Lm [6] LISU LETTER TONE MYA TI..LISU LETT...
416 | 0xEA 0x94 0x80..0xFF #Lo [268] VAI SYLLABLE EE..VAI SYLLABLE NG
417 | 0xEA 0x95..0x97 0x00..0xFF #
418 | 0xEA 0x98 0x00..0x8B #
419 | 0xEA 0x98 0x8C #Lm VAI SYLLABLE LENGTHENER
420 | 0xEA 0x98 0x90..0x9F #Lo [16] VAI SYLLABLE NDOLE FA..VAI SYMBOL ...
421 | 0xEA 0x98 0xAA..0xAB #Lo [2] VAI SYLLABLE NDOLE MA..VAI SYLLABL...
422 | 0xEA 0x99 0x80..0xAD #L& [46] CYRILLIC CAPITAL LETTER ZEMLYA..CY...
423 | 0xEA 0x99 0xAE #Lo CYRILLIC LETTER MULTIOCULAR O
424 | 0xEA 0x99 0xBF #Lm CYRILLIC PAYEROK
425 | 0xEA 0x9A 0x80..0x9B #L& [28] CYRILLIC CAPITAL LETTER DWE..CYRIL...
426 | 0xEA 0x9A 0x9C..0x9D #Lm [2] MODIFIER LETTER CYRILLIC HARD SIGN...
427 | 0xEA 0x9A 0xA0..0xFF #Lo [70] BAMUM LETTER A..BAMUM LETTER KI
428 | 0xEA 0x9B 0x00..0xA5 #
429 | 0xEA 0x9B 0xA6..0xAF #Nl [10] BAMUM LETTER MO..BAMUM LETTER KOGHOM
430 | 0xEA 0x9C 0x97..0x9F #Lm [9] MODIFIER LETTER DOT VERTICAL BAR.....
431 | 0xEA 0x9C 0xA2..0xFF #L& [78] LATIN CAPITAL LETTER EGYPTOLOGICAL...
432 | 0xEA 0x9D 0x00..0xAF #
433 | 0xEA 0x9D 0xB0 #Lm MODIFIER LETTER US
434 | 0xEA 0x9D 0xB1..0xFF #L& [23] LATIN SMALL LETTER DUM..LATIN SMAL...
435 | 0xEA 0x9E 0x00..0x87 #
436 | 0xEA 0x9E 0x88 #Lm MODIFIER LETTER LOW CIRCUMFLEX ACCENT
437 | 0xEA 0x9E 0x8B..0x8E #L& [4] LATIN CAPITAL LETTER SALTILLO..LAT...
438 | 0xEA 0x9E 0x8F #Lo LATIN LETTER SINOLOGICAL DOT
439 | 0xEA 0x9E 0x90..0xAE #L& [31] LATIN CAPITAL LETTER N WITH DESCEN...
440 | 0xEA 0x9E 0xB0..0xB7 #L& [8] LATIN CAPITAL LETTER TURNED K..LAT...
441 | 0xEA 0x9F 0xB7 #Lo LATIN EPIGRAPHIC LETTER SIDEWAYS I
442 | 0xEA 0x9F 0xB8..0xB9 #Lm [2] MODIFIER LETTER CAPITAL H WITH STR...
443 | 0xEA 0x9F 0xBA #L& LATIN LETTER SMALL CAPITAL TURNED M
444 | 0xEA 0x9F 0xBB..0xFF #Lo [7] LATIN EPIGRAPHIC LETTER REVERSED F...
445 | 0xEA 0xA0 0x00..0x81 #
446 | 0xEA 0xA0 0x83..0x85 #Lo [3] SYLOTI NAGRI LETTER U..SYLOTI NAGR...
447 | 0xEA 0xA0 0x87..0x8A #Lo [4] SYLOTI NAGRI LETTER KO..SYLOTI NAG...
448 | 0xEA 0xA0 0x8C..0xA2 #Lo [23] SYLOTI NAGRI LETTER CO..SYLOTI NAG...
449 | 0xEA 0xA1 0x80..0xB3 #Lo [52] PHAGS-PA LETTER KA..PHAGS-PA LETTE...
450 | 0xEA 0xA2 0x82..0xB3 #Lo [50] SAURASHTRA LETTER A..SAURASHTRA LE...
451 | 0xEA 0xA3 0xB2..0xB7 #Lo [6] DEVANAGARI SIGN SPACING CANDRABIND...
452 | 0xEA 0xA3 0xBB #Lo DEVANAGARI HEADSTROKE
453 | 0xEA 0xA3 0xBD #Lo DEVANAGARI JAIN OM
454 | 0xEA 0xA4 0x8A..0xA5 #Lo [28] KAYAH LI LETTER KA..KAYAH LI LETTE...
455 | 0xEA 0xA4 0xB0..0xFF #Lo [23] REJANG LETTER KA..REJANG LETTER A
456 | 0xEA 0xA5 0x00..0x86 #
457 | 0xEA 0xA5 0xA0..0xBC #Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANG...
458 | 0xEA 0xA6 0x84..0xB2 #Lo [47] JAVANESE LETTER A..JAVANESE LETTER HA
459 | 0xEA 0xA7 0x8F #Lm JAVANESE PANGRANGKEP
460 | 0xEA 0xA7 0xA0..0xA4 #Lo [5] MYANMAR LETTER SHAN GHA..MYANMAR L...
461 | 0xEA 0xA7 0xA6 #Lm MYANMAR MODIFIER LETTER SHAN REDUP...
462 | 0xEA 0xA7 0xA7..0xAF #Lo [9] MYANMAR LETTER TAI LAING NYA..MYAN...
463 | 0xEA 0xA7 0xBA..0xBE #Lo [5] MYANMAR LETTER TAI LAING LLA..MYAN...
464 | 0xEA 0xA8 0x80..0xA8 #Lo [41] CHAM LETTER A..CHAM LETTER HA
465 | 0xEA 0xA9 0x80..0x82 #Lo [3] CHAM LETTER FINAL K..CHAM LETTER F...
466 | 0xEA 0xA9 0x84..0x8B #Lo [8] CHAM LETTER FINAL CH..CHAM LETTER ...
467 | 0xEA 0xA9 0xA0..0xAF #Lo [16] MYANMAR LETTER KHAMTI GA..MYANMAR ...
468 | 0xEA 0xA9 0xB0 #Lm MYANMAR MODIFIER LETTER KHAMTI RED...
469 | 0xEA 0xA9 0xB1..0xB6 #Lo [6] MYANMAR LETTER KHAMTI XA..MYANMAR ...
470 | 0xEA 0xA9 0xBA #Lo MYANMAR LETTER AITON RA
471 | 0xEA 0xA9 0xBE..0xFF #Lo [50] MYANMAR LETTER SHWE PALAUNG CHA..T...
472 | 0xEA 0xAA 0x00..0xAF #
473 | 0xEA 0xAA 0xB1 #Lo TAI VIET VOWEL AA
474 | 0xEA 0xAA 0xB5..0xB6 #Lo [2] TAI VIET VOWEL E..TAI VIET VOWEL O
475 | 0xEA 0xAA 0xB9..0xBD #Lo [5] TAI VIET VOWEL UEA..TAI VIET VOWEL AN
476 | 0xEA 0xAB 0x80 #Lo TAI VIET TONE MAI NUENG
477 | 0xEA 0xAB 0x82 #Lo TAI VIET TONE MAI SONG
478 | 0xEA 0xAB 0x9B..0x9C #Lo [2] TAI VIET SYMBOL KON..TAI VIET SYMB...
479 | 0xEA 0xAB 0x9D #Lm TAI VIET SYMBOL SAM
480 | 0xEA 0xAB 0xA0..0xAA #Lo [11] MEETEI MAYEK LETTER E..MEETEI MAYE...
481 | 0xEA 0xAB 0xB2 #Lo MEETEI MAYEK ANJI
482 | 0xEA 0xAB 0xB3..0xB4 #Lm [2] MEETEI MAYEK SYLLABLE REPETITION M...
483 | 0xEA 0xAC 0x81..0x86 #Lo [6] ETHIOPIC SYLLABLE TTHU..ETHIOPIC S...
484 | 0xEA 0xAC 0x89..0x8E #Lo [6] ETHIOPIC SYLLABLE DDHU..ETHIOPIC S...
485 | 0xEA 0xAC 0x91..0x96 #Lo [6] ETHIOPIC SYLLABLE DZU..ETHIOPIC SY...
486 | 0xEA 0xAC 0xA0..0xA6 #Lo [7] ETHIOPIC SYLLABLE CCHHA..ETHIOPIC ...
487 | 0xEA 0xAC 0xA8..0xAE #Lo [7] ETHIOPIC SYLLABLE BBA..ETHIOPIC SY...
488 | 0xEA 0xAC 0xB0..0xFF #L& [43] LATIN SMALL LETTER BARRED ALPHA..L...
489 | 0xEA 0xAD 0x00..0x9A #
490 | 0xEA 0xAD 0x9C..0x9F #Lm [4] MODIFIER LETTER SMALL HENG..MODIFI...
491 | 0xEA 0xAD 0xA0..0xA5 #L& [6] LATIN SMALL LETTER SAKHA YAT..GREE...
492 | 0xEA 0xAD 0xB0..0xFF #L& [80] CHEROKEE SMALL LETTER A..CHEROKEE ...
493 | 0xEA 0xAE 0x00..0xBF #
494 | 0xEA 0xAF 0x80..0xA2 #Lo [35] MEETEI MAYEK LETTER KOK..MEETEI MA...
495 | 0xEA 0xB0 0x80..0xFF #Lo [11172] HANGUL SYLLABLE GA..HA...
496 | 0xEA 0xB1..0xFF 0x00..0xFF #
497 | 0xEB..0xEC 0x00..0xFF 0x00..0xFF #
498 | 0xED 0x00 0x00..0xFF #
499 | 0xED 0x01..0x9D 0x00..0xFF #
500 | 0xED 0x9E 0x00..0xA3 #
501 | 0xED 0x9E 0xB0..0xFF #Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUN...
502 | 0xED 0x9F 0x00..0x86 #
503 | 0xED 0x9F 0x8B..0xBB #Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANG...
504 | 0xEF 0xA4 0x80..0xFF #Lo [366] CJK COMPATIBILITY IDEOGRAPH-F9...
505 | 0xEF 0xA5..0xA8 0x00..0xFF #
506 | 0xEF 0xA9 0x00..0xAD #
507 | 0xEF 0xA9 0xB0..0xFF #Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA...
508 | 0xEF 0xAA..0xAA 0x00..0xFF #
509 | 0xEF 0xAB 0x00..0x99 #
510 | 0xEF 0xAC 0x80..0x86 #L& [7] LATIN SMALL LIGATURE FF..LATIN SMA...
511 | 0xEF 0xAC 0x93..0x97 #L& [5] ARMENIAN SMALL LIGATURE MEN NOW..A...
512 | 0xEF 0xAC 0x9D #Lo HEBREW LETTER YOD WITH HIRIQ
513 | 0xEF 0xAC 0x9F..0xA8 #Lo [10] HEBREW LIGATURE YIDDISH YOD YOD PA...
514 | 0xEF 0xAC 0xAA..0xB6 #Lo [13] HEBREW LETTER SHIN WITH SHIN DOT.....
515 | 0xEF 0xAC 0xB8..0xBC #Lo [5] HEBREW LETTER TET WITH DAGESH..HEB...
516 | 0xEF 0xAC 0xBE #Lo HEBREW LETTER MEM WITH DAGESH
517 | 0xEF 0xAD 0x80..0x81 #Lo [2] HEBREW LETTER NUN WITH DAGESH..HEB...
518 | 0xEF 0xAD 0x83..0x84 #Lo [2] HEBREW LETTER FINAL PE WITH DAGESH...
519 | 0xEF 0xAD 0x86..0xFF #Lo [108] HEBREW LETTER TSADI WITH DAGESH..A...
520 | 0xEF 0xAE 0x00..0xB1 #
521 | 0xEF 0xAF 0x93..0xFF #Lo [363] ARABIC LETTER NG ISOLATED FORM...
522 | 0xEF 0xB0..0xB3 0x00..0xFF #
523 | 0xEF 0xB4 0x00..0xBD #
524 | 0xEF 0xB5 0x90..0xFF #Lo [64] ARABIC LIGATURE TEH WITH JEEM WITH...
525 | 0xEF 0xB6 0x00..0x8F #
526 | 0xEF 0xB6 0x92..0xFF #Lo [54] ARABIC LIGATURE MEEM WITH JEEM WIT...
527 | 0xEF 0xB7 0x00..0x87 #
528 | 0xEF 0xB7 0xB0..0xBB #Lo [12] ARABIC LIGATURE SALLA USED AS KORA...
529 | 0xEF 0xB9 0xB0..0xB4 #Lo [5] ARABIC FATHATAN ISOLATED FORM..ARA...
530 | 0xEF 0xB9 0xB6..0xFF #Lo [135] ARABIC FATHA ISOLATED FORM..AR...
531 | 0xEF 0xBA..0xBA 0x00..0xFF #
532 | 0xEF 0xBB 0x00..0xBC #
533 | 0xEF 0xBC 0xA1..0xBA #L& [26] FULLWIDTH LATIN CAPITAL LETTER A.....
534 | 0xEF 0xBD 0x81..0x9A #L& [26] FULLWIDTH LATIN SMALL LETTER A..FU...
535 | 0xEF 0xBD 0xA6..0xAF #Lo [10] HALFWIDTH KATAKANA LETTER WO..HALF...
536 | 0xEF 0xBD 0xB0 #Lm HALFWIDTH KATAKANA-HIRAGANA PROLON...
537 | 0xEF 0xBD 0xB1..0xFF #Lo [45] HALFWIDTH KATAKANA LETTER A..HALFW...
538 | 0xEF 0xBE 0x00..0x9D #
539 | 0xEF 0xBE 0x9E..0x9F #Lm [2] HALFWIDTH KATAKANA VOICED SOUND MA...
540 | 0xEF 0xBE 0xA0..0xBE #Lo [31] HALFWIDTH HANGUL FILLER..HALFWIDTH...
541 | 0xEF 0xBF 0x82..0x87 #Lo [6] HALFWIDTH HANGUL LETTER A..HALFWID...
542 | 0xEF 0xBF 0x8A..0x8F #Lo [6] HALFWIDTH HANGUL LETTER YEO..HALFW...
543 | 0xEF 0xBF 0x92..0x97 #Lo [6] HALFWIDTH HANGUL LETTER YO..HALFWI...
544 | 0xEF 0xBF 0x9A..0x9C #Lo [3] HALFWIDTH HANGUL LETTER EU..HALFWI...
545 | 0xF0 0x90 0x80 0x80..0x8B #Lo [12] LINEAR B SYLLABLE B008 A..LINEA...
546 | 0xF0 0x90 0x80 0x8D..0xA6 #Lo [26] LINEAR B SYLLABLE B036 JO..LINE...
547 | 0xF0 0x90 0x80 0xA8..0xBA #Lo [19] LINEAR B SYLLABLE B060 RA..LINE...
548 | 0xF0 0x90 0x80 0xBC..0xBD #Lo [2] LINEAR B SYLLABLE B017 ZA..LINE...
549 | 0xF0 0x90 0x80 0xBF..0xFF #Lo [15] LINEAR B SYLLABLE B020 ZO..LINE...
550 | 0xF0 0x90 0x81 0x00..0x8D #
551 | 0xF0 0x90 0x81 0x90..0x9D #Lo [14] LINEAR B SYMBOL B018..LINEAR B ...
552 | 0xF0 0x90 0x82 0x80..0xFF #Lo [123] LINEAR B IDEOGRAM B100 MAN..LIN...
553 | 0xF0 0x90 0x83 0x00..0xBA #
554 | 0xF0 0x90 0x85 0x80..0xB4 #Nl [53] GREEK ACROPHONIC ATTIC ONE QUAR...
555 | 0xF0 0x90 0x8A 0x80..0x9C #Lo [29] LYCIAN LETTER A..LYCIAN LETTER X
556 | 0xF0 0x90 0x8A 0xA0..0xFF #Lo [49] CARIAN LETTER A..CARIAN LETTER ...
557 | 0xF0 0x90 0x8B 0x00..0x90 #
558 | 0xF0 0x90 0x8C 0x80..0x9F #Lo [32] OLD ITALIC LETTER A..OLD ITALIC...
559 | 0xF0 0x90 0x8C 0xB0..0xFF #Lo [17] GOTHIC LETTER AHSA..GOTHIC LETT...
560 | 0xF0 0x90 0x8D 0x00..0x80 #
561 | 0xF0 0x90 0x8D 0x81 #Nl GOTHIC LETTER NINETY
562 | 0xF0 0x90 0x8D 0x82..0x89 #Lo [8] GOTHIC LETTER RAIDA..GOTHIC LET...
563 | 0xF0 0x90 0x8D 0x8A #Nl GOTHIC LETTER NINE HUNDRED
564 | 0xF0 0x90 0x8D 0x90..0xB5 #Lo [38] OLD PERMIC LETTER AN..OLD PERMI...
565 | 0xF0 0x90 0x8E 0x80..0x9D #Lo [30] UGARITIC LETTER ALPA..UGARITIC ...
566 | 0xF0 0x90 0x8E 0xA0..0xFF #Lo [36] OLD PERSIAN SIGN A..OLD PERSIAN...
567 | 0xF0 0x90 0x8F 0x00..0x83 #
568 | 0xF0 0x90 0x8F 0x88..0x8F #Lo [8] OLD PERSIAN SIGN AURAMAZDAA..OL...
569 | 0xF0 0x90 0x8F 0x91..0x95 #Nl [5] OLD PERSIAN NUMBER ONE..OLD PER...
570 | 0xF0 0x90 0x90 0x80..0xFF #L& [80] DESERET CAPITAL LETTER LONG I.....
571 | 0xF0 0x90 0x91 0x00..0x8F #
572 | 0xF0 0x90 0x91 0x90..0xFF #Lo [78] SHAVIAN LETTER PEEP..OSMANYA LE...
573 | 0xF0 0x90 0x92 0x00..0x9D #
574 | 0xF0 0x90 0x92 0xB0..0xFF #L& [36] OSAGE CAPITAL LETTER A..OSAGE C...
575 | 0xF0 0x90 0x93 0x00..0x93 #
576 | 0xF0 0x90 0x93 0x98..0xBB #L& [36] OSAGE SMALL LETTER A..OSAGE SMA...
577 | 0xF0 0x90 0x94 0x80..0xA7 #Lo [40] ELBASAN LETTER A..ELBASAN LETTE...
578 | 0xF0 0x90 0x94 0xB0..0xFF #Lo [52] CAUCASIAN ALBANIAN LETTER ALT.....
579 | 0xF0 0x90 0x95 0x00..0xA3 #
580 | 0xF0 0x90 0x98 0x80..0xFF #Lo [311] LINEAR A SIGN AB001..LINE...
581 | 0xF0 0x90 0x99..0x9B 0x00..0xFF #
582 | 0xF0 0x90 0x9C 0x00..0xB6 #
583 | 0xF0 0x90 0x9D 0x80..0x95 #Lo [22] LINEAR A SIGN A701 A..LINEAR A ...
584 | 0xF0 0x90 0x9D 0xA0..0xA7 #Lo [8] LINEAR A SIGN A800..LINEAR A SI...
585 | 0xF0 0x90 0xA0 0x80..0x85 #Lo [6] CYPRIOT SYLLABLE A..CYPRIOT SYL...
586 | 0xF0 0x90 0xA0 0x88 #Lo CYPRIOT SYLLABLE JO
587 | 0xF0 0x90 0xA0 0x8A..0xB5 #Lo [44] CYPRIOT SYLLABLE KA..CYPRIOT SY...
588 | 0xF0 0x90 0xA0 0xB7..0xB8 #Lo [2] CYPRIOT SYLLABLE XA..CYPRIOT SY...
589 | 0xF0 0x90 0xA0 0xBC #Lo CYPRIOT SYLLABLE ZA
590 | 0xF0 0x90 0xA0 0xBF..0xFF #Lo [23] CYPRIOT SYLLABLE ZO..IMPERIAL A...
591 | 0xF0 0x90 0xA1 0x00..0x95 #
592 | 0xF0 0x90 0xA1 0xA0..0xB6 #Lo [23] PALMYRENE LETTER ALEPH..PALMYRE...
593 | 0xF0 0x90 0xA2 0x80..0x9E #Lo [31] NABATAEAN LETTER FINAL ALEPH..N...
594 | 0xF0 0x90 0xA3 0xA0..0xB2 #Lo [19] HATRAN LETTER ALEPH..HATRAN LET...
595 | 0xF0 0x90 0xA3 0xB4..0xB5 #Lo [2] HATRAN LETTER SHIN..HATRAN LETT...
596 | 0xF0 0x90 0xA4 0x80..0x95 #Lo [22] PHOENICIAN LETTER ALF..PHOENICI...
597 | 0xF0 0x90 0xA4 0xA0..0xB9 #Lo [26] LYDIAN LETTER A..LYDIAN LETTER C
598 | 0xF0 0x90 0xA6 0x80..0xB7 #Lo [56] MEROITIC HIEROGLYPHIC LETTER A....
599 | 0xF0 0x90 0xA6 0xBE..0xBF #Lo [2] MEROITIC CURSIVE LOGOGRAM RMT.....
600 | 0xF0 0x90 0xA8 0x80 #Lo KHAROSHTHI LETTER A
601 | 0xF0 0x90 0xA8 0x90..0x93 #Lo [4] KHAROSHTHI LETTER KA..KHAROSHTH...
602 | 0xF0 0x90 0xA8 0x95..0x97 #Lo [3] KHAROSHTHI LETTER CA..KHAROSHTH...
603 | 0xF0 0x90 0xA8 0x99..0xB3 #Lo [27] KHAROSHTHI LETTER NYA..KHAROSHT...
604 | 0xF0 0x90 0xA9 0xA0..0xBC #Lo [29] OLD SOUTH ARABIAN LETTER HE..OL...
605 | 0xF0 0x90 0xAA 0x80..0x9C #Lo [29] OLD NORTH ARABIAN LETTER HEH..O...
606 | 0xF0 0x90 0xAB 0x80..0x87 #Lo [8] MANICHAEAN LETTER ALEPH..MANICH...
607 | 0xF0 0x90 0xAB 0x89..0xA4 #Lo [28] MANICHAEAN LETTER ZAYIN..MANICH...
608 | 0xF0 0x90 0xAC 0x80..0xB5 #Lo [54] AVESTAN LETTER A..AVESTAN LETTE...
609 | 0xF0 0x90 0xAD 0x80..0x95 #Lo [22] INSCRIPTIONAL PARTHIAN LETTER A...
610 | 0xF0 0x90 0xAD 0xA0..0xB2 #Lo [19] INSCRIPTIONAL PAHLAVI LETTER AL...
611 | 0xF0 0x90 0xAE 0x80..0x91 #Lo [18] PSALTER PAHLAVI LETTER ALEPH..P...
612 | 0xF0 0x90 0xB0 0x80..0xFF #Lo [73] OLD TURKIC LETTER ORKHON A..OLD...
613 | 0xF0 0x90 0xB1 0x00..0x88 #
614 | 0xF0 0x90 0xB2 0x80..0xB2 #L& [51] OLD HUNGARIAN CAPITAL LETTER A....
615 | 0xF0 0x90 0xB3 0x80..0xB2 #L& [51] OLD HUNGARIAN SMALL LETTER A..O...
616 | 0xF0 0x91 0x80 0x83..0xB7 #Lo [53] BRAHMI SIGN JIHVAMULIYA..BRAHMI...
617 | 0xF0 0x91 0x82 0x83..0xAF #Lo [45] KAITHI LETTER A..KAITHI LETTER HA
618 | 0xF0 0x91 0x83 0x90..0xA8 #Lo [25] SORA SOMPENG LETTER SAH..SORA S...
619 | 0xF0 0x91 0x84 0x83..0xA6 #Lo [36] CHAKMA LETTER AA..CHAKMA LETTER...
620 | 0xF0 0x91 0x85 0x90..0xB2 #Lo [35] MAHAJANI LETTER A..MAHAJANI LET...
621 | 0xF0 0x91 0x85 0xB6 #Lo MAHAJANI LIGATURE SHRI
622 | 0xF0 0x91 0x86 0x83..0xB2 #Lo [48] SHARADA LETTER A..SHARADA LETTE...
623 | 0xF0 0x91 0x87 0x81..0x84 #Lo [4] SHARADA SIGN AVAGRAHA..SHARADA OM
624 | 0xF0 0x91 0x87 0x9A #Lo SHARADA EKAM
625 | 0xF0 0x91 0x87 0x9C #Lo SHARADA HEADSTROKE
626 | 0xF0 0x91 0x88 0x80..0x91 #Lo [18] KHOJKI LETTER A..KHOJKI LETTER JJA
627 | 0xF0 0x91 0x88 0x93..0xAB #Lo [25] KHOJKI LETTER NYA..KHOJKI LETTE...
628 | 0xF0 0x91 0x8A 0x80..0x86 #Lo [7] MULTANI LETTER A..MULTANI LETTE...
629 | 0xF0 0x91 0x8A 0x88 #Lo MULTANI LETTER GHA
630 | 0xF0 0x91 0x8A 0x8A..0x8D #Lo [4] MULTANI LETTER CA..MULTANI LETT...
631 | 0xF0 0x91 0x8A 0x8F..0x9D #Lo [15] MULTANI LETTER NYA..MULTANI LET...
632 | 0xF0 0x91 0x8A 0x9F..0xA8 #Lo [10] MULTANI LETTER BHA..MULTANI LET...
633 | 0xF0 0x91 0x8A 0xB0..0xFF #Lo [47] KHUDAWADI LETTER A..KHUDAWADI L...
634 | 0xF0 0x91 0x8B 0x00..0x9E #
635 | 0xF0 0x91 0x8C 0x85..0x8C #Lo [8] GRANTHA LETTER A..GRANTHA LETTE...
636 | 0xF0 0x91 0x8C 0x8F..0x90 #Lo [2] GRANTHA LETTER EE..GRANTHA LETT...
637 | 0xF0 0x91 0x8C 0x93..0xA8 #Lo [22] GRANTHA LETTER OO..GRANTHA LETT...
638 | 0xF0 0x91 0x8C 0xAA..0xB0 #Lo [7] GRANTHA LETTER PA..GRANTHA LETT...
639 | 0xF0 0x91 0x8C 0xB2..0xB3 #Lo [2] GRANTHA LETTER LA..GRANTHA LETT...
640 | 0xF0 0x91 0x8C 0xB5..0xB9 #Lo [5] GRANTHA LETTER VA..GRANTHA LETT...
641 | 0xF0 0x91 0x8C 0xBD #Lo GRANTHA SIGN AVAGRAHA
642 | 0xF0 0x91 0x8D 0x90 #Lo GRANTHA OM
643 | 0xF0 0x91 0x8D 0x9D..0xA1 #Lo [5] GRANTHA SIGN PLUTA..GRANTHA LET...
644 | 0xF0 0x91 0x90 0x80..0xB4 #Lo [53] NEWA LETTER A..NEWA LETTER HA
645 | 0xF0 0x91 0x91 0x87..0x8A #Lo [4] NEWA SIGN AVAGRAHA..NEWA SIDDHI
646 | 0xF0 0x91 0x92 0x80..0xAF #Lo [48] TIRHUTA ANJI..TIRHUTA LETTER HA
647 | 0xF0 0x91 0x93 0x84..0x85 #Lo [2] TIRHUTA SIGN AVAGRAHA..TIRHUTA ...
648 | 0xF0 0x91 0x93 0x87 #Lo TIRHUTA OM
649 | 0xF0 0x91 0x96 0x80..0xAE #Lo [47] SIDDHAM LETTER A..SIDDHAM LETTE...
650 | 0xF0 0x91 0x97 0x98..0x9B #Lo [4] SIDDHAM LETTER THREE-CIRCLE ALT...
651 | 0xF0 0x91 0x98 0x80..0xAF #Lo [48] MODI LETTER A..MODI LETTER LLA
652 | 0xF0 0x91 0x99 0x84 #Lo MODI SIGN HUVA
653 | 0xF0 0x91 0x9A 0x80..0xAA #Lo [43] TAKRI LETTER A..TAKRI LETTER RRA
654 | 0xF0 0x91 0x9C 0x80..0x99 #Lo [26] AHOM LETTER KA..AHOM LETTER JHA
655 | 0xF0 0x91 0xA2 0xA0..0xFF #L& [64] WARANG CITI CAPITAL LETTER NGAA...
656 | 0xF0 0x91 0xA3 0x00..0x9F #
657 | 0xF0 0x91 0xA3 0xBF #Lo WARANG CITI OM
658 | 0xF0 0x91 0xAB 0x80..0xB8 #Lo [57] PAU CIN HAU LETTER PA..PAU CIN ...
659 | 0xF0 0x91 0xB0 0x80..0x88 #Lo [9] BHAIKSUKI LETTER A..BHAIKSUKI L...
660 | 0xF0 0x91 0xB0 0x8A..0xAE #Lo [37] BHAIKSUKI LETTER E..BHAIKSUKI L...
661 | 0xF0 0x91 0xB1 0x80 #Lo BHAIKSUKI SIGN AVAGRAHA
662 | 0xF0 0x91 0xB1 0xB2..0xFF #Lo [30] MARCHEN LETTER KA..MARCHEN LETT...
663 | 0xF0 0x91 0xB2 0x00..0x8F #
664 | 0xF0 0x92 0x80 0x80..0xFF #Lo [922] CUNEIFORM SIGN A..CUNEIFO...
665 | 0xF0 0x92 0x81..0x8D 0x00..0xFF #
666 | 0xF0 0x92 0x8E 0x00..0x99 #
667 | 0xF0 0x92 0x90 0x80..0xFF #Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH....
668 | 0xF0 0x92 0x91 0x00..0xAE #
669 | 0xF0 0x92 0x92 0x80..0xFF #Lo [196] CUNEIFORM SIGN AB TIMES N...
670 | 0xF0 0x92 0x93..0x94 0x00..0xFF #
671 | 0xF0 0x92 0x95 0x00..0x83 #
672 | 0xF0 0x93 0x80 0x80..0xFF #Lo [1071] EGYPTIAN HIEROGLYPH A001...
673 | 0xF0 0x93 0x81..0x8F 0x00..0xFF #
674 | 0xF0 0x93 0x90 0x00..0xAE #
675 | 0xF0 0x94 0x90 0x80..0xFF #Lo [583] ANATOLIAN HIEROGLYPH A001...
676 | 0xF0 0x94 0x91..0x98 0x00..0xFF #
677 | 0xF0 0x94 0x99 0x00..0x86 #
678 | 0xF0 0x96 0xA0 0x80..0xFF #Lo [569] BAMUM LETTER PHASE-A NGKU...
679 | 0xF0 0x96 0xA1..0xA7 0x00..0xFF #
680 | 0xF0 0x96 0xA8 0x00..0xB8 #
681 | 0xF0 0x96 0xA9 0x80..0x9E #Lo [31] MRO LETTER TA..MRO LETTER TEK
682 | 0xF0 0x96 0xAB 0x90..0xAD #Lo [30] BASSA VAH LETTER ENNI..BASSA VA...
683 | 0xF0 0x96 0xAC 0x80..0xAF #Lo [48] PAHAWH HMONG VOWEL KEEB..PAHAWH...
684 | 0xF0 0x96 0xAD 0x80..0x83 #Lm [4] PAHAWH HMONG SIGN VOS SEEV..PAH...
685 | 0xF0 0x96 0xAD 0xA3..0xB7 #Lo [21] PAHAWH HMONG SIGN VOS LUB..PAHA...
686 | 0xF0 0x96 0xAD 0xBD..0xFF #Lo [19] PAHAWH HMONG CLAN SIGN TSHEEJ.....
687 | 0xF0 0x96 0xAE 0x00..0x8F #
688 | 0xF0 0x96 0xBC 0x80..0xFF #Lo [69] MIAO LETTER PA..MIAO LETTER HHA
689 | 0xF0 0x96 0xBD 0x00..0x84 #
690 | 0xF0 0x96 0xBD 0x90 #Lo MIAO LETTER NASALIZATION
691 | 0xF0 0x96 0xBE 0x93..0x9F #Lm [13] MIAO LETTER TONE-2..MIAO LETTER...
692 | 0xF0 0x96 0xBF 0xA0 #Lm TANGUT ITERATION MARK
693 | 0xF0 0x97 0x80 0x80..0xFF #Lo [6125] TANGUT IDEOGRAPH-17000.....
694 | 0xF0 0x97 0x81..0xFF 0x00..0xFF #
695 | 0xF0 0x98 0x00 0x00..0xFF #
696 | 0xF0 0x98 0x01..0x9E 0x00..0xFF #
697 | 0xF0 0x98 0x9F 0x00..0xAC #
698 | 0xF0 0x98 0xA0 0x80..0xFF #Lo [755] TANGUT COMPONENT-001..TAN...
699 | 0xF0 0x98 0xA1..0xAA 0x00..0xFF #
700 | 0xF0 0x98 0xAB 0x00..0xB2 #
701 | 0xF0 0x9B 0x80 0x80..0x81 #Lo [2] KATAKANA LETTER ARCHAIC E..HIRA...
702 | 0xF0 0x9B 0xB0 0x80..0xFF #Lo [107] DUPLOYAN LETTER H..DUPLOYAN LET...
703 | 0xF0 0x9B 0xB1 0x00..0xAA #
704 | 0xF0 0x9B 0xB1 0xB0..0xBC #Lo [13] DUPLOYAN AFFIX LEFT HORIZONTAL ...
705 | 0xF0 0x9B 0xB2 0x80..0x88 #Lo [9] DUPLOYAN AFFIX HIGH ACUTE..DUPL...
706 | 0xF0 0x9B 0xB2 0x90..0x99 #Lo [10] DUPLOYAN AFFIX LOW ACUTE..DUPLO...
707 | 0xF0 0x9D 0x90 0x80..0xFF #L& [85] MATHEMATICAL BOLD CAPITAL A..MA...
708 | 0xF0 0x9D 0x91 0x00..0x94 #
709 | 0xF0 0x9D 0x91 0x96..0xFF #L& [71] MATHEMATICAL ITALIC SMALL I..MA...
710 | 0xF0 0x9D 0x92 0x00..0x9C #
711 | 0xF0 0x9D 0x92 0x9E..0x9F #L& [2] MATHEMATICAL SCRIPT CAPITAL C.....
712 | 0xF0 0x9D 0x92 0xA2 #L& MATHEMATICAL SCRIPT CAPITAL G
713 | 0xF0 0x9D 0x92 0xA5..0xA6 #L& [2] MATHEMATICAL SCRIPT CAPITAL J.....
714 | 0xF0 0x9D 0x92 0xA9..0xAC #L& [4] MATHEMATICAL SCRIPT CAPITAL N.....
715 | 0xF0 0x9D 0x92 0xAE..0xB9 #L& [12] MATHEMATICAL SCRIPT CAPITAL S.....
716 | 0xF0 0x9D 0x92 0xBB #L& MATHEMATICAL SCRIPT SMALL F
717 | 0xF0 0x9D 0x92 0xBD..0xFF #L& [7] MATHEMATICAL SCRIPT SMALL H..MA...
718 | 0xF0 0x9D 0x93 0x00..0x83 #
719 | 0xF0 0x9D 0x93 0x85..0xFF #L& [65] MATHEMATICAL SCRIPT SMALL P..MA...
720 | 0xF0 0x9D 0x94 0x00..0x85 #
721 | 0xF0 0x9D 0x94 0x87..0x8A #L& [4] MATHEMATICAL FRAKTUR CAPITAL D....
722 | 0xF0 0x9D 0x94 0x8D..0x94 #L& [8] MATHEMATICAL FRAKTUR CAPITAL J....
723 | 0xF0 0x9D 0x94 0x96..0x9C #L& [7] MATHEMATICAL FRAKTUR CAPITAL S....
724 | 0xF0 0x9D 0x94 0x9E..0xB9 #L& [28] MATHEMATICAL FRAKTUR SMALL A..M...
725 | 0xF0 0x9D 0x94 0xBB..0xBE #L& [4] MATHEMATICAL DOUBLE-STRUCK CAPI...
726 | 0xF0 0x9D 0x95 0x80..0x84 #L& [5] MATHEMATICAL DOUBLE-STRUCK CAPI...
727 | 0xF0 0x9D 0x95 0x86 #L& MATHEMATICAL DOUBLE-STRUCK CAPITAL O
728 | 0xF0 0x9D 0x95 0x8A..0x90 #L& [7] MATHEMATICAL DOUBLE-STRUCK CAPI...
729 | 0xF0 0x9D 0x95 0x92..0xFF #L& [340] MATHEMATICAL DOUBLE-STRUC...
730 | 0xF0 0x9D 0x96..0x99 0x00..0xFF #
731 | 0xF0 0x9D 0x9A 0x00..0xA5 #
732 | 0xF0 0x9D 0x9A 0xA8..0xFF #L& [25] MATHEMATICAL BOLD CAPITAL ALPHA...
733 | 0xF0 0x9D 0x9B 0x00..0x80 #
734 | 0xF0 0x9D 0x9B 0x82..0x9A #L& [25] MATHEMATICAL BOLD SMALL ALPHA.....
735 | 0xF0 0x9D 0x9B 0x9C..0xBA #L& [31] MATHEMATICAL BOLD EPSILON SYMBO...
736 | 0xF0 0x9D 0x9B 0xBC..0xFF #L& [25] MATHEMATICAL ITALIC SMALL ALPHA...
737 | 0xF0 0x9D 0x9C 0x00..0x94 #
738 | 0xF0 0x9D 0x9C 0x96..0xB4 #L& [31] MATHEMATICAL ITALIC EPSILON SYM...
739 | 0xF0 0x9D 0x9C 0xB6..0xFF #L& [25] MATHEMATICAL BOLD ITALIC SMALL ...
740 | 0xF0 0x9D 0x9D 0x00..0x8E #
741 | 0xF0 0x9D 0x9D 0x90..0xAE #L& [31] MATHEMATICAL BOLD ITALIC EPSILO...
742 | 0xF0 0x9D 0x9D 0xB0..0xFF #L& [25] MATHEMATICAL SANS-SERIF BOLD SM...
743 | 0xF0 0x9D 0x9E 0x00..0x88 #
744 | 0xF0 0x9D 0x9E 0x8A..0xA8 #L& [31] MATHEMATICAL SANS-SERIF BOLD EP...
745 | 0xF0 0x9D 0x9E 0xAA..0xFF #L& [25] MATHEMATICAL SANS-SERIF BOLD IT...
746 | 0xF0 0x9D 0x9F 0x00..0x82 #
747 | 0xF0 0x9D 0x9F 0x84..0x8B #L& [8] MATHEMATICAL SANS-SERIF BOLD IT...
748 | 0xF0 0x9E 0xA0 0x80..0xFF #Lo [197] MENDE KIKAKUI SYLLABLE M0...
749 | 0xF0 0x9E 0xA1..0xA2 0x00..0xFF #
750 | 0xF0 0x9E 0xA3 0x00..0x84 #
751 | 0xF0 0x9E 0xA4 0x80..0xFF #L& [68] ADLAM CAPITAL LETTER ALIF..ADLA...
752 | 0xF0 0x9E 0xA5 0x00..0x83 #
753 | 0xF0 0x9E 0xB8 0x80..0x83 #Lo [4] ARABIC MATHEMATICAL ALEF..ARABI...
754 | 0xF0 0x9E 0xB8 0x85..0x9F #Lo [27] ARABIC MATHEMATICAL WAW..ARABIC...
755 | 0xF0 0x9E 0xB8 0xA1..0xA2 #Lo [2] ARABIC MATHEMATICAL INITIAL BEH...
756 | 0xF0 0x9E 0xB8 0xA4 #Lo ARABIC MATHEMATICAL INITIAL HEH
757 | 0xF0 0x9E 0xB8 0xA7 #Lo ARABIC MATHEMATICAL INITIAL HAH
758 | 0xF0 0x9E 0xB8 0xA9..0xB2 #Lo [10] ARABIC MATHEMATICAL INITIAL YEH...
759 | 0xF0 0x9E 0xB8 0xB4..0xB7 #Lo [4] ARABIC MATHEMATICAL INITIAL SHE...
760 | 0xF0 0x9E 0xB8 0xB9 #Lo ARABIC MATHEMATICAL INITIAL DAD
761 | 0xF0 0x9E 0xB8 0xBB #Lo ARABIC MATHEMATICAL INITIAL GHAIN
762 | 0xF0 0x9E 0xB9 0x82 #Lo ARABIC MATHEMATICAL TAILED JEEM
763 | 0xF0 0x9E 0xB9 0x87 #Lo ARABIC MATHEMATICAL TAILED HAH
764 | 0xF0 0x9E 0xB9 0x89 #Lo ARABIC MATHEMATICAL TAILED YEH
765 | 0xF0 0x9E 0xB9 0x8B #Lo ARABIC MATHEMATICAL TAILED LAM
766 | 0xF0 0x9E 0xB9 0x8D..0x8F #Lo [3] ARABIC MATHEMATICAL TAILED NOON...
767 | 0xF0 0x9E 0xB9 0x91..0x92 #Lo [2] ARABIC MATHEMATICAL TAILED SAD....
768 | 0xF0 0x9E 0xB9 0x94 #Lo ARABIC MATHEMATICAL TAILED SHEEN
769 | 0xF0 0x9E 0xB9 0x97 #Lo ARABIC MATHEMATICAL TAILED KHAH
770 | 0xF0 0x9E 0xB9 0x99 #Lo ARABIC MATHEMATICAL TAILED DAD
771 | 0xF0 0x9E 0xB9 0x9B #Lo ARABIC MATHEMATICAL TAILED GHAIN
772 | 0xF0 0x9E 0xB9 0x9D #Lo ARABIC MATHEMATICAL TAILED DOTLESS...
773 | 0xF0 0x9E 0xB9 0x9F #Lo ARABIC MATHEMATICAL TAILED DOTLESS...
774 | 0xF0 0x9E 0xB9 0xA1..0xA2 #Lo [2] ARABIC MATHEMATICAL STRETCHED B...
775 | 0xF0 0x9E 0xB9 0xA4 #Lo ARABIC MATHEMATICAL STRETCHED HEH
776 | 0xF0 0x9E 0xB9 0xA7..0xAA #Lo [4] ARABIC MATHEMATICAL STRETCHED H...
777 | 0xF0 0x9E 0xB9 0xAC..0xB2 #Lo [7] ARABIC MATHEMATICAL STRETCHED M...
778 | 0xF0 0x9E 0xB9 0xB4..0xB7 #Lo [4] ARABIC MATHEMATICAL STRETCHED S...
779 | 0xF0 0x9E 0xB9 0xB9..0xBC #Lo [4] ARABIC MATHEMATICAL STRETCHED D...
780 | 0xF0 0x9E 0xB9 0xBE #Lo ARABIC MATHEMATICAL STRETCHED DOTL...
781 | 0xF0 0x9E 0xBA 0x80..0x89 #Lo [10] ARABIC MATHEMATICAL LOOPED ALEF...
782 | 0xF0 0x9E 0xBA 0x8B..0x9B #Lo [17] ARABIC MATHEMATICAL LOOPED LAM....
783 | 0xF0 0x9E 0xBA 0xA1..0xA3 #Lo [3] ARABIC MATHEMATICAL DOUBLE-STRU...
784 | 0xF0 0x9E 0xBA 0xA5..0xA9 #Lo [5] ARABIC MATHEMATICAL DOUBLE-STRU...
785 | 0xF0 0x9E 0xBA 0xAB..0xBB #Lo [17] ARABIC MATHEMATICAL DOUBLE-STRU...
786 | 0xF0 0xA0 0x80 0x80..0xFF #Lo [42711] CJK UNIFIED IDEOG...
787 | 0xF0 0xA0 0x81..0xFF 0x00..0xFF #
788 | 0xF0 0xA1..0xA9 0x00..0xFF 0x00..0xFF #
789 | 0xF0 0xAA 0x00 0x00..0xFF #
790 | 0xF0 0xAA 0x01..0x9A 0x00..0xFF #
791 | 0xF0 0xAA 0x9B 0x00..0x96 #
792 | 0xF0 0xAA 0x9C 0x80..0xFF #Lo [4149] CJK UNIFIED IDEOGRAPH-2A...
793 | 0xF0 0xAA 0x9D..0xFF 0x00..0xFF #
794 | 0xF0 0xAB 0x00 0x00..0xFF #
795 | 0xF0 0xAB 0x01..0x9B 0x00..0xFF #
796 | 0xF0 0xAB 0x9C 0x00..0xB4 #
797 | 0xF0 0xAB 0x9D 0x80..0xFF #Lo [222] CJK UNIFIED IDEOGRAPH-2B7...
798 | 0xF0 0xAB 0x9E..0x9F 0x00..0xFF #
799 | 0xF0 0xAB 0xA0 0x00..0x9D #
800 | 0xF0 0xAB 0xA0 0xA0..0xFF #Lo [5762] CJK UNIFIED IDEOGRAPH-2B...
801 | 0xF0 0xAB 0xA1..0xFF 0x00..0xFF #
802 | 0xF0 0xAC 0x00 0x00..0xFF #
803 | 0xF0 0xAC 0x01..0xB9 0x00..0xFF #
804 | 0xF0 0xAC 0xBA 0x00..0xA1 #
805 | 0xF0 0xAF 0xA0 0x80..0xFF #Lo [542] CJK COMPATIBILITY IDEOGRA...
806 | 0xF0 0xAF 0xA1..0xA7 0x00..0xFF #
807 | 0xF0 0xAF 0xA8 0x00..0x9D #
808 ;
809
810 ID_Continue =
811 0x30..0x39 #Nd [10] DIGIT ZERO..DIGIT NINE
812 | 0x41..0x5A #L& [26] LATIN CAPITAL LETTER A..LATIN CAPI...
813 | 0x5F #Pc LOW LINE
814 | 0x61..0x7A #L& [26] LATIN SMALL LETTER A..LATIN SMALL ...
815 | 0xC2 0xAA #Lo FEMININE ORDINAL INDICATOR
816 | 0xC2 0xB5 #L& MICRO SIGN
817 | 0xC2 0xB7 #Po MIDDLE DOT
818 | 0xC2 0xBA #Lo MASCULINE ORDINAL INDICATOR
819 | 0xC3 0x80..0x96 #L& [23] LATIN CAPITAL LETTER A WITH GRAVE....
820 | 0xC3 0x98..0xB6 #L& [31] LATIN CAPITAL LETTER O WITH STROKE...
821 | 0xC3 0xB8..0xFF #L& [195] LATIN SMALL LETTER O WITH STROKE.....
822 | 0xC4..0xC5 0x00..0xFF #
823 | 0xC6 0x00..0xBA #
824 | 0xC6 0xBB #Lo LATIN LETTER TWO WITH STROKE
825 | 0xC6 0xBC..0xBF #L& [4] LATIN CAPITAL LETTER TONE FIVE..LA...
826 | 0xC7 0x80..0x83 #Lo [4] LATIN LETTER DENTAL CLICK..LATIN L...
827 | 0xC7 0x84..0xFF #L& [208] LATIN CAPITAL LETTER DZ WITH CARON...
828 | 0xC8..0xC9 0x00..0xFF #
829 | 0xCA 0x00..0x93 #
830 | 0xCA 0x94 #Lo LATIN LETTER GLOTTAL STOP
831 | 0xCA 0x95..0xAF #L& [27] LATIN LETTER PHARYNGEAL VOICED FRI...
832 | 0xCA 0xB0..0xFF #Lm [18] MODIFIER LETTER SMALL H..MODIFIER ...
833 | 0xCB 0x00..0x81 #
834 | 0xCB 0x86..0x91 #Lm [12] MODIFIER LETTER CIRCUMFLEX ACCENT....
835 | 0xCB 0xA0..0xA4 #Lm [5] MODIFIER LETTER SMALL GAMMA..MODIF...
836 | 0xCB 0xAC #Lm MODIFIER LETTER VOICING
837 | 0xCB 0xAE #Lm MODIFIER LETTER DOUBLE APOSTROPHE
838 | 0xCC 0x80..0xFF #Mn [112] COMBINING GRAVE ACCENT..COMBINING ...
839 | 0xCD 0x00..0xAF #
840 | 0xCD 0xB0..0xB3 #L& [4] GREEK CAPITAL LETTER HETA..GREEK S...
841 | 0xCD 0xB4 #Lm GREEK NUMERAL SIGN
842 | 0xCD 0xB6..0xB7 #L& [2] GREEK CAPITAL LETTER PAMPHYLIAN DI...
843 | 0xCD 0xBA #Lm GREEK YPOGEGRAMMENI
844 | 0xCD 0xBB..0xBD #L& [3] GREEK SMALL REVERSED LUNATE SIGMA ...
845 | 0xCD 0xBF #L& GREEK CAPITAL LETTER YOT
846 | 0xCE 0x86 #L& GREEK CAPITAL LETTER ALPHA WITH TONOS
847 | 0xCE 0x87 #Po GREEK ANO TELEIA
848 | 0xCE 0x88..0x8A #L& [3] GREEK CAPITAL LETTER EPSILON WITH ...
849 | 0xCE 0x8C #L& GREEK CAPITAL LETTER OMICRON WITH ...
850 | 0xCE 0x8E..0xA1 #L& [20] GREEK CAPITAL LETTER UPSILON WITH ...
851 | 0xCE 0xA3..0xFF #L& [83] GREEK CAPITAL LETTER SIGMA..GREEK ...
852 | 0xCF 0x00..0xB5 #
853 | 0xCF 0xB7..0xFF #L& [139] GREEK CAPITAL LETTER SHO..CYRILLIC...
854 | 0xD0..0xD1 0x00..0xFF #
855 | 0xD2 0x00..0x81 #
856 | 0xD2 0x83..0x87 #Mn [5] COMBINING CYRILLIC TITLO..COMBININ...
857 | 0xD2 0x8A..0xFF #L& [166] CYRILLIC CAPITAL LETTER SHORT I WI...
858 | 0xD3..0xD3 0x00..0xFF #
859 | 0xD4 0x00..0xAF #
860 | 0xD4 0xB1..0xFF #L& [38] ARMENIAN CAPITAL LETTER AYB..ARMEN...
861 | 0xD5 0x00..0x96 #
862 | 0xD5 0x99 #Lm ARMENIAN MODIFIER LETTER LEFT HALF...
863 | 0xD5 0xA1..0xFF #L& [39] ARMENIAN SMALL LETTER AYB..ARMENIA...
864 | 0xD6 0x00..0x87 #
865 | 0xD6 0x91..0xBD #Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POIN...
866 | 0xD6 0xBF #Mn HEBREW POINT RAFE
867 | 0xD7 0x81..0x82 #Mn [2] HEBREW POINT SHIN DOT..HEBREW POIN...
868 | 0xD7 0x84..0x85 #Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK...
869 | 0xD7 0x87 #Mn HEBREW POINT QAMATS QATAN
870 | 0xD7 0x90..0xAA #Lo [27] HEBREW LETTER ALEF..HEBREW LETTER TAV
871 | 0xD7 0xB0..0xB2 #Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV...
872 | 0xD8 0x90..0x9A #Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WAS...
873 | 0xD8 0xA0..0xBF #Lo [32] ARABIC LETTER KASHMIRI YEH..ARABIC...
874 | 0xD9 0x80 #Lm ARABIC TATWEEL
875 | 0xD9 0x81..0x8A #Lo [10] ARABIC LETTER FEH..ARABIC LETTER YEH
876 | 0xD9 0x8B..0x9F #Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA...
877 | 0xD9 0xA0..0xA9 #Nd [10] ARABIC-INDIC DIGIT ZERO..ARABIC-IN...
878 | 0xD9 0xAE..0xAF #Lo [2] ARABIC LETTER DOTLESS BEH..ARABIC ...
879 | 0xD9 0xB0 #Mn ARABIC LETTER SUPERSCRIPT ALEF
880 | 0xD9 0xB1..0xFF #Lo [99] ARABIC LETTER ALEF WASLA..ARABIC L...
881 | 0xDA..0xDA 0x00..0xFF #
882 | 0xDB 0x00..0x93 #
883 | 0xDB 0x95 #Lo ARABIC LETTER AE
884 | 0xDB 0x96..0x9C #Mn [7] ARABIC SMALL HIGH LIGATURE SAD WIT...
885 | 0xDB 0x9F..0xA4 #Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..AR...
886 | 0xDB 0xA5..0xA6 #Lm [2] ARABIC SMALL WAW..ARABIC SMALL YEH
887 | 0xDB 0xA7..0xA8 #Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMAL...
888 | 0xDB 0xAA..0xAD #Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARAB...
889 | 0xDB 0xAE..0xAF #Lo [2] ARABIC LETTER DAL WITH INVERTED V....
890 | 0xDB 0xB0..0xB9 #Nd [10] EXTENDED ARABIC-INDIC DIGIT ZERO.....
891 | 0xDB 0xBA..0xBC #Lo [3] ARABIC LETTER SHEEN WITH DOT BELOW...
892 | 0xDB 0xBF #Lo ARABIC LETTER HEH WITH INVERTED V
893 | 0xDC 0x90 #Lo SYRIAC LETTER ALAPH
894 | 0xDC 0x91 #Mn SYRIAC LETTER SUPERSCRIPT ALAPH
895 | 0xDC 0x92..0xAF #Lo [30] SYRIAC LETTER BETH..SYRIAC LETTER ...
896 | 0xDC 0xB0..0xFF #Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
897 | 0xDD 0x00..0x8A #
898 | 0xDD 0x8D..0xFF #Lo [89] SYRIAC LETTER SOGDIAN ZHAIN..THAAN...
899 | 0xDE 0x00..0xA5 #
900 | 0xDE 0xA6..0xB0 #Mn [11] THAANA ABAFILI..THAANA SUKUN
901 | 0xDE 0xB1 #Lo THAANA LETTER NAA
902 | 0xDF 0x80..0x89 #Nd [10] NKO DIGIT ZERO..NKO DIGIT NINE
903 | 0xDF 0x8A..0xAA #Lo [33] NKO LETTER A..NKO LETTER JONA RA
904 | 0xDF 0xAB..0xB3 #Mn [9] NKO COMBINING SHORT HIGH TONE..NKO...
905 | 0xDF 0xB4..0xB5 #Lm [2] NKO HIGH TONE APOSTROPHE..NKO LOW ...
906 | 0xDF 0xBA #Lm NKO LAJANYALAN
907 | 0xE0 0xA0 0x80..0x95 #Lo [22] SAMARITAN LETTER ALAF..SAMARITAN L...
908 | 0xE0 0xA0 0x96..0x99 #Mn [4] SAMARITAN MARK IN..SAMARITAN MARK ...
909 | 0xE0 0xA0 0x9A #Lm SAMARITAN MODIFIER LETTER EPENTHET...
910 | 0xE0 0xA0 0x9B..0xA3 #Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAM...
911 | 0xE0 0xA0 0xA4 #Lm SAMARITAN MODIFIER LETTER SHORT A
912 | 0xE0 0xA0 0xA5..0xA7 #Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMA...
913 | 0xE0 0xA0 0xA8 #Lm SAMARITAN MODIFIER LETTER I
914 | 0xE0 0xA0 0xA9..0xAD #Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMAR...
915 | 0xE0 0xA1 0x80..0x98 #Lo [25] MANDAIC LETTER HALQA..MANDAIC LETT...
916 | 0xE0 0xA1 0x99..0x9B #Mn [3] MANDAIC AFFRICATION MARK..MANDAIC ...
917 | 0xE0 0xA2 0xA0..0xB4 #Lo [21] ARABIC LETTER BEH WITH SMALL V BEL...
918 | 0xE0 0xA2 0xB6..0xBD #Lo [8] ARABIC LETTER BEH WITH SMALL MEEM ...
919 | 0xE0 0xA3 0x94..0xA1 #Mn [14] ARABIC SMALL HIGH WORD AR-RUB..ARA...
920 | 0xE0 0xA3 0xA3..0xFF #Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAG...
921 | 0xE0 0xA4 0x00..0x82 #
922 | 0xE0 0xA4 0x83 #Mc DEVANAGARI SIGN VISARGA
923 | 0xE0 0xA4 0x84..0xB9 #Lo [54] DEVANAGARI LETTER SHORT A..DEVANAG...
924 | 0xE0 0xA4 0xBA #Mn DEVANAGARI VOWEL SIGN OE
925 | 0xE0 0xA4 0xBB #Mc DEVANAGARI VOWEL SIGN OOE
926 | 0xE0 0xA4 0xBC #Mn DEVANAGARI SIGN NUKTA
927 | 0xE0 0xA4 0xBD #Lo DEVANAGARI SIGN AVAGRAHA
928 | 0xE0 0xA4 0xBE..0xFF #Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGA...
929 | 0xE0 0xA5 0x00..0x80 #
930 | 0xE0 0xA5 0x81..0x88 #Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGAR...
931 | 0xE0 0xA5 0x89..0x8C #Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DE...
932 | 0xE0 0xA5 0x8D #Mn DEVANAGARI SIGN VIRAMA
933 | 0xE0 0xA5 0x8E..0x8F #Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATR...
934 | 0xE0 0xA5 0x90 #Lo DEVANAGARI OM
935 | 0xE0 0xA5 0x91..0x97 #Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEV...
936 | 0xE0 0xA5 0x98..0xA1 #Lo [10] DEVANAGARI LETTER QA..DEVANAGARI L...
937 | 0xE0 0xA5 0xA2..0xA3 #Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..D...
938 | 0xE0 0xA5 0xA6..0xAF #Nd [10] DEVANAGARI DIGIT ZERO..DEVANAGARI ...
939 | 0xE0 0xA5 0xB1 #Lm DEVANAGARI SIGN HIGH SPACING DOT
940 | 0xE0 0xA5 0xB2..0xFF #Lo [15] DEVANAGARI LETTER CANDRA A..BENGAL...
941 | 0xE0 0xA6 0x00..0x80 #
942 | 0xE0 0xA6 0x81 #Mn BENGALI SIGN CANDRABINDU
943 | 0xE0 0xA6 0x82..0x83 #Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIG...
944 | 0xE0 0xA6 0x85..0x8C #Lo [8] BENGALI LETTER A..BENGALI LETTER V...
945 | 0xE0 0xA6 0x8F..0x90 #Lo [2] BENGALI LETTER E..BENGALI LETTER AI
946 | 0xE0 0xA6 0x93..0xA8 #Lo [22] BENGALI LETTER O..BENGALI LETTER NA
947 | 0xE0 0xA6 0xAA..0xB0 #Lo [7] BENGALI LETTER PA..BENGALI LETTER RA
948 | 0xE0 0xA6 0xB2 #Lo BENGALI LETTER LA
949 | 0xE0 0xA6 0xB6..0xB9 #Lo [4] BENGALI LETTER SHA..BENGALI LETTER HA
950 | 0xE0 0xA6 0xBC #Mn BENGALI SIGN NUKTA
951 | 0xE0 0xA6 0xBD #Lo BENGALI SIGN AVAGRAHA
952 | 0xE0 0xA6 0xBE..0xFF #Mc [3] BENGALI VOWEL SIGN AA..BENGALI VOW...
953 | 0xE0 0xA7 0x00..0x80 #
954 | 0xE0 0xA7 0x81..0x84 #Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWE...
955 | 0xE0 0xA7 0x87..0x88 #Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWE...
956 | 0xE0 0xA7 0x8B..0x8C #Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWE...
957 | 0xE0 0xA7 0x8D #Mn BENGALI SIGN VIRAMA
958 | 0xE0 0xA7 0x8E #Lo BENGALI LETTER KHANDA TA
959 | 0xE0 0xA7 0x97 #Mc BENGALI AU LENGTH MARK
960 | 0xE0 0xA7 0x9C..0x9D #Lo [2] BENGALI LETTER RRA..BENGALI LETTER...
961 | 0xE0 0xA7 0x9F..0xA1 #Lo [3] BENGALI LETTER YYA..BENGALI LETTER...
962 | 0xE0 0xA7 0xA2..0xA3 #Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENG...
963 | 0xE0 0xA7 0xA6..0xAF #Nd [10] BENGALI DIGIT ZERO..BENGALI DIGIT ...
964 | 0xE0 0xA7 0xB0..0xB1 #Lo [2] BENGALI LETTER RA WITH MIDDLE DIAG...
965 | 0xE0 0xA8 0x81..0x82 #Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI...
966 | 0xE0 0xA8 0x83 #Mc GURMUKHI SIGN VISARGA
967 | 0xE0 0xA8 0x85..0x8A #Lo [6] GURMUKHI LETTER A..GURMUKHI LETTER UU
968 | 0xE0 0xA8 0x8F..0x90 #Lo [2] GURMUKHI LETTER EE..GURMUKHI LETTE...
969 | 0xE0 0xA8 0x93..0xA8 #Lo [22] GURMUKHI LETTER OO..GURMUKHI LETTE...
970 | 0xE0 0xA8 0xAA..0xB0 #Lo [7] GURMUKHI LETTER PA..GURMUKHI LETTE...
971 | 0xE0 0xA8 0xB2..0xB3 #Lo [2] GURMUKHI LETTER LA..GURMUKHI LETTE...
972 | 0xE0 0xA8 0xB5..0xB6 #Lo [2] GURMUKHI LETTER VA..GURMUKHI LETTE...
973 | 0xE0 0xA8 0xB8..0xB9 #Lo [2] GURMUKHI LETTER SA..GURMUKHI LETTE...
974 | 0xE0 0xA8 0xBC #Mn GURMUKHI SIGN NUKTA
975 | 0xE0 0xA8 0xBE..0xFF #Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI V...
976 | 0xE0 0xA9 0x00..0x80 #
977 | 0xE0 0xA9 0x81..0x82 #Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VO...
978 | 0xE0 0xA9 0x87..0x88 #Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI V...
979 | 0xE0 0xA9 0x8B..0x8D #Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI S...
980 | 0xE0 0xA9 0x91 #Mn GURMUKHI SIGN UDAAT
981 | 0xE0 0xA9 0x99..0x9C #Lo [4] GURMUKHI LETTER KHHA..GURMUKHI LET...
982 | 0xE0 0xA9 0x9E #Lo GURMUKHI LETTER FA
983 | 0xE0 0xA9 0xA6..0xAF #Nd [10] GURMUKHI DIGIT ZERO..GURMUKHI DIGI...
984 | 0xE0 0xA9 0xB0..0xB1 #Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK
985 | 0xE0 0xA9 0xB2..0xB4 #Lo [3] GURMUKHI IRI..GURMUKHI EK ONKAR
986 | 0xE0 0xA9 0xB5 #Mn GURMUKHI SIGN YAKASH
987 | 0xE0 0xAA 0x81..0x82 #Mn [2] GUJARATI SIGN CANDRABINDU..GUJARAT...
988 | 0xE0 0xAA 0x83 #Mc GUJARATI SIGN VISARGA
989 | 0xE0 0xAA 0x85..0x8D #Lo [9] GUJARATI LETTER A..GUJARATI VOWEL ...
990 | 0xE0 0xAA 0x8F..0x91 #Lo [3] GUJARATI LETTER E..GUJARATI VOWEL ...
991 | 0xE0 0xAA 0x93..0xA8 #Lo [22] GUJARATI LETTER O..GUJARATI LETTER NA
992 | 0xE0 0xAA 0xAA..0xB0 #Lo [7] GUJARATI LETTER PA..GUJARATI LETTE...
993 | 0xE0 0xAA 0xB2..0xB3 #Lo [2] GUJARATI LETTER LA..GUJARATI LETTE...
994 | 0xE0 0xAA 0xB5..0xB9 #Lo [5] GUJARATI LETTER VA..GUJARATI LETTE...
995 | 0xE0 0xAA 0xBC #Mn GUJARATI SIGN NUKTA
996 | 0xE0 0xAA 0xBD #Lo GUJARATI SIGN AVAGRAHA
997 | 0xE0 0xAA 0xBE..0xFF #Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI V...
998 | 0xE0 0xAB 0x00..0x80 #
999 | 0xE0 0xAB 0x81..0x85 #Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VO...
1000 | 0xE0 0xAB 0x87..0x88 #Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VO...
1001 | 0xE0 0xAB 0x89 #Mc GUJARATI VOWEL SIGN CANDRA O
1002 | 0xE0 0xAB 0x8B..0x8C #Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VO...
1003 | 0xE0 0xAB 0x8D #Mn GUJARATI SIGN VIRAMA
1004 | 0xE0 0xAB 0x90 #Lo GUJARATI OM
1005 | 0xE0 0xAB 0xA0..0xA1 #Lo [2] GUJARATI LETTER VOCALIC RR..GUJARA...
1006 | 0xE0 0xAB 0xA2..0xA3 #Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJ...
1007 | 0xE0 0xAB 0xA6..0xAF #Nd [10] GUJARATI DIGIT ZERO..GUJARATI DIGI...
1008 | 0xE0 0xAB 0xB9 #Lo GUJARATI LETTER ZHA
1009 | 0xE0 0xAC 0x81 #Mn ORIYA SIGN CANDRABINDU
1010 | 0xE0 0xAC 0x82..0x83 #Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VI...
1011 | 0xE0 0xAC 0x85..0x8C #Lo [8] ORIYA LETTER A..ORIYA LETTER VOCAL...
1012 | 0xE0 0xAC 0x8F..0x90 #Lo [2] ORIYA LETTER E..ORIYA LETTER AI
1013 | 0xE0 0xAC 0x93..0xA8 #Lo [22] ORIYA LETTER O..ORIYA LETTER NA
1014 | 0xE0 0xAC 0xAA..0xB0 #Lo [7] ORIYA LETTER PA..ORIYA LETTER RA
1015 | 0xE0 0xAC 0xB2..0xB3 #Lo [2] ORIYA LETTER LA..ORIYA LETTER LLA
1016 | 0xE0 0xAC 0xB5..0xB9 #Lo [5] ORIYA LETTER VA..ORIYA LETTER HA
1017 | 0xE0 0xAC 0xBC #Mn ORIYA SIGN NUKTA
1018 | 0xE0 0xAC 0xBD #Lo ORIYA SIGN AVAGRAHA
1019 | 0xE0 0xAC 0xBE #Mc ORIYA VOWEL SIGN AA
1020 | 0xE0 0xAC 0xBF #Mn ORIYA VOWEL SIGN I
1021 | 0xE0 0xAD 0x80 #Mc ORIYA VOWEL SIGN II
1022 | 0xE0 0xAD 0x81..0x84 #Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SI...
1023 | 0xE0 0xAD 0x87..0x88 #Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SI...
1024 | 0xE0 0xAD 0x8B..0x8C #Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SI...
1025 | 0xE0 0xAD 0x8D #Mn ORIYA SIGN VIRAMA
1026 | 0xE0 0xAD 0x96 #Mn ORIYA AI LENGTH MARK
1027 | 0xE0 0xAD 0x97 #Mc ORIYA AU LENGTH MARK
1028 | 0xE0 0xAD 0x9C..0x9D #Lo [2] ORIYA LETTER RRA..ORIYA LETTER RHA
1029 | 0xE0 0xAD 0x9F..0xA1 #Lo [3] ORIYA LETTER YYA..ORIYA LETTER VOC...
1030 | 0xE0 0xAD 0xA2..0xA3 #Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA ...
1031 | 0xE0 0xAD 0xA6..0xAF #Nd [10] ORIYA DIGIT ZERO..ORIYA DIGIT NINE
1032 | 0xE0 0xAD 0xB1 #Lo ORIYA LETTER WA
1033 | 0xE0 0xAE 0x82 #Mn TAMIL SIGN ANUSVARA
1034 | 0xE0 0xAE 0x83 #Lo TAMIL SIGN VISARGA
1035 | 0xE0 0xAE 0x85..0x8A #Lo [6] TAMIL LETTER A..TAMIL LETTER UU
1036 | 0xE0 0xAE 0x8E..0x90 #Lo [3] TAMIL LETTER E..TAMIL LETTER AI
1037 | 0xE0 0xAE 0x92..0x95 #Lo [4] TAMIL LETTER O..TAMIL LETTER KA
1038 | 0xE0 0xAE 0x99..0x9A #Lo [2] TAMIL LETTER NGA..TAMIL LETTER CA
1039 | 0xE0 0xAE 0x9C #Lo TAMIL LETTER JA
1040 | 0xE0 0xAE 0x9E..0x9F #Lo [2] TAMIL LETTER NYA..TAMIL LETTER TTA
1041 | 0xE0 0xAE 0xA3..0xA4 #Lo [2] TAMIL LETTER NNA..TAMIL LETTER TA
1042 | 0xE0 0xAE 0xA8..0xAA #Lo [3] TAMIL LETTER NA..TAMIL LETTER PA
1043 | 0xE0 0xAE 0xAE..0xB9 #Lo [12] TAMIL LETTER MA..TAMIL LETTER HA
1044 | 0xE0 0xAE 0xBE..0xBF #Mc [2] TAMIL VOWEL SIGN AA..TAMIL VOWEL S...
1045 | 0xE0 0xAF 0x80 #Mn TAMIL VOWEL SIGN II
1046 | 0xE0 0xAF 0x81..0x82 #Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SI...
1047 | 0xE0 0xAF 0x86..0x88 #Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SI...
1048 | 0xE0 0xAF 0x8A..0x8C #Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SI...
1049 | 0xE0 0xAF 0x8D #Mn TAMIL SIGN VIRAMA
1050 | 0xE0 0xAF 0x90 #Lo TAMIL OM
1051 | 0xE0 0xAF 0x97 #Mc TAMIL AU LENGTH MARK
1052 | 0xE0 0xAF 0xA6..0xAF #Nd [10] TAMIL DIGIT ZERO..TAMIL DIGIT NINE
1053 | 0xE0 0xB0 0x80 #Mn TELUGU SIGN COMBINING CANDRABINDU ...
1054 | 0xE0 0xB0 0x81..0x83 #Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SI...
1055 | 0xE0 0xB0 0x85..0x8C #Lo [8] TELUGU LETTER A..TELUGU LETTER VOC...
1056 | 0xE0 0xB0 0x8E..0x90 #Lo [3] TELUGU LETTER E..TELUGU LETTER AI
1057 | 0xE0 0xB0 0x92..0xA8 #Lo [23] TELUGU LETTER O..TELUGU LETTER NA
1058 | 0xE0 0xB0 0xAA..0xB9 #Lo [16] TELUGU LETTER PA..TELUGU LETTER HA
1059 | 0xE0 0xB0 0xBD #Lo TELUGU SIGN AVAGRAHA
1060 | 0xE0 0xB0 0xBE..0xFF #Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL...
1061 | 0xE0 0xB1 0x00..0x80 #
1062 | 0xE0 0xB1 0x81..0x84 #Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL ...
1063 | 0xE0 0xB1 0x86..0x88 #Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL ...
1064 | 0xE0 0xB1 0x8A..0x8D #Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN V...
1065 | 0xE0 0xB1 0x95..0x96 #Mn [2] TELUGU LENGTH MARK..TELUGU AI LENG...
1066 | 0xE0 0xB1 0x98..0x9A #Lo [3] TELUGU LETTER TSA..TELUGU LETTER RRRA
1067 | 0xE0 0xB1 0xA0..0xA1 #Lo [2] TELUGU LETTER VOCALIC RR..TELUGU L...
1068 | 0xE0 0xB1 0xA2..0xA3 #Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUG...
1069 | 0xE0 0xB1 0xA6..0xAF #Nd [10] TELUGU DIGIT ZERO..TELUGU DIGIT NINE
1070 | 0xE0 0xB2 0x80 #Lo KANNADA SIGN SPACING CANDRABINDU
1071 | 0xE0 0xB2 0x81 #Mn KANNADA SIGN CANDRABINDU
1072 | 0xE0 0xB2 0x82..0x83 #Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIG...
1073 | 0xE0 0xB2 0x85..0x8C #Lo [8] KANNADA LETTER A..KANNADA LETTER V...
1074 | 0xE0 0xB2 0x8E..0x90 #Lo [3] KANNADA LETTER E..KANNADA LETTER AI
1075 | 0xE0 0xB2 0x92..0xA8 #Lo [23] KANNADA LETTER O..KANNADA LETTER NA
1076 | 0xE0 0xB2 0xAA..0xB3 #Lo [10] KANNADA LETTER PA..KANNADA LETTER LLA
1077 | 0xE0 0xB2 0xB5..0xB9 #Lo [5] KANNADA LETTER VA..KANNADA LETTER HA
1078 | 0xE0 0xB2 0xBC #Mn KANNADA SIGN NUKTA
1079 | 0xE0 0xB2 0xBD #Lo KANNADA SIGN AVAGRAHA
1080 | 0xE0 0xB2 0xBE #Mc KANNADA VOWEL SIGN AA
1081 | 0xE0 0xB2 0xBF #Mn KANNADA VOWEL SIGN I
1082 | 0xE0 0xB3 0x80..0x84 #Mc [5] KANNADA VOWEL SIGN II..KANNADA VOW...
1083 | 0xE0 0xB3 0x86 #Mn KANNADA VOWEL SIGN E
1084 | 0xE0 0xB3 0x87..0x88 #Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOW...
1085 | 0xE0 0xB3 0x8A..0x8B #Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWE...
1086 | 0xE0 0xB3 0x8C..0x8D #Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIG...
1087 | 0xE0 0xB3 0x95..0x96 #Mc [2] KANNADA LENGTH MARK..KANNADA AI LE...
1088 | 0xE0 0xB3 0x9E #Lo KANNADA LETTER FA
1089 | 0xE0 0xB3 0xA0..0xA1 #Lo [2] KANNADA LETTER VOCALIC RR..KANNADA...
1090 | 0xE0 0xB3 0xA2..0xA3 #Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANN...
1091 | 0xE0 0xB3 0xA6..0xAF #Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT ...
1092 | 0xE0 0xB3 0xB1..0xB2 #Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA ...
1093 | 0xE0 0xB4 0x81 #Mn MALAYALAM SIGN CANDRABINDU
1094 | 0xE0 0xB4 0x82..0x83 #Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM...
1095 | 0xE0 0xB4 0x85..0x8C #Lo [8] MALAYALAM LETTER A..MALAYALAM LETT...
1096 | 0xE0 0xB4 0x8E..0x90 #Lo [3] MALAYALAM LETTER E..MALAYALAM LETT...
1097 | 0xE0 0xB4 0x92..0xBA #Lo [41] MALAYALAM LETTER O..MALAYALAM LETT...
1098 | 0xE0 0xB4 0xBD #Lo MALAYALAM SIGN AVAGRAHA
1099 | 0xE0 0xB4 0xBE..0xFF #Mc [3] MALAYALAM VOWEL SIGN AA..MALAYALAM...
1100 | 0xE0 0xB5 0x00..0x80 #
1101 | 0xE0 0xB5 0x81..0x84 #Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM ...
1102 | 0xE0 0xB5 0x86..0x88 #Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM ...
1103 | 0xE0 0xB5 0x8A..0x8C #Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM ...
1104 | 0xE0 0xB5 0x8D #Mn MALAYALAM SIGN VIRAMA
1105 | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH
1106 | 0xE0 0xB5 0x94..0x96 #Lo [3] MALAYALAM LETTER CHILLU M..MALAYAL...
1107 | 0xE0 0xB5 0x97 #Mc MALAYALAM AU LENGTH MARK
1108 | 0xE0 0xB5 0x9F..0xA1 #Lo [3] MALAYALAM LETTER ARCHAIC II..MALAY...
1109 | 0xE0 0xB5 0xA2..0xA3 #Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MA...
1110 | 0xE0 0xB5 0xA6..0xAF #Nd [10] MALAYALAM DIGIT ZERO..MALAYALAM DI...
1111 | 0xE0 0xB5 0xBA..0xBF #Lo [6] MALAYALAM LETTER CHILLU NN..MALAYA...
1112 | 0xE0 0xB6 0x82..0x83 #Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA S...
1113 | 0xE0 0xB6 0x85..0x96 #Lo [18] SINHALA LETTER AYANNA..SINHALA LET...
1114 | 0xE0 0xB6 0x9A..0xB1 #Lo [24] SINHALA LETTER ALPAPRAANA KAYANNA....
1115 | 0xE0 0xB6 0xB3..0xBB #Lo [9] SINHALA LETTER SANYAKA DAYANNA..SI...
1116 | 0xE0 0xB6 0xBD #Lo SINHALA LETTER DANTAJA LAYANNA
1117 | 0xE0 0xB7 0x80..0x86 #Lo [7] SINHALA LETTER VAYANNA..SINHALA LE...
1118 | 0xE0 0xB7 0x8A #Mn SINHALA SIGN AL-LAKUNA
1119 | 0xE0 0xB7 0x8F..0x91 #Mc [3] SINHALA VOWEL SIGN AELA-PILLA..SIN...
1120 | 0xE0 0xB7 0x92..0x94 #Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA....
1121 | 0xE0 0xB7 0x96 #Mn SINHALA VOWEL SIGN DIGA PAA-PILLA
1122 | 0xE0 0xB7 0x98..0x9F #Mc [8] SINHALA VOWEL SIGN GAETTA-PILLA..S...
1123 | 0xE0 0xB7 0xA6..0xAF #Nd [10] SINHALA LITH DIGIT ZERO..SINHALA L...
1124 | 0xE0 0xB7 0xB2..0xB3 #Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PIL...
1125 | 0xE0 0xB8 0x81..0xB0 #Lo [48] THAI CHARACTER KO KAI..THAI CHARAC...
1126 | 0xE0 0xB8 0xB1 #Mn THAI CHARACTER MAI HAN-AKAT
1127 | 0xE0 0xB8 0xB2..0xB3 #Lo [2] THAI CHARACTER SARA AA..THAI CHARA...
1128 | 0xE0 0xB8 0xB4..0xBA #Mn [7] THAI CHARACTER SARA I..THAI CHARAC...
1129 | 0xE0 0xB9 0x80..0x85 #Lo [6] THAI CHARACTER SARA E..THAI CHARAC...
1130 | 0xE0 0xB9 0x86 #Lm THAI CHARACTER MAIYAMOK
1131 | 0xE0 0xB9 0x87..0x8E #Mn [8] THAI CHARACTER MAITAIKHU..THAI CHA...
1132 | 0xE0 0xB9 0x90..0x99 #Nd [10] THAI DIGIT ZERO..THAI DIGIT NINE
1133 | 0xE0 0xBA 0x81..0x82 #Lo [2] LAO LETTER KO..LAO LETTER KHO SUNG
1134 | 0xE0 0xBA 0x84 #Lo LAO LETTER KHO TAM
1135 | 0xE0 0xBA 0x87..0x88 #Lo [2] LAO LETTER NGO..LAO LETTER CO
1136 | 0xE0 0xBA 0x8A #Lo LAO LETTER SO TAM
1137 | 0xE0 0xBA 0x8D #Lo LAO LETTER NYO
1138 | 0xE0 0xBA 0x94..0x97 #Lo [4] LAO LETTER DO..LAO LETTER THO TAM
1139 | 0xE0 0xBA 0x99..0x9F #Lo [7] LAO LETTER NO..LAO LETTER FO SUNG
1140 | 0xE0 0xBA 0xA1..0xA3 #Lo [3] LAO LETTER MO..LAO LETTER LO LING
1141 | 0xE0 0xBA 0xA5 #Lo LAO LETTER LO LOOT
1142 | 0xE0 0xBA 0xA7 #Lo LAO LETTER WO
1143 | 0xE0 0xBA 0xAA..0xAB #Lo [2] LAO LETTER SO SUNG..LAO LETTER HO ...
1144 | 0xE0 0xBA 0xAD..0xB0 #Lo [4] LAO LETTER O..LAO VOWEL SIGN A
1145 | 0xE0 0xBA 0xB1 #Mn LAO VOWEL SIGN MAI KAN
1146 | 0xE0 0xBA 0xB2..0xB3 #Lo [2] LAO VOWEL SIGN AA..LAO VOWEL SIGN AM
1147 | 0xE0 0xBA 0xB4..0xB9 #Mn [6] LAO VOWEL SIGN I..LAO VOWEL SIGN UU
1148 | 0xE0 0xBA 0xBB..0xBC #Mn [2] LAO VOWEL SIGN MAI KON..LAO SEMIVO...
1149 | 0xE0 0xBA 0xBD #Lo LAO SEMIVOWEL SIGN NYO
1150 | 0xE0 0xBB 0x80..0x84 #Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI
1151 | 0xE0 0xBB 0x86 #Lm LAO KO LA
1152 | 0xE0 0xBB 0x88..0x8D #Mn [6] LAO TONE MAI EK..LAO NIGGAHITA
1153 | 0xE0 0xBB 0x90..0x99 #Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE
1154 | 0xE0 0xBB 0x9C..0x9F #Lo [4] LAO HO NO..LAO LETTER KHMU NYO
1155 | 0xE0 0xBC 0x80 #Lo TIBETAN SYLLABLE OM
1156 | 0xE0 0xBC 0x98..0x99 #Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD P...
1157 | 0xE0 0xBC 0xA0..0xA9 #Nd [10] TIBETAN DIGIT ZERO..TIBETAN DIGIT ...
1158 | 0xE0 0xBC 0xB5 #Mn TIBETAN MARK NGAS BZUNG NYI ZLA
1159 | 0xE0 0xBC 0xB7 #Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS
1160 | 0xE0 0xBC 0xB9 #Mn TIBETAN MARK TSA -PHRU
1161 | 0xE0 0xBC 0xBE..0xBF #Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SI...
1162 | 0xE0 0xBD 0x80..0x87 #Lo [8] TIBETAN LETTER KA..TIBETAN LETTER JA
1163 | 0xE0 0xBD 0x89..0xAC #Lo [36] TIBETAN LETTER NYA..TIBETAN LETTER...
1164 | 0xE0 0xBD 0xB1..0xBE #Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIG...
1165 | 0xE0 0xBD 0xBF #Mc TIBETAN SIGN RNAM BCAD
1166 | 0xE0 0xBE 0x80..0x84 #Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIB...
1167 | 0xE0 0xBE 0x86..0x87 #Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SI...
1168 | 0xE0 0xBE 0x88..0x8C #Lo [5] TIBETAN SIGN LCE TSA CAN..TIBETAN ...
1169 | 0xE0 0xBE 0x8D..0x97 #Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN...
1170 | 0xE0 0xBE 0x99..0xBC #Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBE...
1171 | 0xE0 0xBF 0x86 #Mn TIBETAN SYMBOL PADMA GDAN
1172 | 0xE1 0x80 0x80..0xAA #Lo [43] MYANMAR LETTER KA..MYANMAR LETTER AU
1173 | 0xE1 0x80 0xAB..0xAC #Mc [2] MYANMAR VOWEL SIGN TALL AA..MYANMA...
1174 | 0xE1 0x80 0xAD..0xB0 #Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWE...
1175 | 0xE1 0x80 0xB1 #Mc MYANMAR VOWEL SIGN E
1176 | 0xE1 0x80 0xB2..0xB7 #Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIG...
1177 | 0xE1 0x80 0xB8 #Mc MYANMAR SIGN VISARGA
1178 | 0xE1 0x80 0xB9..0xBA #Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ...
1179 | 0xE1 0x80 0xBB..0xBC #Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA.....
1180 | 0xE1 0x80 0xBD..0xBE #Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA.....
1181 | 0xE1 0x80 0xBF #Lo MYANMAR LETTER GREAT SA
1182 | 0xE1 0x81 0x80..0x89 #Nd [10] MYANMAR DIGIT ZERO..MYANMAR DIGIT ...
1183 | 0xE1 0x81 0x90..0x95 #Lo [6] MYANMAR LETTER SHA..MYANMAR LETTER...
1184 | 0xE1 0x81 0x96..0x97 #Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYAN...
1185 | 0xE1 0x81 0x98..0x99 #Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYAN...
1186 | 0xE1 0x81 0x9A..0x9D #Lo [4] MYANMAR LETTER MON NGA..MYANMAR LE...
1187 | 0xE1 0x81 0x9E..0xA0 #Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL ...
1188 | 0xE1 0x81 0xA1 #Lo MYANMAR LETTER SGAW KAREN SHA
1189 | 0xE1 0x81 0xA2..0xA4 #Mc [3] MYANMAR VOWEL SIGN SGAW KAREN EU.....
1190 | 0xE1 0x81 0xA5..0xA6 #Lo [2] MYANMAR LETTER WESTERN PWO KAREN T...
1191 | 0xE1 0x81 0xA7..0xAD #Mc [7] MYANMAR VOWEL SIGN WESTERN PWO KAR...
1192 | 0xE1 0x81 0xAE..0xB0 #Lo [3] MYANMAR LETTER EASTERN PWO KAREN N...
1193 | 0xE1 0x81 0xB1..0xB4 #Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..M...
1194 | 0xE1 0x81 0xB5..0xFF #Lo [13] MYANMAR LETTER SHAN KA..MYANMAR LE...
1195 | 0xE1 0x82 0x00..0x81 #
1196 | 0xE1 0x82 0x82 #Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA
1197 | 0xE1 0x82 0x83..0x84 #Mc [2] MYANMAR VOWEL SIGN SHAN AA..MYANMA...
1198 | 0xE1 0x82 0x85..0x86 #Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..M...
1199 | 0xE1 0x82 0x87..0x8C #Mc [6] MYANMAR SIGN SHAN TONE-2..MYANMAR ...
1200 | 0xE1 0x82 0x8D #Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC...
1201 | 0xE1 0x82 0x8E #Lo MYANMAR LETTER RUMAI PALAUNG FA
1202 | 0xE1 0x82 0x8F #Mc MYANMAR SIGN RUMAI PALAUNG TONE-5
1203 | 0xE1 0x82 0x90..0x99 #Nd [10] MYANMAR SHAN DIGIT ZERO..MYANMAR S...
1204 | 0xE1 0x82 0x9A..0x9C #Mc [3] MYANMAR SIGN KHAMTI TONE-1..MYANMA...
1205 | 0xE1 0x82 0x9D #Mn MYANMAR VOWEL SIGN AITON AI
1206 | 0xE1 0x82 0xA0..0xFF #L& [38] GEORGIAN CAPITAL LETTER AN..GEORGI...
1207 | 0xE1 0x83 0x00..0x85 #
1208 | 0xE1 0x83 0x87 #L& GEORGIAN CAPITAL LETTER YN
1209 | 0xE1 0x83 0x8D #L& GEORGIAN CAPITAL LETTER AEN
1210 | 0xE1 0x83 0x90..0xBA #Lo [43] GEORGIAN LETTER AN..GEORGIAN LETTE...
1211 | 0xE1 0x83 0xBC #Lm MODIFIER LETTER GEORGIAN NAR
1212 | 0xE1 0x83 0xBD..0xFF #Lo [332] GEORGIAN LETTER AEN..ETHIOPIC ...
1213 | 0xE1 0x84..0x88 0x00..0xFF #
1214 | 0xE1 0x89 0x00..0x88 #
1215 | 0xE1 0x89 0x8A..0x8D #Lo [4] ETHIOPIC SYLLABLE QWI..ETHIOPIC SY...
1216 | 0xE1 0x89 0x90..0x96 #Lo [7] ETHIOPIC SYLLABLE QHA..ETHIOPIC SY...
1217 | 0xE1 0x89 0x98 #Lo ETHIOPIC SYLLABLE QHWA
1218 | 0xE1 0x89 0x9A..0x9D #Lo [4] ETHIOPIC SYLLABLE QHWI..ETHIOPIC S...
1219 | 0xE1 0x89 0xA0..0xFF #Lo [41] ETHIOPIC SYLLABLE BA..ETHIOPIC SYL...
1220 | 0xE1 0x8A 0x00..0x88 #
1221 | 0xE1 0x8A 0x8A..0x8D #Lo [4] ETHIOPIC SYLLABLE XWI..ETHIOPIC SY...
1222 | 0xE1 0x8A 0x90..0xB0 #Lo [33] ETHIOPIC SYLLABLE NA..ETHIOPIC SYL...
1223 | 0xE1 0x8A 0xB2..0xB5 #Lo [4] ETHIOPIC SYLLABLE KWI..ETHIOPIC SY...
1224 | 0xE1 0x8A 0xB8..0xBE #Lo [7] ETHIOPIC SYLLABLE KXA..ETHIOPIC SY...
1225 | 0xE1 0x8B 0x80 #Lo ETHIOPIC SYLLABLE KXWA
1226 | 0xE1 0x8B 0x82..0x85 #Lo [4] ETHIOPIC SYLLABLE KXWI..ETHIOPIC S...
1227 | 0xE1 0x8B 0x88..0x96 #Lo [15] ETHIOPIC SYLLABLE WA..ETHIOPIC SYL...
1228 | 0xE1 0x8B 0x98..0xFF #Lo [57] ETHIOPIC SYLLABLE ZA..ETHIOPIC SYL...
1229 | 0xE1 0x8C 0x00..0x90 #
1230 | 0xE1 0x8C 0x92..0x95 #Lo [4] ETHIOPIC SYLLABLE GWI..ETHIOPIC SY...
1231 | 0xE1 0x8C 0x98..0xFF #Lo [67] ETHIOPIC SYLLABLE GGA..ETHIOPIC SY...
1232 | 0xE1 0x8D 0x00..0x9A #
1233 | 0xE1 0x8D 0x9D..0x9F #Mn [3] ETHIOPIC COMBINING GEMINATION AND ...
1234 | 0xE1 0x8D 0xA9..0xB1 #No [9] ETHIOPIC DIGIT ONE..ETHIOPIC DIGIT...
1235 | 0xE1 0x8E 0x80..0x8F #Lo [16] ETHIOPIC SYLLABLE SEBATBEIT MWA..E...
1236 | 0xE1 0x8E 0xA0..0xFF #L& [86] CHEROKEE LETTER A..CHEROKEE LETTER MV
1237 | 0xE1 0x8F 0x00..0xB5 #
1238 | 0xE1 0x8F 0xB8..0xBD #L& [6] CHEROKEE SMALL LETTER YE..CHEROKEE...
1239 | 0xE1 0x90 0x81..0xFF #Lo [620] CANADIAN SYLLABICS E..CANADIAN...
1240 | 0xE1 0x91..0x98 0x00..0xFF #
1241 | 0xE1 0x99 0x00..0xAC #
1242 | 0xE1 0x99 0xAF..0xBF #Lo [17] CANADIAN SYLLABICS QAI..CANADIAN S...
1243 | 0xE1 0x9A 0x81..0x9A #Lo [26] OGHAM LETTER BEITH..OGHAM LETTER P...
1244 | 0xE1 0x9A 0xA0..0xFF #Lo [75] RUNIC LETTER FEHU FEOH FE F..RUNIC...
1245 | 0xE1 0x9B 0x00..0xAA #
1246 | 0xE1 0x9B 0xAE..0xB0 #Nl [3] RUNIC ARLAUG SYMBOL..RUNIC BELGTHO...
1247 | 0xE1 0x9B 0xB1..0xB8 #Lo [8] RUNIC LETTER K..RUNIC LETTER FRANK...
1248 | 0xE1 0x9C 0x80..0x8C #Lo [13] TAGALOG LETTER A..TAGALOG LETTER YA
1249 | 0xE1 0x9C 0x8E..0x91 #Lo [4] TAGALOG LETTER LA..TAGALOG LETTER HA
1250 | 0xE1 0x9C 0x92..0x94 #Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN...
1251 | 0xE1 0x9C 0xA0..0xB1 #Lo [18] HANUNOO LETTER A..HANUNOO LETTER HA
1252 | 0xE1 0x9C 0xB2..0xB4 #Mn [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN...
1253 | 0xE1 0x9D 0x80..0x91 #Lo [18] BUHID LETTER A..BUHID LETTER HA
1254 | 0xE1 0x9D 0x92..0x93 #Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SI...
1255 | 0xE1 0x9D 0xA0..0xAC #Lo [13] TAGBANWA LETTER A..TAGBANWA LETTER YA
1256 | 0xE1 0x9D 0xAE..0xB0 #Lo [3] TAGBANWA LETTER LA..TAGBANWA LETTE...
1257 | 0xE1 0x9D 0xB2..0xB3 #Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VO...
1258 | 0xE1 0x9E 0x80..0xB3 #Lo [52] KHMER LETTER KA..KHMER INDEPENDENT...
1259 | 0xE1 0x9E 0xB4..0xB5 #Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOW...
1260 | 0xE1 0x9E 0xB6 #Mc KHMER VOWEL SIGN AA
1261 | 0xE1 0x9E 0xB7..0xBD #Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SI...
1262 | 0xE1 0x9E 0xBE..0xFF #Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL S...
1263 | 0xE1 0x9F 0x00..0x85 #
1264 | 0xE1 0x9F 0x86 #Mn KHMER SIGN NIKAHIT
1265 | 0xE1 0x9F 0x87..0x88 #Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUU...
1266 | 0xE1 0x9F 0x89..0x93 #Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN...
1267 | 0xE1 0x9F 0x97 #Lm KHMER SIGN LEK TOO
1268 | 0xE1 0x9F 0x9C #Lo KHMER SIGN AVAKRAHASANYA
1269 | 0xE1 0x9F 0x9D #Mn KHMER SIGN ATTHACAN
1270 | 0xE1 0x9F 0xA0..0xA9 #Nd [10] KHMER DIGIT ZERO..KHMER DIGIT NINE
1271 | 0xE1 0xA0 0x8B..0x8D #Mn [3] MONGOLIAN FREE VARIATION SELECTOR ...
1272 | 0xE1 0xA0 0x90..0x99 #Nd [10] MONGOLIAN DIGIT ZERO..MONGOLIAN DI...
1273 | 0xE1 0xA0 0xA0..0xFF #Lo [35] MONGOLIAN LETTER A..MONGOLIAN LETT...
1274 | 0xE1 0xA1 0x00..0x82 #
1275 | 0xE1 0xA1 0x83 #Lm MONGOLIAN LETTER TODO LONG VOWEL SIGN
1276 | 0xE1 0xA1 0x84..0xB7 #Lo [52] MONGOLIAN LETTER TODO E..MONGOLIAN...
1277 | 0xE1 0xA2 0x80..0x84 #Lo [5] MONGOLIAN LETTER ALI GALI ANUSVARA...
1278 | 0xE1 0xA2 0x85..0x86 #Mn [2] MONGOLIAN LETTER ALI GALI BALUDA.....
1279 | 0xE1 0xA2 0x87..0xA8 #Lo [34] MONGOLIAN LETTER ALI GALI A..MONGO...
1280 | 0xE1 0xA2 0xA9 #Mn MONGOLIAN LETTER ALI GALI DAGALGA
1281 | 0xE1 0xA2 0xAA #Lo MONGOLIAN LETTER MANCHU ALI GALI LHA
1282 | 0xE1 0xA2 0xB0..0xFF #Lo [70] CANADIAN SYLLABICS OY..CANADIAN SY...
1283 | 0xE1 0xA3 0x00..0xB5 #
1284 | 0xE1 0xA4 0x80..0x9E #Lo [31] LIMBU VOWEL-CARRIER LETTER..LIMBU ...
1285 | 0xE1 0xA4 0xA0..0xA2 #Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SI...
1286 | 0xE1 0xA4 0xA3..0xA6 #Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL S...
1287 | 0xE1 0xA4 0xA7..0xA8 #Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SI...
1288 | 0xE1 0xA4 0xA9..0xAB #Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU S...
1289 | 0xE1 0xA4 0xB0..0xB1 #Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL...
1290 | 0xE1 0xA4 0xB2 #Mn LIMBU SMALL LETTER ANUSVARA
1291 | 0xE1 0xA4 0xB3..0xB8 #Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL...
1292 | 0xE1 0xA4 0xB9..0xBB #Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I
1293 | 0xE1 0xA5 0x86..0x8F #Nd [10] LIMBU DIGIT ZERO..LIMBU DIGIT NINE
1294 | 0xE1 0xA5 0x90..0xAD #Lo [30] TAI LE LETTER KA..TAI LE LETTER AI
1295 | 0xE1 0xA5 0xB0..0xB4 #Lo [5] TAI LE LETTER TONE-2..TAI LE LETTE...
1296 | 0xE1 0xA6 0x80..0xAB #Lo [44] NEW TAI LUE LETTER HIGH QA..NEW TA...
1297 | 0xE1 0xA6 0xB0..0xFF #Lo [26] NEW TAI LUE VOWEL SIGN VOWEL SHORT...
1298 | 0xE1 0xA7 0x00..0x89 #
1299 | 0xE1 0xA7 0x90..0x99 #Nd [10] NEW TAI LUE DIGIT ZERO..NEW TAI LU...
1300 | 0xE1 0xA7 0x9A #No NEW TAI LUE THAM DIGIT ONE
1301 | 0xE1 0xA8 0x80..0x96 #Lo [23] BUGINESE LETTER KA..BUGINESE LETTE...
1302 | 0xE1 0xA8 0x97..0x98 #Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VO...
1303 | 0xE1 0xA8 0x99..0x9A #Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VO...
1304 | 0xE1 0xA8 0x9B #Mn BUGINESE VOWEL SIGN AE
1305 | 0xE1 0xA8 0xA0..0xFF #Lo [53] TAI THAM LETTER HIGH KA..TAI THAM ...
1306 | 0xE1 0xA9 0x00..0x94 #
1307 | 0xE1 0xA9 0x95 #Mc TAI THAM CONSONANT SIGN MEDIAL RA
1308 | 0xE1 0xA9 0x96 #Mn TAI THAM CONSONANT SIGN MEDIAL LA
1309 | 0xE1 0xA9 0x97 #Mc TAI THAM CONSONANT SIGN LA TANG LAI
1310 | 0xE1 0xA9 0x98..0x9E #Mn [7] TAI THAM SIGN MAI KANG LAI..TAI TH...
1311 | 0xE1 0xA9 0xA0 #Mn TAI THAM SIGN SAKOT
1312 | 0xE1 0xA9 0xA1 #Mc TAI THAM VOWEL SIGN A
1313 | 0xE1 0xA9 0xA2 #Mn TAI THAM VOWEL SIGN MAI SAT
1314 | 0xE1 0xA9 0xA3..0xA4 #Mc [2] TAI THAM VOWEL SIGN AA..TAI THAM V...
1315 | 0xE1 0xA9 0xA5..0xAC #Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VO...
1316 | 0xE1 0xA9 0xAD..0xB2 #Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM V...
1317 | 0xE1 0xA9 0xB3..0xBC #Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI ...
1318 | 0xE1 0xA9 0xBF #Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT
1319 | 0xE1 0xAA 0x80..0x89 #Nd [10] TAI THAM HORA DIGIT ZERO..TAI THAM...
1320 | 0xE1 0xAA 0x90..0x99 #Nd [10] TAI THAM THAM DIGIT ZERO..TAI THAM...
1321 | 0xE1 0xAA 0xA7 #Lm TAI THAM SIGN MAI YAMOK
1322 | 0xE1 0xAA 0xB0..0xBD #Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCEN...
1323 | 0xE1 0xAC 0x80..0x83 #Mn [4] BALINESE SIGN ULU RICEM..BALINESE ...
1324 | 0xE1 0xAC 0x84 #Mc BALINESE SIGN BISAH
1325 | 0xE1 0xAC 0x85..0xB3 #Lo [47] BALINESE LETTER AKARA..BALINESE LE...
1326 | 0xE1 0xAC 0xB4 #Mn BALINESE SIGN REREKAN
1327 | 0xE1 0xAC 0xB5 #Mc BALINESE VOWEL SIGN TEDUNG
1328 | 0xE1 0xAC 0xB6..0xBA #Mn [5] BALINESE VOWEL SIGN ULU..BALINESE ...
1329 | 0xE1 0xAC 0xBB #Mc BALINESE VOWEL SIGN RA REPA TEDUNG
1330 | 0xE1 0xAC 0xBC #Mn BALINESE VOWEL SIGN LA LENGA
1331 | 0xE1 0xAC 0xBD..0xFF #Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUN...
1332 | 0xE1 0xAD 0x00..0x81 #
1333 | 0xE1 0xAD 0x82 #Mn BALINESE VOWEL SIGN PEPET
1334 | 0xE1 0xAD 0x83..0x84 #Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG.....
1335 | 0xE1 0xAD 0x85..0x8B #Lo [7] BALINESE LETTER KAF SASAK..BALINES...
1336 | 0xE1 0xAD 0x90..0x99 #Nd [10] BALINESE DIGIT ZERO..BALINESE DIGI...
1337 | 0xE1 0xAD 0xAB..0xB3 #Mn [9] BALINESE MUSICAL SYMBOL COMBINING ...
1338 | 0xE1 0xAE 0x80..0x81 #Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE...
1339 | 0xE1 0xAE 0x82 #Mc SUNDANESE SIGN PANGWISAD
1340 | 0xE1 0xAE 0x83..0xA0 #Lo [30] SUNDANESE LETTER A..SUNDANESE LETT...
1341 | 0xE1 0xAE 0xA1 #Mc SUNDANESE CONSONANT SIGN PAMINGKAL
1342 | 0xE1 0xAE 0xA2..0xA5 #Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA....
1343 | 0xE1 0xAE 0xA6..0xA7 #Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..S...
1344 | 0xE1 0xAE 0xA8..0xA9 #Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUND...
1345 | 0xE1 0xAE 0xAA #Mc SUNDANESE SIGN PAMAAEH
1346 | 0xE1 0xAE 0xAB..0xAD #Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE C...
1347 | 0xE1 0xAE 0xAE..0xAF #Lo [2] SUNDANESE LETTER KHA..SUNDANESE LE...
1348 | 0xE1 0xAE 0xB0..0xB9 #Nd [10] SUNDANESE DIGIT ZERO..SUNDANESE DI...
1349 | 0xE1 0xAE 0xBA..0xFF #Lo [44] SUNDANESE AVAGRAHA..BATAK LETTER U
1350 | 0xE1 0xAF 0x00..0xA5 #
1351 | 0xE1 0xAF 0xA6 #Mn BATAK SIGN TOMPI
1352 | 0xE1 0xAF 0xA7 #Mc BATAK VOWEL SIGN E
1353 | 0xE1 0xAF 0xA8..0xA9 #Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK V...
1354 | 0xE1 0xAF 0xAA..0xAC #Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SI...
1355 | 0xE1 0xAF 0xAD #Mn BATAK VOWEL SIGN KARO O
1356 | 0xE1 0xAF 0xAE #Mc BATAK VOWEL SIGN U
1357 | 0xE1 0xAF 0xAF..0xB1 #Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN ...
1358 | 0xE1 0xAF 0xB2..0xB3 #Mc [2] BATAK PANGOLAT..BATAK PANONGONAN
1359 | 0xE1 0xB0 0x80..0xA3 #Lo [36] LEPCHA LETTER KA..LEPCHA LETTER A
1360 | 0xE1 0xB0 0xA4..0xAB #Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA...
1361 | 0xE1 0xB0 0xAC..0xB3 #Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSON...
1362 | 0xE1 0xB0 0xB4..0xB5 #Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEP...
1363 | 0xE1 0xB0 0xB6..0xB7 #Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA
1364 | 0xE1 0xB1 0x80..0x89 #Nd [10] LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE
1365 | 0xE1 0xB1 0x8D..0x8F #Lo [3] LEPCHA LETTER TTA..LEPCHA LETTER DDA
1366 | 0xE1 0xB1 0x90..0x99 #Nd [10] OL CHIKI DIGIT ZERO..OL CHIKI DIGI...
1367 | 0xE1 0xB1 0x9A..0xB7 #Lo [30] OL CHIKI LETTER LA..OL CHIKI LETTE...
1368 | 0xE1 0xB1 0xB8..0xBD #Lm [6] OL CHIKI MU TTUDDAG..OL CHIKI AHAD
1369 | 0xE1 0xB2 0x80..0x88 #L& [9] CYRILLIC SMALL LETTER ROUNDED VE.....
1370 | 0xE1 0xB3 0x90..0x92 #Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PR...
1371 | 0xE1 0xB3 0x94..0xA0 #Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVAR...
1372 | 0xE1 0xB3 0xA1 #Mc VEDIC TONE ATHARVAVEDIC INDEPENDEN...
1373 | 0xE1 0xB3 0xA2..0xA8 #Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC ...
1374 | 0xE1 0xB3 0xA9..0xAC #Lo [4] VEDIC SIGN ANUSVARA ANTARGOMUKHA.....
1375 | 0xE1 0xB3 0xAD #Mn VEDIC SIGN TIRYAK
1376 | 0xE1 0xB3 0xAE..0xB1 #Lo [4] VEDIC SIGN HEXIFORM LONG ANUSVARA....
1377 | 0xE1 0xB3 0xB2..0xB3 #Mc [2] VEDIC SIGN ARDHAVISARGA..VEDIC SIG...
1378 | 0xE1 0xB3 0xB4 #Mn VEDIC TONE CANDRA ABOVE
1379 | 0xE1 0xB3 0xB5..0xB6 #Lo [2] VEDIC SIGN JIHVAMULIYA..VEDIC SIGN...
1380 | 0xE1 0xB3 0xB8..0xB9 #Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE ...
1381 | 0xE1 0xB4 0x80..0xAB #L& [44] LATIN LETTER SMALL CAPITAL A..CYRI...
1382 | 0xE1 0xB4 0xAC..0xFF #Lm [63] MODIFIER LETTER CAPITAL A..GREEK S...
1383 | 0xE1 0xB5 0x00..0xAA #
1384 | 0xE1 0xB5 0xAB..0xB7 #L& [13] LATIN SMALL LETTER UE..LATIN SMALL...
1385 | 0xE1 0xB5 0xB8 #Lm MODIFIER LETTER CYRILLIC EN
1386 | 0xE1 0xB5 0xB9..0xFF #L& [34] LATIN SMALL LETTER INSULAR G..LATI...
1387 | 0xE1 0xB6 0x00..0x9A #
1388 | 0xE1 0xB6 0x9B..0xBF #Lm [37] MODIFIER LETTER SMALL TURNED ALPHA...
1389 | 0xE1 0xB7 0x80..0xB5 #Mn [54] COMBINING DOTTED GRAVE ACCENT..COM...
1390 | 0xE1 0xB7 0xBB..0xBF #Mn [5] COMBINING DELETION MARK..COMBINING...
1391 | 0xE1 0xB8 0x80..0xFF #L& [278] LATIN CAPITAL LETTER A WITH RI...
1392 | 0xE1 0xB9..0xBB 0x00..0xFF #
1393 | 0xE1 0xBC 0x00..0x95 #
1394 | 0xE1 0xBC 0x98..0x9D #L& [6] GREEK CAPITAL LETTER EPSILON WITH ...
1395 | 0xE1 0xBC 0xA0..0xFF #L& [38] GREEK SMALL LETTER ETA WITH PSILI....
1396 | 0xE1 0xBD 0x00..0x85 #
1397 | 0xE1 0xBD 0x88..0x8D #L& [6] GREEK CAPITAL LETTER OMICRON WITH ...
1398 | 0xE1 0xBD 0x90..0x97 #L& [8] GREEK SMALL LETTER UPSILON WITH PS...
1399 | 0xE1 0xBD 0x99 #L& GREEK CAPITAL LETTER UPSILON WITH ...
1400 | 0xE1 0xBD 0x9B #L& GREEK CAPITAL LETTER UPSILON WITH ...
1401 | 0xE1 0xBD 0x9D #L& GREEK CAPITAL LETTER UPSILON WITH ...
1402 | 0xE1 0xBD 0x9F..0xBD #L& [31] GREEK CAPITAL LETTER UPSILON WITH ...
1403 | 0xE1 0xBE 0x80..0xB4 #L& [53] GREEK SMALL LETTER ALPHA WITH PSIL...
1404 | 0xE1 0xBE 0xB6..0xBC #L& [7] GREEK SMALL LETTER ALPHA WITH PERI...
1405 | 0xE1 0xBE 0xBE #L& GREEK PROSGEGRAMMENI
1406 | 0xE1 0xBF 0x82..0x84 #L& [3] GREEK SMALL LETTER ETA WITH VARIA ...
1407 | 0xE1 0xBF 0x86..0x8C #L& [7] GREEK SMALL LETTER ETA WITH PERISP...
1408 | 0xE1 0xBF 0x90..0x93 #L& [4] GREEK SMALL LETTER IOTA WITH VRACH...
1409 | 0xE1 0xBF 0x96..0x9B #L& [6] GREEK SMALL LETTER IOTA WITH PERIS...
1410 | 0xE1 0xBF 0xA0..0xAC #L& [13] GREEK SMALL LETTER UPSILON WITH VR...
1411 | 0xE1 0xBF 0xB2..0xB4 #L& [3] GREEK SMALL LETTER OMEGA WITH VARI...
1412 | 0xE1 0xBF 0xB6..0xBC #L& [7] GREEK SMALL LETTER OMEGA WITH PERI...
1413 | 0xE2 0x80 0xBF..0xFF #Pc [2] UNDERTIE..CHARACTER TIE
1414 | 0xE2 0x81 0x00..0x80 #
1415 | 0xE2 0x81 0x94 #Pc INVERTED UNDERTIE
1416 | 0xE2 0x81 0xB1 #Lm SUPERSCRIPT LATIN SMALL LETTER I
1417 | 0xE2 0x81 0xBF #Lm SUPERSCRIPT LATIN SMALL LETTER N
1418 | 0xE2 0x82 0x90..0x9C #Lm [13] LATIN SUBSCRIPT SMALL LETTER A..LA...
1419 | 0xE2 0x83 0x90..0x9C #Mn [13] COMBINING LEFT HARPOON ABOVE..COMB...
1420 | 0xE2 0x83 0xA1 #Mn COMBINING LEFT RIGHT ARROW ABOVE
1421 | 0xE2 0x83 0xA5..0xB0 #Mn [12] COMBINING REVERSE SOLIDUS OVERLAY....
1422 | 0xE2 0x84 0x82 #L& DOUBLE-STRUCK CAPITAL C
1423 | 0xE2 0x84 0x87 #L& EULER CONSTANT
1424 | 0xE2 0x84 0x8A..0x93 #L& [10] SCRIPT SMALL G..SCRIPT SMALL L
1425 | 0xE2 0x84 0x95 #L& DOUBLE-STRUCK CAPITAL N
1426 | 0xE2 0x84 0x98 #Sm SCRIPT CAPITAL P
1427 | 0xE2 0x84 0x99..0x9D #L& [5] DOUBLE-STRUCK CAPITAL P..DOUBLE-ST...
1428 | 0xE2 0x84 0xA4 #L& DOUBLE-STRUCK CAPITAL Z
1429 | 0xE2 0x84 0xA6 #L& OHM SIGN
1430 | 0xE2 0x84 0xA8 #L& BLACK-LETTER CAPITAL Z
1431 | 0xE2 0x84 0xAA..0xAD #L& [4] KELVIN SIGN..BLACK-LETTER CAPITAL C
1432 | 0xE2 0x84 0xAE #So ESTIMATED SYMBOL
1433 | 0xE2 0x84 0xAF..0xB4 #L& [6] SCRIPT SMALL E..SCRIPT SMALL O
1434 | 0xE2 0x84 0xB5..0xB8 #Lo [4] ALEF SYMBOL..DALET SYMBOL
1435 | 0xE2 0x84 0xB9 #L& INFORMATION SOURCE
1436 | 0xE2 0x84 0xBC..0xBF #L& [4] DOUBLE-STRUCK SMALL PI..DOUBLE-STR...
1437 | 0xE2 0x85 0x85..0x89 #L& [5] DOUBLE-STRUCK ITALIC CAPITAL D..DO...
1438 | 0xE2 0x85 0x8E #L& TURNED SMALL F
1439 | 0xE2 0x85 0xA0..0xFF #Nl [35] ROMAN NUMERAL ONE..ROMAN NUMERAL T...
1440 | 0xE2 0x86 0x00..0x82 #
1441 | 0xE2 0x86 0x83..0x84 #L& [2] ROMAN NUMERAL REVERSED ONE HUNDRED...
1442 | 0xE2 0x86 0x85..0x88 #Nl [4] ROMAN NUMERAL SIX LATE FORM..ROMAN...
1443 | 0xE2 0xB0 0x80..0xAE #L& [47] GLAGOLITIC CAPITAL LETTER AZU..GLA...
1444 | 0xE2 0xB0 0xB0..0xFF #L& [47] GLAGOLITIC SMALL LETTER AZU..GLAGO...
1445 | 0xE2 0xB1 0x00..0x9E #
1446 | 0xE2 0xB1 0xA0..0xBB #L& [28] LATIN CAPITAL LETTER L WITH DOUBLE...
1447 | 0xE2 0xB1 0xBC..0xBD #Lm [2] LATIN SUBSCRIPT SMALL LETTER J..MO...
1448 | 0xE2 0xB1 0xBE..0xFF #L& [103] LATIN CAPITAL LETTER S WITH SW...
1449 | 0xE2 0xB2..0xB2 0x00..0xFF #
1450 | 0xE2 0xB3 0x00..0xA4 #
1451 | 0xE2 0xB3 0xAB..0xAE #L& [4] COPTIC CAPITAL LETTER CRYPTOGRAMMI...
1452 | 0xE2 0xB3 0xAF..0xB1 #Mn [3] COPTIC COMBINING NI ABOVE..COPTIC ...
1453 | 0xE2 0xB3 0xB2..0xB3 #L& [2] COPTIC CAPITAL LETTER BOHAIRIC KHE...
1454 | 0xE2 0xB4 0x80..0xA5 #L& [38] GEORGIAN SMALL LETTER AN..GEORGIAN...
1455 | 0xE2 0xB4 0xA7 #L& GEORGIAN SMALL LETTER YN
1456 | 0xE2 0xB4 0xAD #L& GEORGIAN SMALL LETTER AEN
1457 | 0xE2 0xB4 0xB0..0xFF #Lo [56] TIFINAGH LETTER YA..TIFINAGH LETTE...
1458 | 0xE2 0xB5 0x00..0xA7 #
1459 | 0xE2 0xB5 0xAF #Lm TIFINAGH MODIFIER LETTER LABIALIZA...
1460 | 0xE2 0xB5 0xBF #Mn TIFINAGH CONSONANT JOINER
1461 | 0xE2 0xB6 0x80..0x96 #Lo [23] ETHIOPIC SYLLABLE LOA..ETHIOPIC SY...
1462 | 0xE2 0xB6 0xA0..0xA6 #Lo [7] ETHIOPIC SYLLABLE SSA..ETHIOPIC SY...
1463 | 0xE2 0xB6 0xA8..0xAE #Lo [7] ETHIOPIC SYLLABLE CCA..ETHIOPIC SY...
1464 | 0xE2 0xB6 0xB0..0xB6 #Lo [7] ETHIOPIC SYLLABLE ZZA..ETHIOPIC SY...
1465 | 0xE2 0xB6 0xB8..0xBE #Lo [7] ETHIOPIC SYLLABLE CCHA..ETHIOPIC S...
1466 | 0xE2 0xB7 0x80..0x86 #Lo [7] ETHIOPIC SYLLABLE QYA..ETHIOPIC SY...
1467 | 0xE2 0xB7 0x88..0x8E #Lo [7] ETHIOPIC SYLLABLE KYA..ETHIOPIC SY...
1468 | 0xE2 0xB7 0x90..0x96 #Lo [7] ETHIOPIC SYLLABLE XYA..ETHIOPIC SY...
1469 | 0xE2 0xB7 0x98..0x9E #Lo [7] ETHIOPIC SYLLABLE GYA..ETHIOPIC SY...
1470 | 0xE2 0xB7 0xA0..0xBF #Mn [32] COMBINING CYRILLIC LETTER BE..COMB...
1471 | 0xE3 0x80 0x85 #Lm IDEOGRAPHIC ITERATION MARK
1472 | 0xE3 0x80 0x86 #Lo IDEOGRAPHIC CLOSING MARK
1473 | 0xE3 0x80 0x87 #Nl IDEOGRAPHIC NUMBER ZERO
1474 | 0xE3 0x80 0xA1..0xA9 #Nl [9] HANGZHOU NUMERAL ONE..HANGZHOU NUM...
1475 | 0xE3 0x80 0xAA..0xAD #Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOG...
1476 | 0xE3 0x80 0xAE..0xAF #Mc [2] HANGUL SINGLE DOT TONE MARK..HANGU...
1477 | 0xE3 0x80 0xB1..0xB5 #Lm [5] VERTICAL KANA REPEAT MARK..VERTICA...
1478 | 0xE3 0x80 0xB8..0xBA #Nl [3] HANGZHOU NUMERAL TEN..HANGZHOU NUM...
1479 | 0xE3 0x80 0xBB #Lm VERTICAL IDEOGRAPHIC ITERATION MARK
1480 | 0xE3 0x80 0xBC #Lo MASU MARK
1481 | 0xE3 0x81 0x81..0xFF #Lo [86] HIRAGANA LETTER SMALL A..HIRAGANA ...
1482 | 0xE3 0x82 0x00..0x96 #
1483 | 0xE3 0x82 0x99..0x9A #Mn [2] COMBINING KATAKANA-HIRAGANA VOICED...
1484 | 0xE3 0x82 0x9B..0x9C #Sk [2] KATAKANA-HIRAGANA VOICED SOUND MAR...
1485 | 0xE3 0x82 0x9D..0x9E #Lm [2] HIRAGANA ITERATION MARK..HIRAGANA ...
1486 | 0xE3 0x82 0x9F #Lo HIRAGANA DIGRAPH YORI
1487 | 0xE3 0x82 0xA1..0xFF #Lo [90] KATAKANA LETTER SMALL A..KATAKANA ...
1488 | 0xE3 0x83 0x00..0xBA #
1489 | 0xE3 0x83 0xBC..0xBE #Lm [3] KATAKANA-HIRAGANA PROLONGED SOUND ...
1490 | 0xE3 0x83 0xBF #Lo KATAKANA DIGRAPH KOTO
1491 | 0xE3 0x84 0x85..0xAD #Lo [41] BOPOMOFO LETTER B..BOPOMOFO LETTER IH
1492 | 0xE3 0x84 0xB1..0xFF #Lo [94] HANGUL LETTER KIYEOK..HANGUL L...
1493 | 0xE3 0x85..0x85 0x00..0xFF #
1494 | 0xE3 0x86 0x00..0x8E #
1495 | 0xE3 0x86 0xA0..0xBA #Lo [27] BOPOMOFO LETTER BU..BOPOMOFO LETTE...
1496 | 0xE3 0x87 0xB0..0xBF #Lo [16] KATAKANA LETTER SMALL KU..KATAKANA...
1497 | 0xE3 0x90 0x80..0xFF #Lo [6582] CJK UNIFIED IDEOGRAPH-3400..C...
1498 | 0xE3 0x91..0xFF 0x00..0xFF #
1499 | 0xE4 0x00 0x00..0xFF #
1500 | 0xE4 0x01..0xB5 0x00..0xFF #
1501 | 0xE4 0xB6 0x00..0xB5 #
1502 | 0xE4 0xB8 0x80..0xFF #Lo [20950] CJK UNIFIED IDEOGRAPH-...
1503 | 0xE4 0xB9..0xFF 0x00..0xFF #
1504 | 0xE5..0xE8 0x00..0xFF 0x00..0xFF #
1505 | 0xE9 0x00 0x00..0xFF #
1506 | 0xE9 0x01..0xBE 0x00..0xFF #
1507 | 0xE9 0xBF 0x00..0x95 #
1508 | 0xEA 0x80 0x80..0x94 #Lo [21] YI SYLLABLE IT..YI SYLLABLE E
1509 | 0xEA 0x80 0x95 #Lm YI SYLLABLE WU
1510 | 0xEA 0x80 0x96..0xFF #Lo [1143] YI SYLLABLE BIT..YI SYLLABLE YYR
1511 | 0xEA 0x81..0x91 0x00..0xFF #
1512 | 0xEA 0x92 0x00..0x8C #
1513 | 0xEA 0x93 0x90..0xB7 #Lo [40] LISU LETTER BA..LISU LETTER OE
1514 | 0xEA 0x93 0xB8..0xBD #Lm [6] LISU LETTER TONE MYA TI..LISU LETT...
1515 | 0xEA 0x94 0x80..0xFF #Lo [268] VAI SYLLABLE EE..VAI SYLLABLE NG
1516 | 0xEA 0x95..0x97 0x00..0xFF #
1517 | 0xEA 0x98 0x00..0x8B #
1518 | 0xEA 0x98 0x8C #Lm VAI SYLLABLE LENGTHENER
1519 | 0xEA 0x98 0x90..0x9F #Lo [16] VAI SYLLABLE NDOLE FA..VAI SYMBOL ...
1520 | 0xEA 0x98 0xA0..0xA9 #Nd [10] VAI DIGIT ZERO..VAI DIGIT NINE
1521 | 0xEA 0x98 0xAA..0xAB #Lo [2] VAI SYLLABLE NDOLE MA..VAI SYLLABL...
1522 | 0xEA 0x99 0x80..0xAD #L& [46] CYRILLIC CAPITAL LETTER ZEMLYA..CY...
1523 | 0xEA 0x99 0xAE #Lo CYRILLIC LETTER MULTIOCULAR O
1524 | 0xEA 0x99 0xAF #Mn COMBINING CYRILLIC VZMET
1525 | 0xEA 0x99 0xB4..0xBD #Mn [10] COMBINING CYRILLIC LETTER UKRAINIA...
1526 | 0xEA 0x99 0xBF #Lm CYRILLIC PAYEROK
1527 | 0xEA 0x9A 0x80..0x9B #L& [28] CYRILLIC CAPITAL LETTER DWE..CYRIL...
1528 | 0xEA 0x9A 0x9C..0x9D #Lm [2] MODIFIER LETTER CYRILLIC HARD SIGN...
1529 | 0xEA 0x9A 0x9E..0x9F #Mn [2] COMBINING CYRILLIC LETTER EF..COMB...
1530 | 0xEA 0x9A 0xA0..0xFF #Lo [70] BAMUM LETTER A..BAMUM LETTER KI
1531 | 0xEA 0x9B 0x00..0xA5 #
1532 | 0xEA 0x9B 0xA6..0xAF #Nl [10] BAMUM LETTER MO..BAMUM LETTER KOGHOM
1533 | 0xEA 0x9B 0xB0..0xB1 #Mn [2] BAMUM COMBINING MARK KOQNDON..BAMU...
1534 | 0xEA 0x9C 0x97..0x9F #Lm [9] MODIFIER LETTER DOT VERTICAL BAR.....
1535 | 0xEA 0x9C 0xA2..0xFF #L& [78] LATIN CAPITAL LETTER EGYPTOLOGICAL...
1536 | 0xEA 0x9D 0x00..0xAF #
1537 | 0xEA 0x9D 0xB0 #Lm MODIFIER LETTER US
1538 | 0xEA 0x9D 0xB1..0xFF #L& [23] LATIN SMALL LETTER DUM..LATIN SMAL...
1539 | 0xEA 0x9E 0x00..0x87 #
1540 | 0xEA 0x9E 0x88 #Lm MODIFIER LETTER LOW CIRCUMFLEX ACCENT
1541 | 0xEA 0x9E 0x8B..0x8E #L& [4] LATIN CAPITAL LETTER SALTILLO..LAT...
1542 | 0xEA 0x9E 0x8F #Lo LATIN LETTER SINOLOGICAL DOT
1543 | 0xEA 0x9E 0x90..0xAE #L& [31] LATIN CAPITAL LETTER N WITH DESCEN...
1544 | 0xEA 0x9E 0xB0..0xB7 #L& [8] LATIN CAPITAL LETTER TURNED K..LAT...
1545 | 0xEA 0x9F 0xB7 #Lo LATIN EPIGRAPHIC LETTER SIDEWAYS I
1546 | 0xEA 0x9F 0xB8..0xB9 #Lm [2] MODIFIER LETTER CAPITAL H WITH STR...
1547 | 0xEA 0x9F 0xBA #L& LATIN LETTER SMALL CAPITAL TURNED M
1548 | 0xEA 0x9F 0xBB..0xFF #Lo [7] LATIN EPIGRAPHIC LETTER REVERSED F...
1549 | 0xEA 0xA0 0x00..0x81 #
1550 | 0xEA 0xA0 0x82 #Mn SYLOTI NAGRI SIGN DVISVARA
1551 | 0xEA 0xA0 0x83..0x85 #Lo [3] SYLOTI NAGRI LETTER U..SYLOTI NAGR...
1552 | 0xEA 0xA0 0x86 #Mn SYLOTI NAGRI SIGN HASANTA
1553 | 0xEA 0xA0 0x87..0x8A #Lo [4] SYLOTI NAGRI LETTER KO..SYLOTI NAG...
1554 | 0xEA 0xA0 0x8B #Mn SYLOTI NAGRI SIGN ANUSVARA
1555 | 0xEA 0xA0 0x8C..0xA2 #Lo [23] SYLOTI NAGRI LETTER CO..SYLOTI NAG...
1556 | 0xEA 0xA0 0xA3..0xA4 #Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI ...
1557 | 0xEA 0xA0 0xA5..0xA6 #Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI ...
1558 | 0xEA 0xA0 0xA7 #Mc SYLOTI NAGRI VOWEL SIGN OO
1559 | 0xEA 0xA1 0x80..0xB3 #Lo [52] PHAGS-PA LETTER KA..PHAGS-PA LETTE...
1560 | 0xEA 0xA2 0x80..0x81 #Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHT...
1561 | 0xEA 0xA2 0x82..0xB3 #Lo [50] SAURASHTRA LETTER A..SAURASHTRA LE...
1562 | 0xEA 0xA2 0xB4..0xFF #Mc [16] SAURASHTRA CONSONANT SIGN HAARU..S...
1563 | 0xEA 0xA3 0x00..0x83 #
1564 | 0xEA 0xA3 0x84..0x85 #Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA...
1565 | 0xEA 0xA3 0x90..0x99 #Nd [10] SAURASHTRA DIGIT ZERO..SAURASHTRA ...
1566 | 0xEA 0xA3 0xA0..0xB1 #Mn [18] COMBINING DEVANAGARI DIGIT ZERO..C...
1567 | 0xEA 0xA3 0xB2..0xB7 #Lo [6] DEVANAGARI SIGN SPACING CANDRABIND...
1568 | 0xEA 0xA3 0xBB #Lo DEVANAGARI HEADSTROKE
1569 | 0xEA 0xA3 0xBD #Lo DEVANAGARI JAIN OM
1570 | 0xEA 0xA4 0x80..0x89 #Nd [10] KAYAH LI DIGIT ZERO..KAYAH LI DIGI...
1571 | 0xEA 0xA4 0x8A..0xA5 #Lo [28] KAYAH LI LETTER KA..KAYAH LI LETTE...
1572 | 0xEA 0xA4 0xA6..0xAD #Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE C...
1573 | 0xEA 0xA4 0xB0..0xFF #Lo [23] REJANG LETTER KA..REJANG LETTER A
1574 | 0xEA 0xA5 0x00..0x86 #
1575 | 0xEA 0xA5 0x87..0x91 #Mn [11] REJANG VOWEL SIGN I..REJANG CONSON...
1576 | 0xEA 0xA5 0x92..0x93 #Mc [2] REJANG CONSONANT SIGN H..REJANG VI...
1577 | 0xEA 0xA5 0xA0..0xBC #Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANG...
1578 | 0xEA 0xA6 0x80..0x82 #Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE ...
1579 | 0xEA 0xA6 0x83 #Mc JAVANESE SIGN WIGNYAN
1580 | 0xEA 0xA6 0x84..0xB2 #Lo [47] JAVANESE LETTER A..JAVANESE LETTER HA
1581 | 0xEA 0xA6 0xB3 #Mn JAVANESE SIGN CECAK TELU
1582 | 0xEA 0xA6 0xB4..0xB5 #Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANE...
1583 | 0xEA 0xA6 0xB6..0xB9 #Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE...
1584 | 0xEA 0xA6 0xBA..0xBB #Mc [2] JAVANESE VOWEL SIGN TALING..JAVANE...
1585 | 0xEA 0xA6 0xBC #Mn JAVANESE VOWEL SIGN PEPET
1586 | 0xEA 0xA6 0xBD..0xFF #Mc [4] JAVANESE CONSONANT SIGN KERET..JAV...
1587 | 0xEA 0xA7 0x00..0x80 #
1588 | 0xEA 0xA7 0x8F #Lm JAVANESE PANGRANGKEP
1589 | 0xEA 0xA7 0x90..0x99 #Nd [10] JAVANESE DIGIT ZERO..JAVANESE DIGI...
1590 | 0xEA 0xA7 0xA0..0xA4 #Lo [5] MYANMAR LETTER SHAN GHA..MYANMAR L...
1591 | 0xEA 0xA7 0xA5 #Mn MYANMAR SIGN SHAN SAW
1592 | 0xEA 0xA7 0xA6 #Lm MYANMAR MODIFIER LETTER SHAN REDUP...
1593 | 0xEA 0xA7 0xA7..0xAF #Lo [9] MYANMAR LETTER TAI LAING NYA..MYAN...
1594 | 0xEA 0xA7 0xB0..0xB9 #Nd [10] MYANMAR TAI LAING DIGIT ZERO..MYAN...
1595 | 0xEA 0xA7 0xBA..0xBE #Lo [5] MYANMAR LETTER TAI LAING LLA..MYAN...
1596 | 0xEA 0xA8 0x80..0xA8 #Lo [41] CHAM LETTER A..CHAM LETTER HA
1597 | 0xEA 0xA8 0xA9..0xAE #Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIG...
1598 | 0xEA 0xA8 0xAF..0xB0 #Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI
1599 | 0xEA 0xA8 0xB1..0xB2 #Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIG...
1600 | 0xEA 0xA8 0xB3..0xB4 #Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSO...
1601 | 0xEA 0xA8 0xB5..0xB6 #Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSO...
1602 | 0xEA 0xA9 0x80..0x82 #Lo [3] CHAM LETTER FINAL K..CHAM LETTER F...
1603 | 0xEA 0xA9 0x83 #Mn CHAM CONSONANT SIGN FINAL NG
1604 | 0xEA 0xA9 0x84..0x8B #Lo [8] CHAM LETTER FINAL CH..CHAM LETTER ...
1605 | 0xEA 0xA9 0x8C #Mn CHAM CONSONANT SIGN FINAL M
1606 | 0xEA 0xA9 0x8D #Mc CHAM CONSONANT SIGN FINAL H
1607 | 0xEA 0xA9 0x90..0x99 #Nd [10] CHAM DIGIT ZERO..CHAM DIGIT NINE
1608 | 0xEA 0xA9 0xA0..0xAF #Lo [16] MYANMAR LETTER KHAMTI GA..MYANMAR ...
1609 | 0xEA 0xA9 0xB0 #Lm MYANMAR MODIFIER LETTER KHAMTI RED...
1610 | 0xEA 0xA9 0xB1..0xB6 #Lo [6] MYANMAR LETTER KHAMTI XA..MYANMAR ...
1611 | 0xEA 0xA9 0xBA #Lo MYANMAR LETTER AITON RA
1612 | 0xEA 0xA9 0xBB #Mc MYANMAR SIGN PAO KAREN TONE
1613 | 0xEA 0xA9 0xBC #Mn MYANMAR SIGN TAI LAING TONE-2
1614 | 0xEA 0xA9 0xBD #Mc MYANMAR SIGN TAI LAING TONE-5
1615 | 0xEA 0xA9 0xBE..0xFF #Lo [50] MYANMAR LETTER SHWE PALAUNG CHA..T...
1616 | 0xEA 0xAA 0x00..0xAF #
1617 | 0xEA 0xAA 0xB0 #Mn TAI VIET MAI KANG
1618 | 0xEA 0xAA 0xB1 #Lo TAI VIET VOWEL AA
1619 | 0xEA 0xAA 0xB2..0xB4 #Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U
1620 | 0xEA 0xAA 0xB5..0xB6 #Lo [2] TAI VIET VOWEL E..TAI VIET VOWEL O
1621 | 0xEA 0xAA 0xB7..0xB8 #Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA
1622 | 0xEA 0xAA 0xB9..0xBD #Lo [5] TAI VIET VOWEL UEA..TAI VIET VOWEL AN
1623 | 0xEA 0xAA 0xBE..0xBF #Mn [2] TAI VIET VOWEL AM..TAI VIET TONE M...
1624 | 0xEA 0xAB 0x80 #Lo TAI VIET TONE MAI NUENG
1625 | 0xEA 0xAB 0x81 #Mn TAI VIET TONE MAI THO
1626 | 0xEA 0xAB 0x82 #Lo TAI VIET TONE MAI SONG
1627 | 0xEA 0xAB 0x9B..0x9C #Lo [2] TAI VIET SYMBOL KON..TAI VIET SYMB...
1628 | 0xEA 0xAB 0x9D #Lm TAI VIET SYMBOL SAM
1629 | 0xEA 0xAB 0xA0..0xAA #Lo [11] MEETEI MAYEK LETTER E..MEETEI MAYE...
1630 | 0xEA 0xAB 0xAB #Mc MEETEI MAYEK VOWEL SIGN II
1631 | 0xEA 0xAB 0xAC..0xAD #Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI...
1632 | 0xEA 0xAB 0xAE..0xAF #Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI...
1633 | 0xEA 0xAB 0xB2 #Lo MEETEI MAYEK ANJI
1634 | 0xEA 0xAB 0xB3..0xB4 #Lm [2] MEETEI MAYEK SYLLABLE REPETITION M...
1635 | 0xEA 0xAB 0xB5 #Mc MEETEI MAYEK VOWEL SIGN VISARGA
1636 | 0xEA 0xAB 0xB6 #Mn MEETEI MAYEK VIRAMA
1637 | 0xEA 0xAC 0x81..0x86 #Lo [6] ETHIOPIC SYLLABLE TTHU..ETHIOPIC S...
1638 | 0xEA 0xAC 0x89..0x8E #Lo [6] ETHIOPIC SYLLABLE DDHU..ETHIOPIC S...
1639 | 0xEA 0xAC 0x91..0x96 #Lo [6] ETHIOPIC SYLLABLE DZU..ETHIOPIC SY...
1640 | 0xEA 0xAC 0xA0..0xA6 #Lo [7] ETHIOPIC SYLLABLE CCHHA..ETHIOPIC ...
1641 | 0xEA 0xAC 0xA8..0xAE #Lo [7] ETHIOPIC SYLLABLE BBA..ETHIOPIC SY...
1642 | 0xEA 0xAC 0xB0..0xFF #L& [43] LATIN SMALL LETTER BARRED ALPHA..L...
1643 | 0xEA 0xAD 0x00..0x9A #
1644 | 0xEA 0xAD 0x9C..0x9F #Lm [4] MODIFIER LETTER SMALL HENG..MODIFI...
1645 | 0xEA 0xAD 0xA0..0xA5 #L& [6] LATIN SMALL LETTER SAKHA YAT..GREE...
1646 | 0xEA 0xAD 0xB0..0xFF #L& [80] CHEROKEE SMALL LETTER A..CHEROKEE ...
1647 | 0xEA 0xAE 0x00..0xBF #
1648 | 0xEA 0xAF 0x80..0xA2 #Lo [35] MEETEI MAYEK LETTER KOK..MEETEI MA...
1649 | 0xEA 0xAF 0xA3..0xA4 #Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEET...
1650 | 0xEA 0xAF 0xA5 #Mn MEETEI MAYEK VOWEL SIGN ANAP
1651 | 0xEA 0xAF 0xA6..0xA7 #Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEE...
1652 | 0xEA 0xAF 0xA8 #Mn MEETEI MAYEK VOWEL SIGN UNAP
1653 | 0xEA 0xAF 0xA9..0xAA #Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..M...
1654 | 0xEA 0xAF 0xAC #Mc MEETEI MAYEK LUM IYEK
1655 | 0xEA 0xAF 0xAD #Mn MEETEI MAYEK APUN IYEK
1656 | 0xEA 0xAF 0xB0..0xB9 #Nd [10] MEETEI MAYEK DIGIT ZERO..MEETEI MA...
1657 | 0xEA 0xB0 0x80..0xFF #Lo [11172] HANGUL SYLLABLE GA..HA...
1658 | 0xEA 0xB1..0xFF 0x00..0xFF #
1659 | 0xEB..0xEC 0x00..0xFF 0x00..0xFF #
1660 | 0xED 0x00 0x00..0xFF #
1661 | 0xED 0x01..0x9D 0x00..0xFF #
1662 | 0xED 0x9E 0x00..0xA3 #
1663 | 0xED 0x9E 0xB0..0xFF #Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUN...
1664 | 0xED 0x9F 0x00..0x86 #
1665 | 0xED 0x9F 0x8B..0xBB #Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANG...
1666 | 0xEF 0xA4 0x80..0xFF #Lo [366] CJK COMPATIBILITY IDEOGRAPH-F9...
1667 | 0xEF 0xA5..0xA8 0x00..0xFF #
1668 | 0xEF 0xA9 0x00..0xAD #
1669 | 0xEF 0xA9 0xB0..0xFF #Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA...
1670 | 0xEF 0xAA..0xAA 0x00..0xFF #
1671 | 0xEF 0xAB 0x00..0x99 #
1672 | 0xEF 0xAC 0x80..0x86 #L& [7] LATIN SMALL LIGATURE FF..LATIN SMA...
1673 | 0xEF 0xAC 0x93..0x97 #L& [5] ARMENIAN SMALL LIGATURE MEN NOW..A...
1674 | 0xEF 0xAC 0x9D #Lo HEBREW LETTER YOD WITH HIRIQ
1675 | 0xEF 0xAC 0x9E #Mn HEBREW POINT JUDEO-SPANISH VARIKA
1676 | 0xEF 0xAC 0x9F..0xA8 #Lo [10] HEBREW LIGATURE YIDDISH YOD YOD PA...
1677 | 0xEF 0xAC 0xAA..0xB6 #Lo [13] HEBREW LETTER SHIN WITH SHIN DOT.....
1678 | 0xEF 0xAC 0xB8..0xBC #Lo [5] HEBREW LETTER TET WITH DAGESH..HEB...
1679 | 0xEF 0xAC 0xBE #Lo HEBREW LETTER MEM WITH DAGESH
1680 | 0xEF 0xAD 0x80..0x81 #Lo [2] HEBREW LETTER NUN WITH DAGESH..HEB...
1681 | 0xEF 0xAD 0x83..0x84 #Lo [2] HEBREW LETTER FINAL PE WITH DAGESH...
1682 | 0xEF 0xAD 0x86..0xFF #Lo [108] HEBREW LETTER TSADI WITH DAGESH..A...
1683 | 0xEF 0xAE 0x00..0xB1 #
1684 | 0xEF 0xAF 0x93..0xFF #Lo [363] ARABIC LETTER NG ISOLATED FORM...
1685 | 0xEF 0xB0..0xB3 0x00..0xFF #
1686 | 0xEF 0xB4 0x00..0xBD #
1687 | 0xEF 0xB5 0x90..0xFF #Lo [64] ARABIC LIGATURE TEH WITH JEEM WITH...
1688 | 0xEF 0xB6 0x00..0x8F #
1689 | 0xEF 0xB6 0x92..0xFF #Lo [54] ARABIC LIGATURE MEEM WITH JEEM WIT...
1690 | 0xEF 0xB7 0x00..0x87 #
1691 | 0xEF 0xB7 0xB0..0xBB #Lo [12] ARABIC LIGATURE SALLA USED AS KORA...
1692 | 0xEF 0xB8 0x80..0x8F #Mn [16] VARIATION SELECTOR-1..VARIATION SE...
1693 | 0xEF 0xB8 0xA0..0xAF #Mn [16] COMBINING LIGATURE LEFT HALF..COMB...
1694 | 0xEF 0xB8 0xB3..0xB4 #Pc [2] PRESENTATION FORM FOR VERTICAL LOW...
1695 | 0xEF 0xB9 0x8D..0x8F #Pc [3] DASHED LOW LINE..WAVY LOW LINE
1696 | 0xEF 0xB9 0xB0..0xB4 #Lo [5] ARABIC FATHATAN ISOLATED FORM..ARA...
1697 | 0xEF 0xB9 0xB6..0xFF #Lo [135] ARABIC FATHA ISOLATED FORM..AR...
1698 | 0xEF 0xBA..0xBA 0x00..0xFF #
1699 | 0xEF 0xBB 0x00..0xBC #
1700 | 0xEF 0xBC 0x90..0x99 #Nd [10] FULLWIDTH DIGIT ZERO..FULLWIDTH DI...
1701 | 0xEF 0xBC 0xA1..0xBA #L& [26] FULLWIDTH LATIN CAPITAL LETTER A.....
1702 | 0xEF 0xBC 0xBF #Pc FULLWIDTH LOW LINE
1703 | 0xEF 0xBD 0x81..0x9A #L& [26] FULLWIDTH LATIN SMALL LETTER A..FU...
1704 | 0xEF 0xBD 0xA6..0xAF #Lo [10] HALFWIDTH KATAKANA LETTER WO..HALF...
1705 | 0xEF 0xBD 0xB0 #Lm HALFWIDTH KATAKANA-HIRAGANA PROLON...
1706 | 0xEF 0xBD 0xB1..0xFF #Lo [45] HALFWIDTH KATAKANA LETTER A..HALFW...
1707 | 0xEF 0xBE 0x00..0x9D #
1708 | 0xEF 0xBE 0x9E..0x9F #Lm [2] HALFWIDTH KATAKANA VOICED SOUND MA...
1709 | 0xEF 0xBE 0xA0..0xBE #Lo [31] HALFWIDTH HANGUL FILLER..HALFWIDTH...
1710 | 0xEF 0xBF 0x82..0x87 #Lo [6] HALFWIDTH HANGUL LETTER A..HALFWID...
1711 | 0xEF 0xBF 0x8A..0x8F #Lo [6] HALFWIDTH HANGUL LETTER YEO..HALFW...
1712 | 0xEF 0xBF 0x92..0x97 #Lo [6] HALFWIDTH HANGUL LETTER YO..HALFWI...
1713 | 0xEF 0xBF 0x9A..0x9C #Lo [3] HALFWIDTH HANGUL LETTER EU..HALFWI...
1714 | 0xF0 0x90 0x80 0x80..0x8B #Lo [12] LINEAR B SYLLABLE B008 A..LINEA...
1715 | 0xF0 0x90 0x80 0x8D..0xA6 #Lo [26] LINEAR B SYLLABLE B036 JO..LINE...
1716 | 0xF0 0x90 0x80 0xA8..0xBA #Lo [19] LINEAR B SYLLABLE B060 RA..LINE...
1717 | 0xF0 0x90 0x80 0xBC..0xBD #Lo [2] LINEAR B SYLLABLE B017 ZA..LINE...
1718 | 0xF0 0x90 0x80 0xBF..0xFF #Lo [15] LINEAR B SYLLABLE B020 ZO..LINE...
1719 | 0xF0 0x90 0x81 0x00..0x8D #
1720 | 0xF0 0x90 0x81 0x90..0x9D #Lo [14] LINEAR B SYMBOL B018..LINEAR B ...
1721 | 0xF0 0x90 0x82 0x80..0xFF #Lo [123] LINEAR B IDEOGRAM B100 MAN..LIN...
1722 | 0xF0 0x90 0x83 0x00..0xBA #
1723 | 0xF0 0x90 0x85 0x80..0xB4 #Nl [53] GREEK ACROPHONIC ATTIC ONE QUAR...
1724 | 0xF0 0x90 0x87 0xBD #Mn PHAISTOS DISC SIGN COMBINING OBLIQ...
1725 | 0xF0 0x90 0x8A 0x80..0x9C #Lo [29] LYCIAN LETTER A..LYCIAN LETTER X
1726 | 0xF0 0x90 0x8A 0xA0..0xFF #Lo [49] CARIAN LETTER A..CARIAN LETTER ...
1727 | 0xF0 0x90 0x8B 0x00..0x90 #
1728 | 0xF0 0x90 0x8B 0xA0 #Mn COPTIC EPACT THOUSANDS MARK
1729 | 0xF0 0x90 0x8C 0x80..0x9F #Lo [32] OLD ITALIC LETTER A..OLD ITALIC...
1730 | 0xF0 0x90 0x8C 0xB0..0xFF #Lo [17] GOTHIC LETTER AHSA..GOTHIC LETT...
1731 | 0xF0 0x90 0x8D 0x00..0x80 #
1732 | 0xF0 0x90 0x8D 0x81 #Nl GOTHIC LETTER NINETY
1733 | 0xF0 0x90 0x8D 0x82..0x89 #Lo [8] GOTHIC LETTER RAIDA..GOTHIC LET...
1734 | 0xF0 0x90 0x8D 0x8A #Nl GOTHIC LETTER NINE HUNDRED
1735 | 0xF0 0x90 0x8D 0x90..0xB5 #Lo [38] OLD PERMIC LETTER AN..OLD PERMI...
1736 | 0xF0 0x90 0x8D 0xB6..0xBA #Mn [5] COMBINING OLD PERMIC LETTER AN....
1737 | 0xF0 0x90 0x8E 0x80..0x9D #Lo [30] UGARITIC LETTER ALPA..UGARITIC ...
1738 | 0xF0 0x90 0x8E 0xA0..0xFF #Lo [36] OLD PERSIAN SIGN A..OLD PERSIAN...
1739 | 0xF0 0x90 0x8F 0x00..0x83 #
1740 | 0xF0 0x90 0x8F 0x88..0x8F #Lo [8] OLD PERSIAN SIGN AURAMAZDAA..OL...
1741 | 0xF0 0x90 0x8F 0x91..0x95 #Nl [5] OLD PERSIAN NUMBER ONE..OLD PER...
1742 | 0xF0 0x90 0x90 0x80..0xFF #L& [80] DESERET CAPITAL LETTER LONG I.....
1743 | 0xF0 0x90 0x91 0x00..0x8F #
1744 | 0xF0 0x90 0x91 0x90..0xFF #Lo [78] SHAVIAN LETTER PEEP..OSMANYA LE...
1745 | 0xF0 0x90 0x92 0x00..0x9D #
1746 | 0xF0 0x90 0x92 0xA0..0xA9 #Nd [10] OSMANYA DIGIT ZERO..OSMANYA DIG...
1747 | 0xF0 0x90 0x92 0xB0..0xFF #L& [36] OSAGE CAPITAL LETTER A..OSAGE C...
1748 | 0xF0 0x90 0x93 0x00..0x93 #
1749 | 0xF0 0x90 0x93 0x98..0xBB #L& [36] OSAGE SMALL LETTER A..OSAGE SMA...
1750 | 0xF0 0x90 0x94 0x80..0xA7 #Lo [40] ELBASAN LETTER A..ELBASAN LETTE...
1751 | 0xF0 0x90 0x94 0xB0..0xFF #Lo [52] CAUCASIAN ALBANIAN LETTER ALT.....
1752 | 0xF0 0x90 0x95 0x00..0xA3 #
1753 | 0xF0 0x90 0x98 0x80..0xFF #Lo [311] LINEAR A SIGN AB001..LINE...
1754 | 0xF0 0x90 0x99..0x9B 0x00..0xFF #
1755 | 0xF0 0x90 0x9C 0x00..0xB6 #
1756 | 0xF0 0x90 0x9D 0x80..0x95 #Lo [22] LINEAR A SIGN A701 A..LINEAR A ...
1757 | 0xF0 0x90 0x9D 0xA0..0xA7 #Lo [8] LINEAR A SIGN A800..LINEAR A SI...
1758 | 0xF0 0x90 0xA0 0x80..0x85 #Lo [6] CYPRIOT SYLLABLE A..CYPRIOT SYL...
1759 | 0xF0 0x90 0xA0 0x88 #Lo CYPRIOT SYLLABLE JO
1760 | 0xF0 0x90 0xA0 0x8A..0xB5 #Lo [44] CYPRIOT SYLLABLE KA..CYPRIOT SY...
1761 | 0xF0 0x90 0xA0 0xB7..0xB8 #Lo [2] CYPRIOT SYLLABLE XA..CYPRIOT SY...
1762 | 0xF0 0x90 0xA0 0xBC #Lo CYPRIOT SYLLABLE ZA
1763 | 0xF0 0x90 0xA0 0xBF..0xFF #Lo [23] CYPRIOT SYLLABLE ZO..IMPERIAL A...
1764 | 0xF0 0x90 0xA1 0x00..0x95 #
1765 | 0xF0 0x90 0xA1 0xA0..0xB6 #Lo [23] PALMYRENE LETTER ALEPH..PALMYRE...
1766 | 0xF0 0x90 0xA2 0x80..0x9E #Lo [31] NABATAEAN LETTER FINAL ALEPH..N...
1767 | 0xF0 0x90 0xA3 0xA0..0xB2 #Lo [19] HATRAN LETTER ALEPH..HATRAN LET...
1768 | 0xF0 0x90 0xA3 0xB4..0xB5 #Lo [2] HATRAN LETTER SHIN..HATRAN LETT...
1769 | 0xF0 0x90 0xA4 0x80..0x95 #Lo [22] PHOENICIAN LETTER ALF..PHOENICI...
1770 | 0xF0 0x90 0xA4 0xA0..0xB9 #Lo [26] LYDIAN LETTER A..LYDIAN LETTER C
1771 | 0xF0 0x90 0xA6 0x80..0xB7 #Lo [56] MEROITIC HIEROGLYPHIC LETTER A....
1772 | 0xF0 0x90 0xA6 0xBE..0xBF #Lo [2] MEROITIC CURSIVE LOGOGRAM RMT.....
1773 | 0xF0 0x90 0xA8 0x80 #Lo KHAROSHTHI LETTER A
1774 | 0xF0 0x90 0xA8 0x81..0x83 #Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROS...
1775 | 0xF0 0x90 0xA8 0x85..0x86 #Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROS...
1776 | 0xF0 0x90 0xA8 0x8C..0x8F #Mn [4] KHAROSHTHI VOWEL LENGTH MARK..K...
1777 | 0xF0 0x90 0xA8 0x90..0x93 #Lo [4] KHAROSHTHI LETTER KA..KHAROSHTH...
1778 | 0xF0 0x90 0xA8 0x95..0x97 #Lo [3] KHAROSHTHI LETTER CA..KHAROSHTH...
1779 | 0xF0 0x90 0xA8 0x99..0xB3 #Lo [27] KHAROSHTHI LETTER NYA..KHAROSHT...
1780 | 0xF0 0x90 0xA8 0xB8..0xBA #Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAR...
1781 | 0xF0 0x90 0xA8 0xBF #Mn KHAROSHTHI VIRAMA
1782 | 0xF0 0x90 0xA9 0xA0..0xBC #Lo [29] OLD SOUTH ARABIAN LETTER HE..OL...
1783 | 0xF0 0x90 0xAA 0x80..0x9C #Lo [29] OLD NORTH ARABIAN LETTER HEH..O...
1784 | 0xF0 0x90 0xAB 0x80..0x87 #Lo [8] MANICHAEAN LETTER ALEPH..MANICH...
1785 | 0xF0 0x90 0xAB 0x89..0xA4 #Lo [28] MANICHAEAN LETTER ZAYIN..MANICH...
1786 | 0xF0 0x90 0xAB 0xA5..0xA6 #Mn [2] MANICHAEAN ABBREVIATION MARK AB...
1787 | 0xF0 0x90 0xAC 0x80..0xB5 #Lo [54] AVESTAN LETTER A..AVESTAN LETTE...
1788 | 0xF0 0x90 0xAD 0x80..0x95 #Lo [22] INSCRIPTIONAL PARTHIAN LETTER A...
1789 | 0xF0 0x90 0xAD 0xA0..0xB2 #Lo [19] INSCRIPTIONAL PAHLAVI LETTER AL...
1790 | 0xF0 0x90 0xAE 0x80..0x91 #Lo [18] PSALTER PAHLAVI LETTER ALEPH..P...
1791 | 0xF0 0x90 0xB0 0x80..0xFF #Lo [73] OLD TURKIC LETTER ORKHON A..OLD...
1792 | 0xF0 0x90 0xB1 0x00..0x88 #
1793 | 0xF0 0x90 0xB2 0x80..0xB2 #L& [51] OLD HUNGARIAN CAPITAL LETTER A....
1794 | 0xF0 0x90 0xB3 0x80..0xB2 #L& [51] OLD HUNGARIAN SMALL LETTER A..O...
1795 | 0xF0 0x91 0x80 0x80 #Mc BRAHMI SIGN CANDRABINDU
1796 | 0xF0 0x91 0x80 0x81 #Mn BRAHMI SIGN ANUSVARA
1797 | 0xF0 0x91 0x80 0x82 #Mc BRAHMI SIGN VISARGA
1798 | 0xF0 0x91 0x80 0x83..0xB7 #Lo [53] BRAHMI SIGN JIHVAMULIYA..BRAHMI...
1799 | 0xF0 0x91 0x80 0xB8..0xFF #Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VI...
1800 | 0xF0 0x91 0x81 0x00..0x86 #
1801 | 0xF0 0x91 0x81 0xA6..0xAF #Nd [10] BRAHMI DIGIT ZERO..BRAHMI DIGIT...
1802 | 0xF0 0x91 0x81 0xBF..0xFF #Mn [3] BRAHMI NUMBER JOINER..KAITHI SI...
1803 | 0xF0 0x91 0x82 0x00..0x81 #
1804 | 0xF0 0x91 0x82 0x82 #Mc KAITHI SIGN VISARGA
1805 | 0xF0 0x91 0x82 0x83..0xAF #Lo [45] KAITHI LETTER A..KAITHI LETTER HA
1806 | 0xF0 0x91 0x82 0xB0..0xB2 #Mc [3] KAITHI VOWEL SIGN AA..KAITHI VO...
1807 | 0xF0 0x91 0x82 0xB3..0xB6 #Mn [4] KAITHI VOWEL SIGN U..KAITHI VOW...
1808 | 0xF0 0x91 0x82 0xB7..0xB8 #Mc [2] KAITHI VOWEL SIGN O..KAITHI VOW...
1809 | 0xF0 0x91 0x82 0xB9..0xBA #Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN...
1810 | 0xF0 0x91 0x83 0x90..0xA8 #Lo [25] SORA SOMPENG LETTER SAH..SORA S...
1811 | 0xF0 0x91 0x83 0xB0..0xB9 #Nd [10] SORA SOMPENG DIGIT ZERO..SORA S...
1812 | 0xF0 0x91 0x84 0x80..0x82 #Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA...
1813 | 0xF0 0x91 0x84 0x83..0xA6 #Lo [36] CHAKMA LETTER AA..CHAKMA LETTER...
1814 | 0xF0 0x91 0x84 0xA7..0xAB #Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOW...
1815 | 0xF0 0x91 0x84 0xAC #Mc CHAKMA VOWEL SIGN E
1816 | 0xF0 0x91 0x84 0xAD..0xB4 #Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MA...
1817 | 0xF0 0x91 0x84 0xB6..0xBF #Nd [10] CHAKMA DIGIT ZERO..CHAKMA DIGIT...
1818 | 0xF0 0x91 0x85 0x90..0xB2 #Lo [35] MAHAJANI LETTER A..MAHAJANI LET...
1819 | 0xF0 0x91 0x85 0xB3 #Mn MAHAJANI SIGN NUKTA
1820 | 0xF0 0x91 0x85 0xB6 #Lo MAHAJANI LIGATURE SHRI
1821 | 0xF0 0x91 0x86 0x80..0x81 #Mn [2] SHARADA SIGN CANDRABINDU..SHARA...
1822 | 0xF0 0x91 0x86 0x82 #Mc SHARADA SIGN VISARGA
1823 | 0xF0 0x91 0x86 0x83..0xB2 #Lo [48] SHARADA LETTER A..SHARADA LETTE...
1824 | 0xF0 0x91 0x86 0xB3..0xB5 #Mc [3] SHARADA VOWEL SIGN AA..SHARADA ...
1825 | 0xF0 0x91 0x86 0xB6..0xBE #Mn [9] SHARADA VOWEL SIGN U..SHARADA V...
1826 | 0xF0 0x91 0x86 0xBF..0xFF #Mc [2] SHARADA VOWEL SIGN AU..SHARADA ...
1827 | 0xF0 0x91 0x87 0x00..0x80 #
1828 | 0xF0 0x91 0x87 0x81..0x84 #Lo [4] SHARADA SIGN AVAGRAHA..SHARADA OM
1829 | 0xF0 0x91 0x87 0x8A..0x8C #Mn [3] SHARADA SIGN NUKTA..SHARADA EXT...
1830 | 0xF0 0x91 0x87 0x90..0x99 #Nd [10] SHARADA DIGIT ZERO..SHARADA DIG...
1831 | 0xF0 0x91 0x87 0x9A #Lo SHARADA EKAM
1832 | 0xF0 0x91 0x87 0x9C #Lo SHARADA HEADSTROKE
1833 | 0xF0 0x91 0x88 0x80..0x91 #Lo [18] KHOJKI LETTER A..KHOJKI LETTER JJA
1834 | 0xF0 0x91 0x88 0x93..0xAB #Lo [25] KHOJKI LETTER NYA..KHOJKI LETTE...
1835 | 0xF0 0x91 0x88 0xAC..0xAE #Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VO...
1836 | 0xF0 0x91 0x88 0xAF..0xB1 #Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOW...
1837 | 0xF0 0x91 0x88 0xB2..0xB3 #Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOW...
1838 | 0xF0 0x91 0x88 0xB4 #Mn KHOJKI SIGN ANUSVARA
1839 | 0xF0 0x91 0x88 0xB5 #Mc KHOJKI SIGN VIRAMA
1840 | 0xF0 0x91 0x88 0xB6..0xB7 #Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN ...
1841 | 0xF0 0x91 0x88 0xBE #Mn KHOJKI SIGN SUKUN
1842 | 0xF0 0x91 0x8A 0x80..0x86 #Lo [7] MULTANI LETTER A..MULTANI LETTE...
1843 | 0xF0 0x91 0x8A 0x88 #Lo MULTANI LETTER GHA
1844 | 0xF0 0x91 0x8A 0x8A..0x8D #Lo [4] MULTANI LETTER CA..MULTANI LETT...
1845 | 0xF0 0x91 0x8A 0x8F..0x9D #Lo [15] MULTANI LETTER NYA..MULTANI LET...
1846 | 0xF0 0x91 0x8A 0x9F..0xA8 #Lo [10] MULTANI LETTER BHA..MULTANI LET...
1847 | 0xF0 0x91 0x8A 0xB0..0xFF #Lo [47] KHUDAWADI LETTER A..KHUDAWADI L...
1848 | 0xF0 0x91 0x8B 0x00..0x9E #
1849 | 0xF0 0x91 0x8B 0x9F #Mn KHUDAWADI SIGN ANUSVARA
1850 | 0xF0 0x91 0x8B 0xA0..0xA2 #Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAW...
1851 | 0xF0 0x91 0x8B 0xA3..0xAA #Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWA...
1852 | 0xF0 0x91 0x8B 0xB0..0xB9 #Nd [10] KHUDAWADI DIGIT ZERO..KHUDAWADI...
1853 | 0xF0 0x91 0x8C 0x80..0x81 #Mn [2] GRANTHA SIGN COMBINING ANUSVARA...
1854 | 0xF0 0x91 0x8C 0x82..0x83 #Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA ...
1855 | 0xF0 0x91 0x8C 0x85..0x8C #Lo [8] GRANTHA LETTER A..GRANTHA LETTE...
1856 | 0xF0 0x91 0x8C 0x8F..0x90 #Lo [2] GRANTHA LETTER EE..GRANTHA LETT...
1857 | 0xF0 0x91 0x8C 0x93..0xA8 #Lo [22] GRANTHA LETTER OO..GRANTHA LETT...
1858 | 0xF0 0x91 0x8C 0xAA..0xB0 #Lo [7] GRANTHA LETTER PA..GRANTHA LETT...
1859 | 0xF0 0x91 0x8C 0xB2..0xB3 #Lo [2] GRANTHA LETTER LA..GRANTHA LETT...
1860 | 0xF0 0x91 0x8C 0xB5..0xB9 #Lo [5] GRANTHA LETTER VA..GRANTHA LETT...
1861 | 0xF0 0x91 0x8C 0xBC #Mn GRANTHA SIGN NUKTA
1862 | 0xF0 0x91 0x8C 0xBD #Lo GRANTHA SIGN AVAGRAHA
1863 | 0xF0 0x91 0x8C 0xBE..0xBF #Mc [2] GRANTHA VOWEL SIGN AA..GRANTHA ...
1864 | 0xF0 0x91 0x8D 0x80 #Mn GRANTHA VOWEL SIGN II
1865 | 0xF0 0x91 0x8D 0x81..0x84 #Mc [4] GRANTHA VOWEL SIGN U..GRANTHA V...
1866 | 0xF0 0x91 0x8D 0x87..0x88 #Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA ...
1867 | 0xF0 0x91 0x8D 0x8B..0x8D #Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA ...
1868 | 0xF0 0x91 0x8D 0x90 #Lo GRANTHA OM
1869 | 0xF0 0x91 0x8D 0x97 #Mc GRANTHA AU LENGTH MARK
1870 | 0xF0 0x91 0x8D 0x9D..0xA1 #Lo [5] GRANTHA SIGN PLUTA..GRANTHA LET...
1871 | 0xF0 0x91 0x8D 0xA2..0xA3 #Mc [2] GRANTHA VOWEL SIGN VOCALIC L..G...
1872 | 0xF0 0x91 0x8D 0xA6..0xAC #Mn [7] COMBINING GRANTHA DIGIT ZERO..C...
1873 | 0xF0 0x91 0x8D 0xB0..0xB4 #Mn [5] COMBINING GRANTHA LETTER A..COM...
1874 | 0xF0 0x91 0x90 0x80..0xB4 #Lo [53] NEWA LETTER A..NEWA LETTER HA
1875 | 0xF0 0x91 0x90 0xB5..0xB7 #Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL ...
1876 | 0xF0 0x91 0x90 0xB8..0xBF #Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL S...
1877 | 0xF0 0x91 0x91 0x80..0x81 #Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL S...
1878 | 0xF0 0x91 0x91 0x82..0x84 #Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANU...
1879 | 0xF0 0x91 0x91 0x85 #Mc NEWA SIGN VISARGA
1880 | 0xF0 0x91 0x91 0x86 #Mn NEWA SIGN NUKTA
1881 | 0xF0 0x91 0x91 0x87..0x8A #Lo [4] NEWA SIGN AVAGRAHA..NEWA SIDDHI
1882 | 0xF0 0x91 0x91 0x90..0x99 #Nd [10] NEWA DIGIT ZERO..NEWA DIGIT NINE
1883 | 0xF0 0x91 0x92 0x80..0xAF #Lo [48] TIRHUTA ANJI..TIRHUTA LETTER HA
1884 | 0xF0 0x91 0x92 0xB0..0xB2 #Mc [3] TIRHUTA VOWEL SIGN AA..TIRHUTA ...
1885 | 0xF0 0x91 0x92 0xB3..0xB8 #Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA V...
1886 | 0xF0 0x91 0x92 0xB9 #Mc TIRHUTA VOWEL SIGN E
1887 | 0xF0 0x91 0x92 0xBA #Mn TIRHUTA VOWEL SIGN SHORT E
1888 | 0xF0 0x91 0x92 0xBB..0xBE #Mc [4] TIRHUTA VOWEL SIGN AI..TIRHUTA ...
1889 | 0xF0 0x91 0x92 0xBF..0xFF #Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHU...
1890 | 0xF0 0x91 0x93 0x00..0x80 #
1891 | 0xF0 0x91 0x93 0x81 #Mc TIRHUTA SIGN VISARGA
1892 | 0xF0 0x91 0x93 0x82..0x83 #Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SI...
1893 | 0xF0 0x91 0x93 0x84..0x85 #Lo [2] TIRHUTA SIGN AVAGRAHA..TIRHUTA ...
1894 | 0xF0 0x91 0x93 0x87 #Lo TIRHUTA OM
1895 | 0xF0 0x91 0x93 0x90..0x99 #Nd [10] TIRHUTA DIGIT ZERO..TIRHUTA DIG...
1896 | 0xF0 0x91 0x96 0x80..0xAE #Lo [47] SIDDHAM LETTER A..SIDDHAM LETTE...
1897 | 0xF0 0x91 0x96 0xAF..0xB1 #Mc [3] SIDDHAM VOWEL SIGN AA..SIDDHAM ...
1898 | 0xF0 0x91 0x96 0xB2..0xB5 #Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM V...
1899 | 0xF0 0x91 0x96 0xB8..0xBB #Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM V...
1900 | 0xF0 0x91 0x96 0xBC..0xBD #Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDH...
1901 | 0xF0 0x91 0x96 0xBE #Mc SIDDHAM SIGN VISARGA
1902 | 0xF0 0x91 0x96 0xBF..0xFF #Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SI...
1903 | 0xF0 0x91 0x97 0x00..0x80 #
1904 | 0xF0 0x91 0x97 0x98..0x9B #Lo [4] SIDDHAM LETTER THREE-CIRCLE ALT...
1905 | 0xF0 0x91 0x97 0x9C..0x9D #Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U....
1906 | 0xF0 0x91 0x98 0x80..0xAF #Lo [48] MODI LETTER A..MODI LETTER LLA
1907 | 0xF0 0x91 0x98 0xB0..0xB2 #Mc [3] MODI VOWEL SIGN AA..MODI VOWEL ...
1908 | 0xF0 0x91 0x98 0xB3..0xBA #Mn [8] MODI VOWEL SIGN U..MODI VOWEL S...
1909 | 0xF0 0x91 0x98 0xBB..0xBC #Mc [2] MODI VOWEL SIGN O..MODI VOWEL S...
1910 | 0xF0 0x91 0x98 0xBD #Mn MODI SIGN ANUSVARA
1911 | 0xF0 0x91 0x98 0xBE #Mc MODI SIGN VISARGA
1912 | 0xF0 0x91 0x98 0xBF..0xFF #Mn [2] MODI SIGN VIRAMA..MODI SIGN ARD...
1913 | 0xF0 0x91 0x99 0x00..0x80 #
1914 | 0xF0 0x91 0x99 0x84 #Lo MODI SIGN HUVA
1915 | 0xF0 0x91 0x99 0x90..0x99 #Nd [10] MODI DIGIT ZERO..MODI DIGIT NINE
1916 | 0xF0 0x91 0x9A 0x80..0xAA #Lo [43] TAKRI LETTER A..TAKRI LETTER RRA
1917 | 0xF0 0x91 0x9A 0xAB #Mn TAKRI SIGN ANUSVARA
1918 | 0xF0 0x91 0x9A 0xAC #Mc TAKRI SIGN VISARGA
1919 | 0xF0 0x91 0x9A 0xAD #Mn TAKRI VOWEL SIGN AA
1920 | 0xF0 0x91 0x9A 0xAE..0xAF #Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL...
1921 | 0xF0 0x91 0x9A 0xB0..0xB5 #Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL...
1922 | 0xF0 0x91 0x9A 0xB6 #Mc TAKRI SIGN VIRAMA
1923 | 0xF0 0x91 0x9A 0xB7 #Mn TAKRI SIGN NUKTA
1924 | 0xF0 0x91 0x9B 0x80..0x89 #Nd [10] TAKRI DIGIT ZERO..TAKRI DIGIT NINE
1925 | 0xF0 0x91 0x9C 0x80..0x99 #Lo [26] AHOM LETTER KA..AHOM LETTER JHA
1926 | 0xF0 0x91 0x9C 0x9D..0x9F #Mn [3] AHOM CONSONANT SIGN MEDIAL LA.....
1927 | 0xF0 0x91 0x9C 0xA0..0xA1 #Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL S...
1928 | 0xF0 0x91 0x9C 0xA2..0xA5 #Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL S...
1929 | 0xF0 0x91 0x9C 0xA6 #Mc AHOM VOWEL SIGN E
1930 | 0xF0 0x91 0x9C 0xA7..0xAB #Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN K...
1931 | 0xF0 0x91 0x9C 0xB0..0xB9 #Nd [10] AHOM DIGIT ZERO..AHOM DIGIT NINE
1932 | 0xF0 0x91 0xA2 0xA0..0xFF #L& [64] WARANG CITI CAPITAL LETTER NGAA...
1933 | 0xF0 0x91 0xA3 0x00..0x9F #
1934 | 0xF0 0x91 0xA3 0xA0..0xA9 #Nd [10] WARANG CITI DIGIT ZERO..WARANG ...
1935 | 0xF0 0x91 0xA3 0xBF #Lo WARANG CITI OM
1936 | 0xF0 0x91 0xAB 0x80..0xB8 #Lo [57] PAU CIN HAU LETTER PA..PAU CIN ...
1937 | 0xF0 0x91 0xB0 0x80..0x88 #Lo [9] BHAIKSUKI LETTER A..BHAIKSUKI L...
1938 | 0xF0 0x91 0xB0 0x8A..0xAE #Lo [37] BHAIKSUKI LETTER E..BHAIKSUKI L...
1939 | 0xF0 0x91 0xB0 0xAF #Mc BHAIKSUKI VOWEL SIGN AA
1940 | 0xF0 0x91 0xB0 0xB0..0xB6 #Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSU...
1941 | 0xF0 0x91 0xB0 0xB8..0xBD #Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSU...
1942 | 0xF0 0x91 0xB0 0xBE #Mc BHAIKSUKI SIGN VISARGA
1943 | 0xF0 0x91 0xB0 0xBF #Mn BHAIKSUKI SIGN VIRAMA
1944 | 0xF0 0x91 0xB1 0x80 #Lo BHAIKSUKI SIGN AVAGRAHA
1945 | 0xF0 0x91 0xB1 0x90..0x99 #Nd [10] BHAIKSUKI DIGIT ZERO..BHAIKSUKI...
1946 | 0xF0 0x91 0xB1 0xB2..0xFF #Lo [30] MARCHEN LETTER KA..MARCHEN LETT...
1947 | 0xF0 0x91 0xB2 0x00..0x8F #
1948 | 0xF0 0x91 0xB2 0x92..0xA7 #Mn [22] MARCHEN SUBJOINED LETTER KA..MA...
1949 | 0xF0 0x91 0xB2 0xA9 #Mc MARCHEN SUBJOINED LETTER YA
1950 | 0xF0 0x91 0xB2 0xAA..0xB0 #Mn [7] MARCHEN SUBJOINED LETTER RA..MA...
1951 | 0xF0 0x91 0xB2 0xB1 #Mc MARCHEN VOWEL SIGN I
1952 | 0xF0 0x91 0xB2 0xB2..0xB3 #Mn [2] MARCHEN VOWEL SIGN U..MARCHEN V...
1953 | 0xF0 0x91 0xB2 0xB4 #Mc MARCHEN VOWEL SIGN O
1954 | 0xF0 0x91 0xB2 0xB5..0xB6 #Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN ...
1955 | 0xF0 0x92 0x80 0x80..0xFF #Lo [922] CUNEIFORM SIGN A..CUNEIFO...
1956 | 0xF0 0x92 0x81..0x8D 0x00..0xFF #
1957 | 0xF0 0x92 0x8E 0x00..0x99 #
1958 | 0xF0 0x92 0x90 0x80..0xFF #Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH....
1959 | 0xF0 0x92 0x91 0x00..0xAE #
1960 | 0xF0 0x92 0x92 0x80..0xFF #Lo [196] CUNEIFORM SIGN AB TIMES N...
1961 | 0xF0 0x92 0x93..0x94 0x00..0xFF #
1962 | 0xF0 0x92 0x95 0x00..0x83 #
1963 | 0xF0 0x93 0x80 0x80..0xFF #Lo [1071] EGYPTIAN HIEROGLYPH A001...
1964 | 0xF0 0x93 0x81..0x8F 0x00..0xFF #
1965 | 0xF0 0x93 0x90 0x00..0xAE #
1966 | 0xF0 0x94 0x90 0x80..0xFF #Lo [583] ANATOLIAN HIEROGLYPH A001...
1967 | 0xF0 0x94 0x91..0x98 0x00..0xFF #
1968 | 0xF0 0x94 0x99 0x00..0x86 #
1969 | 0xF0 0x96 0xA0 0x80..0xFF #Lo [569] BAMUM LETTER PHASE-A NGKU...
1970 | 0xF0 0x96 0xA1..0xA7 0x00..0xFF #
1971 | 0xF0 0x96 0xA8 0x00..0xB8 #
1972 | 0xF0 0x96 0xA9 0x80..0x9E #Lo [31] MRO LETTER TA..MRO LETTER TEK
1973 | 0xF0 0x96 0xA9 0xA0..0xA9 #Nd [10] MRO DIGIT ZERO..MRO DIGIT NINE
1974 | 0xF0 0x96 0xAB 0x90..0xAD #Lo [30] BASSA VAH LETTER ENNI..BASSA VA...
1975 | 0xF0 0x96 0xAB 0xB0..0xB4 #Mn [5] BASSA VAH COMBINING HIGH TONE.....
1976 | 0xF0 0x96 0xAC 0x80..0xAF #Lo [48] PAHAWH HMONG VOWEL KEEB..PAHAWH...
1977 | 0xF0 0x96 0xAC 0xB0..0xB6 #Mn [7] PAHAWH HMONG MARK CIM TUB..PAHA...
1978 | 0xF0 0x96 0xAD 0x80..0x83 #Lm [4] PAHAWH HMONG SIGN VOS SEEV..PAH...
1979 | 0xF0 0x96 0xAD 0x90..0x99 #Nd [10] PAHAWH HMONG DIGIT ZERO..PAHAWH...
1980 | 0xF0 0x96 0xAD 0xA3..0xB7 #Lo [21] PAHAWH HMONG SIGN VOS LUB..PAHA...
1981 | 0xF0 0x96 0xAD 0xBD..0xFF #Lo [19] PAHAWH HMONG CLAN SIGN TSHEEJ.....
1982 | 0xF0 0x96 0xAE 0x00..0x8F #
1983 | 0xF0 0x96 0xBC 0x80..0xFF #Lo [69] MIAO LETTER PA..MIAO LETTER HHA
1984 | 0xF0 0x96 0xBD 0x00..0x84 #
1985 | 0xF0 0x96 0xBD 0x90 #Lo MIAO LETTER NASALIZATION
1986 | 0xF0 0x96 0xBD 0x91..0xBE #Mc [46] MIAO SIGN ASPIRATION..MIAO VOWE...
1987 | 0xF0 0x96 0xBE 0x8F..0x92 #Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW
1988 | 0xF0 0x96 0xBE 0x93..0x9F #Lm [13] MIAO LETTER TONE-2..MIAO LETTER...
1989 | 0xF0 0x96 0xBF 0xA0 #Lm TANGUT ITERATION MARK
1990 | 0xF0 0x97 0x80 0x80..0xFF #Lo [6125] TANGUT IDEOGRAPH-17000.....
1991 | 0xF0 0x97 0x81..0xFF 0x00..0xFF #
1992 | 0xF0 0x98 0x00 0x00..0xFF #
1993 | 0xF0 0x98 0x01..0x9E 0x00..0xFF #
1994 | 0xF0 0x98 0x9F 0x00..0xAC #
1995 | 0xF0 0x98 0xA0 0x80..0xFF #Lo [755] TANGUT COMPONENT-001..TAN...
1996 | 0xF0 0x98 0xA1..0xAA 0x00..0xFF #
1997 | 0xF0 0x98 0xAB 0x00..0xB2 #
1998 | 0xF0 0x9B 0x80 0x80..0x81 #Lo [2] KATAKANA LETTER ARCHAIC E..HIRA...
1999 | 0xF0 0x9B 0xB0 0x80..0xFF #Lo [107] DUPLOYAN LETTER H..DUPLOYAN LET...
2000 | 0xF0 0x9B 0xB1 0x00..0xAA #
2001 | 0xF0 0x9B 0xB1 0xB0..0xBC #Lo [13] DUPLOYAN AFFIX LEFT HORIZONTAL ...
2002 | 0xF0 0x9B 0xB2 0x80..0x88 #Lo [9] DUPLOYAN AFFIX HIGH ACUTE..DUPL...
2003 | 0xF0 0x9B 0xB2 0x90..0x99 #Lo [10] DUPLOYAN AFFIX LOW ACUTE..DUPLO...
2004 | 0xF0 0x9B 0xB2 0x9D..0x9E #Mn [2] DUPLOYAN THICK LETTER SELECTOR....
2005 | 0xF0 0x9D 0x85 0xA5..0xA6 #Mc [2] MUSICAL SYMBOL COMBINING STEM.....
2006 | 0xF0 0x9D 0x85 0xA7..0xA9 #Mn [3] MUSICAL SYMBOL COMBINING TREMOL...
2007 | 0xF0 0x9D 0x85 0xAD..0xB2 #Mc [6] MUSICAL SYMBOL COMBINING AUGMEN...
2008 | 0xF0 0x9D 0x85 0xBB..0xFF #Mn [8] MUSICAL SYMBOL COMBINING ACCENT...
2009 | 0xF0 0x9D 0x86 0x00..0x82 #
2010 | 0xF0 0x9D 0x86 0x85..0x8B #Mn [7] MUSICAL SYMBOL COMBINING DOIT.....
2011 | 0xF0 0x9D 0x86 0xAA..0xAD #Mn [4] MUSICAL SYMBOL COMBINING DOWN B...
2012 | 0xF0 0x9D 0x89 0x82..0x84 #Mn [3] COMBINING GREEK MUSICAL TRISEME...
2013 | 0xF0 0x9D 0x90 0x80..0xFF #L& [85] MATHEMATICAL BOLD CAPITAL A..MA...
2014 | 0xF0 0x9D 0x91 0x00..0x94 #
2015 | 0xF0 0x9D 0x91 0x96..0xFF #L& [71] MATHEMATICAL ITALIC SMALL I..MA...
2016 | 0xF0 0x9D 0x92 0x00..0x9C #
2017 | 0xF0 0x9D 0x92 0x9E..0x9F #L& [2] MATHEMATICAL SCRIPT CAPITAL C.....
2018 | 0xF0 0x9D 0x92 0xA2 #L& MATHEMATICAL SCRIPT CAPITAL G
2019 | 0xF0 0x9D 0x92 0xA5..0xA6 #L& [2] MATHEMATICAL SCRIPT CAPITAL J.....
2020 | 0xF0 0x9D 0x92 0xA9..0xAC #L& [4] MATHEMATICAL SCRIPT CAPITAL N.....
2021 | 0xF0 0x9D 0x92 0xAE..0xB9 #L& [12] MATHEMATICAL SCRIPT CAPITAL S.....
2022 | 0xF0 0x9D 0x92 0xBB #L& MATHEMATICAL SCRIPT SMALL F
2023 | 0xF0 0x9D 0x92 0xBD..0xFF #L& [7] MATHEMATICAL SCRIPT SMALL H..MA...
2024 | 0xF0 0x9D 0x93 0x00..0x83 #
2025 | 0xF0 0x9D 0x93 0x85..0xFF #L& [65] MATHEMATICAL SCRIPT SMALL P..MA...
2026 | 0xF0 0x9D 0x94 0x00..0x85 #
2027 | 0xF0 0x9D 0x94 0x87..0x8A #L& [4] MATHEMATICAL FRAKTUR CAPITAL D....
2028 | 0xF0 0x9D 0x94 0x8D..0x94 #L& [8] MATHEMATICAL FRAKTUR CAPITAL J....
2029 | 0xF0 0x9D 0x94 0x96..0x9C #L& [7] MATHEMATICAL FRAKTUR CAPITAL S....
2030 | 0xF0 0x9D 0x94 0x9E..0xB9 #L& [28] MATHEMATICAL FRAKTUR SMALL A..M...
2031 | 0xF0 0x9D 0x94 0xBB..0xBE #L& [4] MATHEMATICAL DOUBLE-STRUCK CAPI...
2032 | 0xF0 0x9D 0x95 0x80..0x84 #L& [5] MATHEMATICAL DOUBLE-STRUCK CAPI...
2033 | 0xF0 0x9D 0x95 0x86 #L& MATHEMATICAL DOUBLE-STRUCK CAPITAL O
2034 | 0xF0 0x9D 0x95 0x8A..0x90 #L& [7] MATHEMATICAL DOUBLE-STRUCK CAPI...
2035 | 0xF0 0x9D 0x95 0x92..0xFF #L& [340] MATHEMATICAL DOUBLE-STRUC...
2036 | 0xF0 0x9D 0x96..0x99 0x00..0xFF #
2037 | 0xF0 0x9D 0x9A 0x00..0xA5 #
2038 | 0xF0 0x9D 0x9A 0xA8..0xFF #L& [25] MATHEMATICAL BOLD CAPITAL ALPHA...
2039 | 0xF0 0x9D 0x9B 0x00..0x80 #
2040 | 0xF0 0x9D 0x9B 0x82..0x9A #L& [25] MATHEMATICAL BOLD SMALL ALPHA.....
2041 | 0xF0 0x9D 0x9B 0x9C..0xBA #L& [31] MATHEMATICAL BOLD EPSILON SYMBO...
2042 | 0xF0 0x9D 0x9B 0xBC..0xFF #L& [25] MATHEMATICAL ITALIC SMALL ALPHA...
2043 | 0xF0 0x9D 0x9C 0x00..0x94 #
2044 | 0xF0 0x9D 0x9C 0x96..0xB4 #L& [31] MATHEMATICAL ITALIC EPSILON SYM...
2045 | 0xF0 0x9D 0x9C 0xB6..0xFF #L& [25] MATHEMATICAL BOLD ITALIC SMALL ...
2046 | 0xF0 0x9D 0x9D 0x00..0x8E #
2047 | 0xF0 0x9D 0x9D 0x90..0xAE #L& [31] MATHEMATICAL BOLD ITALIC EPSILO...
2048 | 0xF0 0x9D 0x9D 0xB0..0xFF #L& [25] MATHEMATICAL SANS-SERIF BOLD SM...
2049 | 0xF0 0x9D 0x9E 0x00..0x88 #
2050 | 0xF0 0x9D 0x9E 0x8A..0xA8 #L& [31] MATHEMATICAL SANS-SERIF BOLD EP...
2051 | 0xF0 0x9D 0x9E 0xAA..0xFF #L& [25] MATHEMATICAL SANS-SERIF BOLD IT...
2052 | 0xF0 0x9D 0x9F 0x00..0x82 #
2053 | 0xF0 0x9D 0x9F 0x84..0x8B #L& [8] MATHEMATICAL SANS-SERIF BOLD IT...
2054 | 0xF0 0x9D 0x9F 0x8E..0xBF #Nd [50] MATHEMATICAL BOLD DIGIT ZERO..M...
2055 | 0xF0 0x9D 0xA8 0x80..0xB6 #Mn [55] SIGNWRITING HEAD RIM..SIGNWRITI...
2056 | 0xF0 0x9D 0xA8 0xBB..0xFF #Mn [50] SIGNWRITING MOUTH CLOSED NEUTRA...
2057 | 0xF0 0x9D 0xA9 0x00..0xAC #
2058 | 0xF0 0x9D 0xA9 0xB5 #Mn SIGNWRITING UPPER BODY TILTING FRO...
2059 | 0xF0 0x9D 0xAA 0x84 #Mn SIGNWRITING LOCATION HEAD NECK
2060 | 0xF0 0x9D 0xAA 0x9B..0x9F #Mn [5] SIGNWRITING FILL MODIFIER-2..SI...
2061 | 0xF0 0x9D 0xAA 0xA1..0xAF #Mn [15] SIGNWRITING ROTATION MODIFIER-2...
2062 | 0xF0 0x9E 0x80 0x80..0x86 #Mn [7] COMBINING GLAGOLITIC LETTER AZU...
2063 | 0xF0 0x9E 0x80 0x88..0x98 #Mn [17] COMBINING GLAGOLITIC LETTER ZEM...
2064 | 0xF0 0x9E 0x80 0x9B..0xA1 #Mn [7] COMBINING GLAGOLITIC LETTER SHT...
2065 | 0xF0 0x9E 0x80 0xA3..0xA4 #Mn [2] COMBINING GLAGOLITIC LETTER YU....
2066 | 0xF0 0x9E 0x80 0xA6..0xAA #Mn [5] COMBINING GLAGOLITIC LETTER YO....
2067 | 0xF0 0x9E 0xA0 0x80..0xFF #Lo [197] MENDE KIKAKUI SYLLABLE M0...
2068 | 0xF0 0x9E 0xA1..0xA2 0x00..0xFF #
2069 | 0xF0 0x9E 0xA3 0x00..0x84 #
2070 | 0xF0 0x9E 0xA3 0x90..0x96 #Mn [7] MENDE KIKAKUI COMBINING NUMBER ...
2071 | 0xF0 0x9E 0xA4 0x80..0xFF #L& [68] ADLAM CAPITAL LETTER ALIF..ADLA...
2072 | 0xF0 0x9E 0xA5 0x00..0x83 #
2073 | 0xF0 0x9E 0xA5 0x84..0x8A #Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA
2074 | 0xF0 0x9E 0xA5 0x90..0x99 #Nd [10] ADLAM DIGIT ZERO..ADLAM DIGIT NINE
2075 | 0xF0 0x9E 0xB8 0x80..0x83 #Lo [4] ARABIC MATHEMATICAL ALEF..ARABI...
2076 | 0xF0 0x9E 0xB8 0x85..0x9F #Lo [27] ARABIC MATHEMATICAL WAW..ARABIC...
2077 | 0xF0 0x9E 0xB8 0xA1..0xA2 #Lo [2] ARABIC MATHEMATICAL INITIAL BEH...
2078 | 0xF0 0x9E 0xB8 0xA4 #Lo ARABIC MATHEMATICAL INITIAL HEH
2079 | 0xF0 0x9E 0xB8 0xA7 #Lo ARABIC MATHEMATICAL INITIAL HAH
2080 | 0xF0 0x9E 0xB8 0xA9..0xB2 #Lo [10] ARABIC MATHEMATICAL INITIAL YEH...
2081 | 0xF0 0x9E 0xB8 0xB4..0xB7 #Lo [4] ARABIC MATHEMATICAL INITIAL SHE...
2082 | 0xF0 0x9E 0xB8 0xB9 #Lo ARABIC MATHEMATICAL INITIAL DAD
2083 | 0xF0 0x9E 0xB8 0xBB #Lo ARABIC MATHEMATICAL INITIAL GHAIN
2084 | 0xF0 0x9E 0xB9 0x82 #Lo ARABIC MATHEMATICAL TAILED JEEM
2085 | 0xF0 0x9E 0xB9 0x87 #Lo ARABIC MATHEMATICAL TAILED HAH
2086 | 0xF0 0x9E 0xB9 0x89 #Lo ARABIC MATHEMATICAL TAILED YEH
2087 | 0xF0 0x9E 0xB9 0x8B #Lo ARABIC MATHEMATICAL TAILED LAM
2088 | 0xF0 0x9E 0xB9 0x8D..0x8F #Lo [3] ARABIC MATHEMATICAL TAILED NOON...
2089 | 0xF0 0x9E 0xB9 0x91..0x92 #Lo [2] ARABIC MATHEMATICAL TAILED SAD....
2090 | 0xF0 0x9E 0xB9 0x94 #Lo ARABIC MATHEMATICAL TAILED SHEEN
2091 | 0xF0 0x9E 0xB9 0x97 #Lo ARABIC MATHEMATICAL TAILED KHAH
2092 | 0xF0 0x9E 0xB9 0x99 #Lo ARABIC MATHEMATICAL TAILED DAD
2093 | 0xF0 0x9E 0xB9 0x9B #Lo ARABIC MATHEMATICAL TAILED GHAIN
2094 | 0xF0 0x9E 0xB9 0x9D #Lo ARABIC MATHEMATICAL TAILED DOTLESS...
2095 | 0xF0 0x9E 0xB9 0x9F #Lo ARABIC MATHEMATICAL TAILED DOTLESS...
2096 | 0xF0 0x9E 0xB9 0xA1..0xA2 #Lo [2] ARABIC MATHEMATICAL STRETCHED B...
2097 | 0xF0 0x9E 0xB9 0xA4 #Lo ARABIC MATHEMATICAL STRETCHED HEH
2098 | 0xF0 0x9E 0xB9 0xA7..0xAA #Lo [4] ARABIC MATHEMATICAL STRETCHED H...
2099 | 0xF0 0x9E 0xB9 0xAC..0xB2 #Lo [7] ARABIC MATHEMATICAL STRETCHED M...
2100 | 0xF0 0x9E 0xB9 0xB4..0xB7 #Lo [4] ARABIC MATHEMATICAL STRETCHED S...
2101 | 0xF0 0x9E 0xB9 0xB9..0xBC #Lo [4] ARABIC MATHEMATICAL STRETCHED D...
2102 | 0xF0 0x9E 0xB9 0xBE #Lo ARABIC MATHEMATICAL STRETCHED DOTL...
2103 | 0xF0 0x9E 0xBA 0x80..0x89 #Lo [10] ARABIC MATHEMATICAL LOOPED ALEF...
2104 | 0xF0 0x9E 0xBA 0x8B..0x9B #Lo [17] ARABIC MATHEMATICAL LOOPED LAM....
2105 | 0xF0 0x9E 0xBA 0xA1..0xA3 #Lo [3] ARABIC MATHEMATICAL DOUBLE-STRU...
2106 | 0xF0 0x9E 0xBA 0xA5..0xA9 #Lo [5] ARABIC MATHEMATICAL DOUBLE-STRU...
2107 | 0xF0 0x9E 0xBA 0xAB..0xBB #Lo [17] ARABIC MATHEMATICAL DOUBLE-STRU...
2108 | 0xF0 0xA0 0x80 0x80..0xFF #Lo [42711] CJK UNIFIED IDEOG...
2109 | 0xF0 0xA0 0x81..0xFF 0x00..0xFF #
2110 | 0xF0 0xA1..0xA9 0x00..0xFF 0x00..0xFF #
2111 | 0xF0 0xAA 0x00 0x00..0xFF #
2112 | 0xF0 0xAA 0x01..0x9A 0x00..0xFF #
2113 | 0xF0 0xAA 0x9B 0x00..0x96 #
2114 | 0xF0 0xAA 0x9C 0x80..0xFF #Lo [4149] CJK UNIFIED IDEOGRAPH-2A...
2115 | 0xF0 0xAA 0x9D..0xFF 0x00..0xFF #
2116 | 0xF0 0xAB 0x00 0x00..0xFF #
2117 | 0xF0 0xAB 0x01..0x9B 0x00..0xFF #
2118 | 0xF0 0xAB 0x9C 0x00..0xB4 #
2119 | 0xF0 0xAB 0x9D 0x80..0xFF #Lo [222] CJK UNIFIED IDEOGRAPH-2B7...
2120 | 0xF0 0xAB 0x9E..0x9F 0x00..0xFF #
2121 | 0xF0 0xAB 0xA0 0x00..0x9D #
2122 | 0xF0 0xAB 0xA0 0xA0..0xFF #Lo [5762] CJK UNIFIED IDEOGRAPH-2B...
2123 | 0xF0 0xAB 0xA1..0xFF 0x00..0xFF #
2124 | 0xF0 0xAC 0x00 0x00..0xFF #
2125 | 0xF0 0xAC 0x01..0xB9 0x00..0xFF #
2126 | 0xF0 0xAC 0xBA 0x00..0xA1 #
2127 | 0xF0 0xAF 0xA0 0x80..0xFF #Lo [542] CJK COMPATIBILITY IDEOGRA...
2128 | 0xF0 0xAF 0xA1..0xA7 0x00..0xFF #
2129 | 0xF0 0xAF 0xA8 0x00..0x9D #
2130 | 0xF3 0xA0 0x84 0x80..0xFF #Mn [240] VARIATION SELECTOR-17..VA...
2131 | 0xF3 0xA0 0x85..0x86 0x00..0xFF #
2132 | 0xF3 0xA0 0x87 0x00..0xAF #
2133 ;
2134
2135}%%
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
new file mode 100644
index 0000000..eeee1a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
@@ -0,0 +1,86 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// Variables returns all of the variables referenced within a given expression.
8//
9// This is the implementation of the "Variables" method on every native
10// expression.
11func Variables(expr Expression) []hcl.Traversal {
12 var vars []hcl.Traversal
13
14 walker := &variablesWalker{
15 Callback: func(t hcl.Traversal) {
16 vars = append(vars, t)
17 },
18 }
19
20 Walk(expr, walker)
21
22 return vars
23}
24
25// variablesWalker is a Walker implementation that calls its callback for any
26// root scope traversal found while walking.
27type variablesWalker struct {
28 Callback func(hcl.Traversal)
29 localScopes []map[string]struct{}
30}
31
32func (w *variablesWalker) Enter(n Node) hcl.Diagnostics {
33 switch tn := n.(type) {
34 case *ScopeTraversalExpr:
35 t := tn.Traversal
36
37 // Check if the given root name appears in any of the active
38 // local scopes. We don't want to return local variables here, since
39 // the goal of walking variables is to tell the calling application
40 // which names it needs to populate in the _root_ scope.
41 name := t.RootName()
42 for _, names := range w.localScopes {
43 if _, localized := names[name]; localized {
44 return nil
45 }
46 }
47
48 w.Callback(t)
49 case ChildScope:
50 w.localScopes = append(w.localScopes, tn.LocalNames)
51 }
52 return nil
53}
54
55func (w *variablesWalker) Exit(n Node) hcl.Diagnostics {
56 switch n.(type) {
57 case ChildScope:
58 // pop the latest local scope, assuming that the walker will
59 // behave symmetrically as promised.
60 w.localScopes = w.localScopes[:len(w.localScopes)-1]
61 }
62 return nil
63}
64
65// ChildScope is a synthetic AST node that is visited during a walk to
66// indicate that its descendants will be evaluated in a child scope, which
67// may mask certain variables from the parent scope as locals.
68//
69// ChildScope nodes don't really exist in the AST, but are rather synthesized
70// on the fly during walk. Therefore it doesn't do any good to transform them;
71// instead, transform either the parent node that created a scope or the expression
72// that the child scope struct wraps.
73type ChildScope struct {
74 LocalNames map[string]struct{}
75 Expr *Expression // pointer because it can be replaced on walk
76}
77
78func (e ChildScope) walkChildNodes(w internalWalkFunc) {
79 *(e.Expr) = w(*(e.Expr)).(Expression)
80}
81
82// Range returns the range of the expression that the ChildScope is
83// encapsulating. It isn't really very useful to call Range on a ChildScope.
84func (e ChildScope) Range() hcl.Range {
85 return (*e.Expr).Range()
86}
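For orientation, here is a minimal usage sketch of the Variables function above. It is not part of the vendored file: the program, the example expression, and the variable names are invented, and it assumes the hclsyntax package's public ParseExpression entry point.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// A template expression that refers to two root-scope variables.
	src := []byte(`"${greeting}, ${name.first}!"`)

	expr, diags := hclsyntax.ParseExpression(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Variables reports the root names the application must populate in its
	// evaluation context; names masked by child scopes are filtered out.
	for _, traversal := range hclsyntax.Variables(expr) {
		fmt.Println(traversal.RootName()) // "greeting", then "name"
	}
}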
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
new file mode 100644
index 0000000..3405d26
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
@@ -0,0 +1,77 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// VisitFunc is the callback signature for VisitAll.
8type VisitFunc func(node Node) hcl.Diagnostics
9
10// VisitAll is a basic way to traverse the AST beginning with a particular
11// node. The given function will be called once for each AST node in
12// depth-first order, but no context is provided about the shape of the tree.
13//
14// The VisitFunc may return diagnostics, in which case they will be accumulated
15// and returned as a single set.
16func VisitAll(node Node, f VisitFunc) hcl.Diagnostics {
17 diags := f(node)
18 node.walkChildNodes(func(node Node) Node {
19 diags = append(diags, VisitAll(node, f)...)
20 return node
21 })
22 return diags
23}
24
25// Walker is an interface used with Walk.
26type Walker interface {
27 Enter(node Node) hcl.Diagnostics
28 Exit(node Node) hcl.Diagnostics
29}
30
31// Walk is a more complex way to traverse the AST starting with a particular
32// node, which provides information about the tree structure via separate
33// Enter and Exit functions.
34func Walk(node Node, w Walker) hcl.Diagnostics {
35 diags := w.Enter(node)
36 node.walkChildNodes(func(node Node) Node {
37 diags = append(diags, Walk(node, w)...)
38 return node
39 })
40 return diags
41}
42
43// Transformer is an interface used with Transform.
44type Transformer interface {
45 // Transform accepts a node and returns a replacement node along with
46 // a flag for whether to also visit child nodes. If the flag is false,
47 // none of the child nodes will be visited and the TransformExit method
48 // will not be called for the node.
49 //
50 // It is acceptable and appropriate for Transform to return the same node
51 // it was given, for situations where no transform is needed.
52 Transform(node Node) (Node, bool, hcl.Diagnostics)
53
54 // TransformExit signals the end of transformations of child nodes of the
55 // given node. If Transform returned a new node, the given node is the
56 // node that was returned, rather than the node that was originally
57 // encountered.
58 TransformExit(node Node) hcl.Diagnostics
59}
60
61// Transform allows for in-place transformations of an AST starting with a
62// particular node. The provided Transformer implementation drives the
63// transformation process. The return value is the node that replaced the
64// given top-level node.
65func Transform(node Node, t Transformer) (Node, hcl.Diagnostics) {
66 newNode, descend, diags := t.Transform(node)
67 if !descend {
68 return newNode, diags
69 }
70 node.walkChildNodes(func(node Node) Node {
71 newNode, newDiags := Transform(node, t)
72 diags = append(diags, newDiags...)
73 return newNode
74 })
75 diags = append(diags, t.TransformExit(newNode)...)
76 return newNode, diags
77}
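To make the Walker contract concrete, a hypothetical implementation follows. It is a sketch only, assumed to live alongside the interfaces above in the hclsyntax package; the depthCounter type does not exist in the vendored code.

package hclsyntax

import "github.com/hashicorp/hcl2/hcl"

// depthCounter is a hypothetical Walker that records the deepest nesting
// level observed while walking an AST. Enter and Exit are called
// symmetrically around each node's children, so depth stays balanced.
type depthCounter struct {
	depth    int
	maxDepth int
}

func (d *depthCounter) Enter(n Node) hcl.Diagnostics {
	d.depth++
	if d.depth > d.maxDepth {
		d.maxDepth = d.depth
	}
	return nil
}

func (d *depthCounter) Exit(n Node) hcl.Diagnostics {
	d.depth--
	return nil
}

// Usage: c := &depthCounter{}; Walk(expr, c); c.maxDepth now holds the depth.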
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/ast.go b/vendor/github.com/hashicorp/hcl2/hcl/json/ast.go
new file mode 100644
index 0000000..753bfa0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/ast.go
@@ -0,0 +1,121 @@
1package json
2
3import (
4 "math/big"
5
6 "github.com/hashicorp/hcl2/hcl"
7)
8
9type node interface {
10 Range() hcl.Range
11 StartRange() hcl.Range
12}
13
14type objectVal struct {
15 Attrs []*objectAttr
16 SrcRange hcl.Range // range of the entire object, brace-to-brace
17 OpenRange hcl.Range // range of the opening brace
18 CloseRange hcl.Range // range of the closing brace
19}
20
21func (n *objectVal) Range() hcl.Range {
22 return n.SrcRange
23}
24
25func (n *objectVal) StartRange() hcl.Range {
26 return n.OpenRange
27}
28
29type objectAttr struct {
30 Name string
31 Value node
32 NameRange hcl.Range // range of the name string
33}
34
35func (n *objectAttr) Range() hcl.Range {
36 return n.NameRange
37}
38
39func (n *objectAttr) StartRange() hcl.Range {
40 return n.NameRange
41}
42
43type arrayVal struct {
44 Values []node
45 SrcRange hcl.Range // range of the entire object, bracket-to-bracket
46 OpenRange hcl.Range // range of the opening bracket
47}
48
49func (n *arrayVal) Range() hcl.Range {
50 return n.SrcRange
51}
52
53func (n *arrayVal) StartRange() hcl.Range {
54 return n.OpenRange
55}
56
57type booleanVal struct {
58 Value bool
59 SrcRange hcl.Range
60}
61
62func (n *booleanVal) Range() hcl.Range {
63 return n.SrcRange
64}
65
66func (n *booleanVal) StartRange() hcl.Range {
67 return n.SrcRange
68}
69
70type numberVal struct {
71 Value *big.Float
72 SrcRange hcl.Range
73}
74
75func (n *numberVal) Range() hcl.Range {
76 return n.SrcRange
77}
78
79func (n *numberVal) StartRange() hcl.Range {
80 return n.SrcRange
81}
82
83type stringVal struct {
84 Value string
85 SrcRange hcl.Range
86}
87
88func (n *stringVal) Range() hcl.Range {
89 return n.SrcRange
90}
91
92func (n *stringVal) StartRange() hcl.Range {
93 return n.SrcRange
94}
95
96type nullVal struct {
97 SrcRange hcl.Range
98}
99
100func (n *nullVal) Range() hcl.Range {
101 return n.SrcRange
102}
103
104func (n *nullVal) StartRange() hcl.Range {
105 return n.SrcRange
106}
107
108// invalidVal is used as a placeholder where a value is needed for a valid
109// parse tree but the input was invalid enough to prevent one from being
110// created.
111type invalidVal struct {
112 SrcRange hcl.Range
113}
114
115func (n invalidVal) Range() hcl.Range {
116 return n.SrcRange
117}
118
119func (n invalidVal) StartRange() hcl.Range {
120 return n.SrcRange
121}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go
new file mode 100644
index 0000000..fbdd8bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go
@@ -0,0 +1,33 @@
1package json
2
3import (
4 "github.com/agext/levenshtein"
5)
6
7var keywords = []string{"false", "true", "null"}
8
9// keywordSuggestion tries to find a valid JSON keyword that is close to the
10// given string and returns it if found. If no keyword is close enough, returns
11// the empty string.
12func keywordSuggestion(given string) string {
13 return nameSuggestion(given, keywords)
14}
15
16// nameSuggestion tries to find a name from the given slice of suggested names
17// that is close to the given name and returns it if found. If no suggestion
18// is close enough, returns the empty string.
19//
20// The suggestions are tried in order, so earlier suggestions take precedence
21// if the given string is similar to two or more suggestions.
22//
23// This function is intended to be used with a relatively-small number of
24// suggestions. It's not optimized for hundreds or thousands of them.
25func nameSuggestion(given string, suggestions []string) string {
26 for _, suggestion := range suggestions {
27 dist := levenshtein.Distance(given, suggestion, nil)
28 if dist < 3 { // threshold determined experimentally
29 return suggestion
30 }
31 }
32 return ""
33}
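A few hand-worked values may help illustrate the distance threshold used above. The inputs are invented, the distances were computed by hand rather than by running the code, and the sketch assumes it sits inside package json where keywordSuggestion is visible.

func exampleSuggestionsSketch() {
	_ = keywordSuggestion("treu") // "true": Levenshtein distance 2, under the threshold of 3
	_ = keywordSuggestion("nill") // "null": distance 1
	_ = keywordSuggestion("yes")  // "": no keyword is within distance 3
}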
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/json/doc.go
new file mode 100644
index 0000000..4943f9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/doc.go
@@ -0,0 +1,8 @@
1// Package json is the JSON parser for HCL. It parses JSON files and returns
2// implementations of the core HCL structural interfaces in terms of the
3// JSON data inside.
4//
5// This is not a generic JSON parser. Instead, it deals with the mapping from
6// the JSON information model to the HCL information model, using a number
7// of hard-coded structural conventions.
8package json
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go
new file mode 100644
index 0000000..bc8a97f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go
@@ -0,0 +1,70 @@
1package json
2
3import (
4 "fmt"
5 "strings"
6)
7
8type navigation struct {
9 root node
10}
11
12// Implementation of hcled.ContextString
13func (n navigation) ContextString(offset int) string {
14 steps := navigationStepsRev(n.root, offset)
15 if steps == nil {
16 return ""
17 }
18
19 // We built our slice backwards, so we'll reverse it in-place now.
20 half := len(steps) / 2 // integer division
21 for i := 0; i < half; i++ {
22 steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i]
23 }
24
25 ret := strings.Join(steps, "")
26 if len(ret) > 0 && ret[0] == '.' {
27 ret = ret[1:]
28 }
29 return ret
30}
31
32func navigationStepsRev(v node, offset int) []string {
33 switch tv := v.(type) {
34 case *objectVal:
35 // Do any of our properties have an object that contains the target
36 // offset?
37 for _, attr := range tv.Attrs {
38 k := attr.Name
39 av := attr.Value
40
41 switch av.(type) {
42 case *objectVal, *arrayVal:
43 // okay
44 default:
45 continue
46 }
47
48 if av.Range().ContainsOffset(offset) {
49 return append(navigationStepsRev(av, offset), "."+k)
50 }
51 }
52 case *arrayVal:
53 // Do any of our elements contain the target offset?
54 for i, elem := range tv.Values {
55
56 switch elem.(type) {
57 case *objectVal, *arrayVal:
58 // okay
59 default:
60 continue
61 }
62
63 if elem.Range().ContainsOffset(offset) {
64 return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i))
65 }
66 }
67 }
68
69 return nil
70}
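As a hand-worked illustration of the navigation logic above (the document and offset are invented): for a body like {"resource": {"aws_instance": {"web": {"ami": "abc"}}}}, an offset that falls inside the innermost object makes navigationStepsRev return the steps from the inside out — ".web", ".aws_instance", ".resource" — and ContextString then reverses them, joins them, and trims the leading dot, yielding:

    resource.aws_instance.web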
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
new file mode 100644
index 0000000..246fd1c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
@@ -0,0 +1,491 @@
1package json
2
3import (
4 "encoding/json"
5 "fmt"
6 "math/big"
7
8 "github.com/hashicorp/hcl2/hcl"
9)
10
11func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) {
12 tokens := scan(buf, pos{
13 Filename: filename,
14 Pos: hcl.Pos{
15 Byte: 0,
16 Line: 1,
17 Column: 1,
18 },
19 })
20 p := newPeeker(tokens)
21 node, diags := parseValue(p)
22 if len(diags) == 0 && p.Peek().Type != tokenEOF {
23 diags = diags.Append(&hcl.Diagnostic{
24 Severity: hcl.DiagError,
25 Summary: "Extraneous data after value",
26 Detail: "Extra characters appear after the JSON value.",
27 Subject: p.Peek().Range.Ptr(),
28 })
29 }
30 return node, diags
31}
32
33func parseValue(p *peeker) (node, hcl.Diagnostics) {
34 tok := p.Peek()
35
36 wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) {
37 if n != nil {
38 return n, diags
39 }
40 return invalidVal{tok.Range}, diags
41 }
42
43 switch tok.Type {
44 case tokenBraceO:
45 return wrapInvalid(parseObject(p))
46 case tokenBrackO:
47 return wrapInvalid(parseArray(p))
48 case tokenNumber:
49 return wrapInvalid(parseNumber(p))
50 case tokenString:
51 return wrapInvalid(parseString(p))
52 case tokenKeyword:
53 return wrapInvalid(parseKeyword(p))
54 case tokenBraceC:
55 return wrapInvalid(nil, hcl.Diagnostics{
56 {
57 Severity: hcl.DiagError,
58 Summary: "Missing attribute value",
59 Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
60 Subject: &tok.Range,
61 },
62 })
63 case tokenBrackC:
64 return wrapInvalid(nil, hcl.Diagnostics{
65 {
66 Severity: hcl.DiagError,
67 Summary: "Missing array element value",
68 Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
69 Subject: &tok.Range,
70 },
71 })
72 case tokenEOF:
73 return wrapInvalid(nil, hcl.Diagnostics{
74 {
75 Severity: hcl.DiagError,
76 Summary: "Missing value",
77 Detail: "The JSON data ends prematurely.",
78 Subject: &tok.Range,
79 },
80 })
81 default:
82 return wrapInvalid(nil, hcl.Diagnostics{
83 {
84 Severity: hcl.DiagError,
85 Summary: "Invalid start of value",
86 Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
87 Subject: &tok.Range,
88 },
89 })
90 }
91}
92
93func tokenCanStartValue(tok token) bool {
94 switch tok.Type {
95 case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword:
96 return true
97 default:
98 return false
99 }
100}
101
102func parseObject(p *peeker) (node, hcl.Diagnostics) {
103 var diags hcl.Diagnostics
104
105 open := p.Read()
106 attrs := []*objectAttr{}
107
108 // recover is used to shift the peeker to what seems to be the end of
109 // our object, so that when we encounter an error we leave the peeker
110 // at a reasonable point in the token stream to continue parsing.
111 recover := func(tok token) {
112 open := 1
113 for {
114 switch tok.Type {
115 case tokenBraceO:
116 open++
117 case tokenBraceC:
118 open--
119 if open <= 1 {
120 return
121 }
122 case tokenEOF:
123 // Ran out of source before we were able to recover,
124 // so we'll bail here and let the caller deal with it.
125 return
126 }
127 tok = p.Read()
128 }
129 }
130
131Token:
132 for {
133 if p.Peek().Type == tokenBraceC {
134 break Token
135 }
136
137 keyNode, keyDiags := parseValue(p)
138 diags = diags.Extend(keyDiags)
139 if keyNode == nil {
140 return nil, diags
141 }
142
143 keyStrNode, ok := keyNode.(*stringVal)
144 if !ok {
145 return nil, diags.Append(&hcl.Diagnostic{
146 Severity: hcl.DiagError,
147 Summary: "Invalid object attribute name",
148 Detail: "A JSON object attribute name must be a string.",
149 Subject: keyNode.StartRange().Ptr(),
150 })
151 }
152
153 key := keyStrNode.Value
154
155 colon := p.Read()
156 if colon.Type != tokenColon {
157 recover(colon)
158
159 if colon.Type == tokenBraceC || colon.Type == tokenComma {
160 // Catch the common mistake of omitting an attribute's value, i.e.
161 // an attribute name followed directly by a comma or closing brace.
162 return nil, diags.Append(&hcl.Diagnostic{
163 Severity: hcl.DiagError,
164 Summary: "Missing object value",
165 Detail: "A JSON object attribute must have a value, introduced by a colon.",
166 Subject: &colon.Range,
167 })
168 }
169
170 if colon.Type == tokenEquals {
171 // Possible confusion with native HCL syntax.
172 return nil, diags.Append(&hcl.Diagnostic{
173 Severity: hcl.DiagError,
174 Summary: "Missing attribute value colon",
175 Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.",
176 Subject: &colon.Range,
177 })
178 }
179
180 return nil, diags.Append(&hcl.Diagnostic{
181 Severity: hcl.DiagError,
182 Summary: "Missing attribute value colon",
183 Detail: "A colon must appear between an object attribute's name and its value.",
184 Subject: &colon.Range,
185 })
186 }
187
188 valNode, valDiags := parseValue(p)
189 diags = diags.Extend(valDiags)
190 if valNode == nil {
191 return nil, diags
192 }
193
194 attrs = append(attrs, &objectAttr{
195 Name: key,
196 Value: valNode,
197 NameRange: keyStrNode.SrcRange,
198 })
199
200 switch p.Peek().Type {
201 case tokenComma:
202 comma := p.Read()
203 if p.Peek().Type == tokenBraceC {
204 // Special error message for this common mistake
205 return nil, diags.Append(&hcl.Diagnostic{
206 Severity: hcl.DiagError,
207 Summary: "Trailing comma in object",
208 Detail: "JSON does not permit a trailing comma after the final attribute in an object.",
209 Subject: &comma.Range,
210 })
211 }
212 continue Token
213 case tokenEOF:
214 return nil, diags.Append(&hcl.Diagnostic{
215 Severity: hcl.DiagError,
216 Summary: "Unclosed object",
217 Detail: "No closing brace was found for this JSON object.",
218 Subject: &open.Range,
219 })
220 case tokenBrackC:
221 // Consume the bracket anyway, so that we don't return with the peeker
222 // at a strange place.
223 p.Read()
224 return nil, diags.Append(&hcl.Diagnostic{
225 Severity: hcl.DiagError,
226 Summary: "Mismatched braces",
227 Detail: "A JSON object must be closed with a brace, not a bracket.",
228 Subject: p.Peek().Range.Ptr(),
229 })
230 case tokenBraceC:
231 break Token
232 default:
233 recover(p.Read())
234 return nil, diags.Append(&hcl.Diagnostic{
235 Severity: hcl.DiagError,
236 Summary: "Missing attribute seperator comma",
237 Detail: "A comma must appear between each attribute declaration in an object.",
238 Subject: p.Peek().Range.Ptr(),
239 })
240 }
241
242 }
243
244 close := p.Read()
245 return &objectVal{
246 Attrs: attrs,
247 SrcRange: hcl.RangeBetween(open.Range, close.Range),
248 OpenRange: open.Range,
249 CloseRange: close.Range,
250 }, diags
251}
252
253func parseArray(p *peeker) (node, hcl.Diagnostics) {
254 var diags hcl.Diagnostics
255
256 open := p.Read()
257 vals := []node{}
258
259 // recover is used to shift the peeker to what seems to be the end of
260 // our array, so that when we encounter an error we leave the peeker
261 // at a reasonable point in the token stream to continue parsing.
262 recover := func(tok token) {
263 open := 1
264 for {
265 switch tok.Type {
266 case tokenBrackO:
267 open++
268 case tokenBrackC:
269 open--
270 if open <= 1 {
271 return
272 }
273 case tokenEOF:
274 // Ran out of source before we were able to recover,
275 // so we'll bail here and let the caller deal with it.
276 return
277 }
278 tok = p.Read()
279 }
280 }
281
282Token:
283 for {
284 if p.Peek().Type == tokenBrackC {
285 break Token
286 }
287
288 valNode, valDiags := parseValue(p)
289 diags = diags.Extend(valDiags)
290 if valNode == nil {
291 return nil, diags
292 }
293
294 vals = append(vals, valNode)
295
296 switch p.Peek().Type {
297 case tokenComma:
298 comma := p.Read()
299 if p.Peek().Type == tokenBrackC {
300 // Special error message for this common mistake
301 return nil, diags.Append(&hcl.Diagnostic{
302 Severity: hcl.DiagError,
303 Summary: "Trailing comma in array",
304 Detail: "JSON does not permit a trailing comma after the final attribute in an array.",
305 Subject: &comma.Range,
306 })
307 }
308 continue Token
309 case tokenColon:
310 recover(p.Read())
311 return nil, diags.Append(&hcl.Diagnostic{
312 Severity: hcl.DiagError,
313 Summary: "Invalid array value",
314 Detail: "A colon is not used to introduce values in a JSON array.",
315 Subject: p.Peek().Range.Ptr(),
316 })
317 case tokenEOF:
318 recover(p.Read())
319 return nil, diags.Append(&hcl.Diagnostic{
320 Severity: hcl.DiagError,
321 Summary: "Unclosed object",
322 Detail: "No closing bracket was found for this JSON array.",
323 Subject: &open.Range,
324 })
325 case tokenBraceC:
326 recover(p.Read())
327 return nil, diags.Append(&hcl.Diagnostic{
328 Severity: hcl.DiagError,
329 Summary: "Mismatched brackets",
330 Detail: "A JSON array must be closed with a bracket, not a brace.",
331 Subject: p.Peek().Range.Ptr(),
332 })
333 case tokenBrackC:
334 break Token
335 default:
336 recover(p.Read())
337 return nil, diags.Append(&hcl.Diagnostic{
338 Severity: hcl.DiagError,
339 Summary: "Missing attribute seperator comma",
340 Detail: "A comma must appear between each value in an array.",
341 Subject: p.Peek().Range.Ptr(),
342 })
343 }
344
345 }
346
347 close := p.Read()
348 return &arrayVal{
349 Values: vals,
350 SrcRange: hcl.RangeBetween(open.Range, close.Range),
351 OpenRange: open.Range,
352 }, diags
353}
354
355func parseNumber(p *peeker) (node, hcl.Diagnostics) {
356 tok := p.Read()
357
358 // Use encoding/json to validate the number syntax.
359 // TODO: Do this more directly to produce better diagnostics.
360 var num json.Number
361 err := json.Unmarshal(tok.Bytes, &num)
362 if err != nil {
363 return nil, hcl.Diagnostics{
364 {
365 Severity: hcl.DiagError,
366 Summary: "Invalid JSON number",
367 Detail: "There is a syntax error in the given JSON number.",
368 Subject: &tok.Range,
369 },
370 }
371 }
372
373 f, _, err := big.ParseFloat(string(num), 10, 512, big.ToNearestEven)
374 if err != nil {
375 // Should never happen if above passed, since JSON numbers are a subset
376 // of what big.Float can parse...
377 return nil, hcl.Diagnostics{
378 {
379 Severity: hcl.DiagError,
380 Summary: "Invalid JSON number",
381 Detail: "There is a syntax error in the given JSON number.",
382 Subject: &tok.Range,
383 },
384 }
385 }
386
387 return &numberVal{
388 Value: f,
389 SrcRange: tok.Range,
390 }, nil
391}
392
393func parseString(p *peeker) (node, hcl.Diagnostics) {
394 tok := p.Read()
395 var str string
396 err := json.Unmarshal(tok.Bytes, &str)
397
398 if err != nil {
399 var errRange hcl.Range
400 if serr, ok := err.(*json.SyntaxError); ok {
401 errOfs := serr.Offset
402 errPos := tok.Range.Start
403 errPos.Byte += int(errOfs)
404
405 // TODO: Use the byte offset to properly count unicode
406 // characters for the column, and mark the whole of the
407 // character that was wrong as part of our range.
408 errPos.Column += int(errOfs)
409
410 errEndPos := errPos
411 errEndPos.Byte++
412 errEndPos.Column++
413
414 errRange = hcl.Range{
415 Filename: tok.Range.Filename,
416 Start: errPos,
417 End: errEndPos,
418 }
419 } else {
420 errRange = tok.Range
421 }
422
423 var contextRange *hcl.Range
424 if errRange != tok.Range {
425 contextRange = &tok.Range
426 }
427
428 // FIXME: Eventually we should parse strings directly here so
429 // we can produce a more useful error message in the face of things
430 // such as invalid escapes, etc.
431 return nil, hcl.Diagnostics{
432 {
433 Severity: hcl.DiagError,
434 Summary: "Invalid JSON string",
435 Detail: "There is a syntax error in the given JSON string.",
436 Subject: &errRange,
437 Context: contextRange,
438 },
439 }
440 }
441
442 return &stringVal{
443 Value: str,
444 SrcRange: tok.Range,
445 }, nil
446}
447
448func parseKeyword(p *peeker) (node, hcl.Diagnostics) {
449 tok := p.Read()
450 s := string(tok.Bytes)
451
452 switch s {
453 case "true":
454 return &booleanVal{
455 Value: true,
456 SrcRange: tok.Range,
457 }, nil
458 case "false":
459 return &booleanVal{
460 Value: false,
461 SrcRange: tok.Range,
462 }, nil
463 case "null":
464 return &nullVal{
465 SrcRange: tok.Range,
466 }, nil
467 case "undefined", "NaN", "Infinity":
468 return nil, hcl.Diagnostics{
469 {
470 Severity: hcl.DiagError,
471 Summary: "Invalid JSON keyword",
472 Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s),
473 Subject: &tok.Range,
474 },
475 }
476 default:
477 var dym string
478 if suggest := keywordSuggestion(s); suggest != "" {
479 dym = fmt.Sprintf(" Did you mean %q?", suggest)
480 }
481
482 return nil, hcl.Diagnostics{
483 {
484 Severity: hcl.DiagError,
485 Summary: "Invalid JSON keyword",
486 Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym),
487 Subject: &tok.Range,
488 },
489 }
490 }
491}
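As a hand-worked illustration of the recovery behaviour described in the comments above (the input is invented): given {"a" {"b": 1}, "c": 2}, the parser reads the inner { where it expected a colon after "a". recover is then called with that brace, counts nesting as it reads ahead, and stops once the brace depth drops back, leaving the peeker just after the inner object's closing }. The "Missing attribute value colon" diagnostic is reported against that brace, and any parsing that continues afterwards does so from a sensible point in the token stream rather than from the middle of the nested object.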
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
new file mode 100644
index 0000000..fc7bbf5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
@@ -0,0 +1,25 @@
1package json
2
3type peeker struct {
4 tokens []token
5 pos int
6}
7
8func newPeeker(tokens []token) *peeker {
9 return &peeker{
10 tokens: tokens,
11 pos: 0,
12 }
13}
14
15func (p *peeker) Peek() token {
16 return p.tokens[p.pos]
17}
18
19func (p *peeker) Read() token {
20 ret := p.tokens[p.pos]
21 if ret.Type != tokenEOF {
22 p.pos++
23 }
24 return ret
25}
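A small sketch of the peeker's contract follows; it is illustrative only, assumes placement inside package json, and takes its tokens from the scanner.

func drainTokensSketch(tokens []token) {
	p := newPeeker(tokens)
	for p.Peek().Type != tokenEOF {
		_ = p.Read() // consumes exactly one token and advances pos
	}
	// Read never advances past the EOF token, so calling it again is safe
	// and simply returns the same EOF token.
	_ = p.Read()
}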
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/public.go b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go
new file mode 100644
index 0000000..2728aa1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go
@@ -0,0 +1,94 @@
1package json
2
3import (
4 "fmt"
5 "io/ioutil"
6 "os"
7
8 "github.com/hashicorp/hcl2/hcl"
9)
10
11// Parse attempts to parse the given buffer as JSON and, if successful, returns
12// a hcl.File for the HCL configuration represented by it.
13//
14// This is not a generic JSON parser. Instead, it deals only with the profile
15// of JSON used to express HCL configuration.
16//
17// The returned file is valid only if the returned diagnostics returns false
18// from its HasErrors method. If HasErrors returns true, the file represents
19// the subset of data that was able to be parsed, which may be none.
20func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
21 rootNode, diags := parseFileContent(src, filename)
22
23 switch rootNode.(type) {
24 case *objectVal, *arrayVal:
25 // okay
26 default:
27 diags = diags.Append(&hcl.Diagnostic{
28 Severity: hcl.DiagError,
29 Summary: "Root value must be object",
30 Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.",
31 Subject: rootNode.StartRange().Ptr(),
32 })
33
34 // Since we've already produced an error message for this being
35 // invalid, we'll return an empty placeholder here so that trying to
36 // extract content from our root body won't produce a redundant
37 // error saying the same thing again in more general terms.
38 fakePos := hcl.Pos{
39 Byte: 0,
40 Line: 1,
41 Column: 1,
42 }
43 fakeRange := hcl.Range{
44 Filename: filename,
45 Start: fakePos,
46 End: fakePos,
47 }
48 rootNode = &objectVal{
49 Attrs: []*objectAttr{},
50 SrcRange: fakeRange,
51 OpenRange: fakeRange,
52 }
53 }
54
55 file := &hcl.File{
56 Body: &body{
57 val: rootNode,
58 },
59 Bytes: src,
60 Nav: navigation{rootNode},
61 }
62 return file, diags
63}
64
65// ParseFile is a convenience wrapper around Parse that first attempts to load
66// data from the given filename, passing the result to Parse if successful.
67//
68// If the file cannot be read, an error diagnostic with nil context is returned.
69func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
70 f, err := os.Open(filename)
71 if err != nil {
72 return nil, hcl.Diagnostics{
73 {
74 Severity: hcl.DiagError,
75 Summary: "Failed to open file",
76 Detail: fmt.Sprintf("The file %q could not be opened.", filename),
77 },
78 }
79 }
80 defer f.Close()
81
82 src, err := ioutil.ReadAll(f)
83 if err != nil {
84 return nil, hcl.Diagnostics{
85 {
86 Severity: hcl.DiagError,
87 Summary: "Failed to read file",
88 Detail: fmt.Sprintf("The file %q was opened, but an error occured while reading it.", filename),
89 },
90 }
91 }
92
93 return Parse(src, filename)
94}
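For context, a minimal external usage sketch of Parse; the program below is not part of the vendored code, and the example document and attribute name are invented. It assumes the import path shown in this diff and reads the result through the generic hcl.Body API (JustAttributes).

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl/json"
)

func main() {
	src := []byte(`{"io_mode": "async"}`)

	f, diags := json.Parse(src, "config.json")
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}

	// The returned *hcl.File exposes the generic HCL body API, so the same
	// calling code works for both native syntax and JSON syntax files.
	attrs, attrDiags := f.Body.JustAttributes()
	if attrDiags.HasErrors() {
		fmt.Println(attrDiags.Error())
		return
	}
	for name, attr := range attrs {
		val, _ := attr.Expr.Value(nil)
		fmt.Printf("%s = %q\n", name, val.AsString())
	}
}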
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
new file mode 100644
index 0000000..0a8378b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
@@ -0,0 +1,293 @@
1package json
2
3import (
4 "fmt"
5
6 "github.com/apparentlymart/go-textseg/textseg"
7 "github.com/hashicorp/hcl2/hcl"
8)
9
10//go:generate stringer -type tokenType scanner.go
11type tokenType rune
12
13const (
14 tokenBraceO tokenType = '{'
15 tokenBraceC tokenType = '}'
16 tokenBrackO tokenType = '['
17 tokenBrackC tokenType = ']'
18 tokenComma tokenType = ','
19 tokenColon tokenType = ':'
20 tokenKeyword tokenType = 'K'
21 tokenString tokenType = 'S'
22 tokenNumber tokenType = 'N'
23 tokenEOF tokenType = '␄'
24 tokenInvalid tokenType = 0
25 tokenEquals tokenType = '=' // used only for reminding the user of JSON syntax
26)
27
28type token struct {
29 Type tokenType
30 Bytes []byte
31 Range hcl.Range
32}
33
34// scan returns the primary tokens for the given JSON buffer in sequence.
35//
36// The responsibility of this pass is to just mark the slices of the buffer
37// as being of various types. It is lax in how it interprets the multi-byte
38// token types keyword, string and number, preferring to capture erroneous
39// extra bytes that we presume the user intended to be part of the token
40// so that we can generate more helpful diagnostics in the parser.
41func scan(buf []byte, start pos) []token {
42 var tokens []token
43 p := start
44 for {
45 if len(buf) == 0 {
46 tokens = append(tokens, token{
47 Type: tokenEOF,
48 Bytes: nil,
49 Range: posRange(p, p),
50 })
51 return tokens
52 }
53
54 buf, p = skipWhitespace(buf, p)
55
56 if len(buf) == 0 {
57 tokens = append(tokens, token{
58 Type: tokenEOF,
59 Bytes: nil,
60 Range: posRange(p, p),
61 })
62 return tokens
63 }
64
65 start = p
66
67 first := buf[0]
68 switch {
69 case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=':
70 p.Pos.Column++
71 p.Pos.Byte++
72 tokens = append(tokens, token{
73 Type: tokenType(first),
74 Bytes: buf[0:1],
75 Range: posRange(start, p),
76 })
77 buf = buf[1:]
78 case first == '"':
79 var tokBuf []byte
80 tokBuf, buf, p = scanString(buf, p)
81 tokens = append(tokens, token{
82 Type: tokenString,
83 Bytes: tokBuf,
84 Range: posRange(start, p),
85 })
86 case byteCanStartNumber(first):
87 var tokBuf []byte
88 tokBuf, buf, p = scanNumber(buf, p)
89 tokens = append(tokens, token{
90 Type: tokenNumber,
91 Bytes: tokBuf,
92 Range: posRange(start, p),
93 })
94 case byteCanStartKeyword(first):
95 var tokBuf []byte
96 tokBuf, buf, p = scanKeyword(buf, p)
97 tokens = append(tokens, token{
98 Type: tokenKeyword,
99 Bytes: tokBuf,
100 Range: posRange(start, p),
101 })
102 default:
103 tokens = append(tokens, token{
104 Type: tokenInvalid,
105 Bytes: buf[:1],
106 Range: start.Range(1, 1),
107 })
108 // If we've encountered an invalid then we might as well stop
109 // scanning since the parser won't proceed beyond this point.
110 return tokens
111 }
112 }
113}
114
115func byteCanStartNumber(b byte) bool {
116 switch b {
117 // We are slightly more tolerant than JSON requires here since we
118 // expect the parser will make a stricter interpretation of the
119 // number bytes, but we specifically don't allow 'e' or 'E' here
120 // since we want the scanner to treat that as the start of an
121 // invalid keyword instead, to produce more intelligible error messages.
122 case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
123 return true
124 default:
125 return false
126 }
127}
128
129func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) {
130 // The scanner doesn't check that the sequence of digit-ish bytes is
131 // in a valid order. The parser must do this when decoding a number
132 // token.
133 var i int
134 p := start
135Byte:
136 for i = 0; i < len(buf); i++ {
137 switch buf[i] {
138 case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
139 p.Pos.Byte++
140 p.Pos.Column++
141 default:
142 break Byte
143 }
144 }
145 return buf[:i], buf[i:], p
146}
147
148func byteCanStartKeyword(b byte) bool {
149 switch {
150 // We allow any sequence of alphabetical characters here, even though
151 // JSON is more constrained, so that we can collect what we presume
152 // the user intended to be a single keyword and then check its validity
153 // in the parser, where we can generate better diagnostics.
154 // So e.g. we want to be able to say:
155 // unrecognized keyword "True". Did you mean "true"?
156 case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z'):
157 return true
158 default:
159 return false
160 }
161}
162
163func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) {
164 var i int
165 p := start
166Byte:
167 for i = 0; i < len(buf); i++ {
168 b := buf[i]
169 switch {
170 case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_':
171 p.Pos.Byte++
172 p.Pos.Column++
173 default:
174 break Byte
175 }
176 }
177 return buf[:i], buf[i:], p
178}
179
180func scanString(buf []byte, start pos) ([]byte, []byte, pos) {
181 // The scanner doesn't validate correct use of escapes, etc. It pays
182 // attention to escapes only for the purpose of identifying the closing
183 // quote character. It's the parser's responsibility to do proper
184 // validation.
185 //
186 // The scanner also doesn't specifically detect unterminated string
187 // literals, though they can be identified in the parser by checking if
188 // the final byte in a string token is the double-quote character.
189
190 // Skip the opening quote symbol
191 i := 1
192 p := start
193 p.Pos.Byte++
194 p.Pos.Column++
195 escaping := false
196Byte:
197 for i < len(buf) {
198 b := buf[i]
199
200 switch {
201 case b == '\\':
202 escaping = !escaping
203 p.Pos.Byte++
204 p.Pos.Column++
205 i++
206 case b == '"':
207 p.Pos.Byte++
208 p.Pos.Column++
209 i++
210 if !escaping {
211 break Byte
212 }
213 escaping = false
214 case b < 32:
215 break Byte
216 default:
217 // Advance by one grapheme cluster, so that we consider each
218 // grapheme to be a "column".
219 // Ignoring error because this scanner cannot produce errors.
220 advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true)
221
222 p.Pos.Byte += advance
223 p.Pos.Column++
224 i += advance
225
226 escaping = false
227 }
228 }
229 return buf[:i], buf[i:], p
230}
231
232func skipWhitespace(buf []byte, start pos) ([]byte, pos) {
233 var i int
234 p := start
235Byte:
236 for i = 0; i < len(buf); i++ {
237 switch buf[i] {
238 case ' ':
239 p.Pos.Byte++
240 p.Pos.Column++
241 case '\n':
242 p.Pos.Byte++
243 p.Pos.Column = 1
244 p.Pos.Line++
245 case '\r':
246 // For the purpose of line/column counting we consider a
247 // carriage return to take up no space, assuming that it will
248 // be paired up with a newline (on Windows, for example) that
249 // will account for both of them.
250 p.Pos.Byte++
251 case '\t':
252 // We arbitrarily count a tab as if it were two spaces, because
253 // we need to choose _some_ number here. This means any system
254 // that renders code on-screen with markers must itself treat
255 // tabs as a pair of spaces for rendering purposes, or instead
256 use the byte offset to work back to its own column position.
257 p.Pos.Byte++
258 p.Pos.Column += 2
259 default:
260 break Byte
261 }
262 }
263 return buf[i:], p
264}
265
266type pos struct {
267 Filename string
268 Pos hcl.Pos
269}
270
271func (p *pos) Range(byteLen, charLen int) hcl.Range {
272 start := p.Pos
273 end := p.Pos
274 end.Byte += byteLen
275 end.Column += charLen
276 return hcl.Range{
277 Filename: p.Filename,
278 Start: start,
279 End: end,
280 }
281}
282
283func posRange(start, end pos) hcl.Range {
284 return hcl.Range{
285 Filename: start.Filename,
286 Start: start.Pos,
287 End: end.Pos,
288 }
289}
290
291func (t token) GoString() string {
292 return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
293}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
new file mode 100644
index 0000000..9b33c7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
@@ -0,0 +1,405 @@
1# HCL JSON Syntax Specification
2
3This is the specification for the JSON serialization for hcl. HCL is a system
4for defining configuration languages for applications. The HCL information
5model is designed to support multiple concrete syntaxes for configuration,
6and this JSON-based format complements [the native syntax](../hclsyntax/spec.md)
7by being easy to machine-generate, whereas the native syntax is oriented
8towards human authoring and maintenance.
9
10This syntax is defined in terms of JSON as defined in
11[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
12grammar as-is, and merely defines a specific methodology for interpreting
13JSON constructs into HCL structural elements and expressions.
14
15This mapping is defined such that valid JSON-serialized HCL input can be
16_produced_ using standard JSON implementations in various programming languages.
17_Parsing_ such JSON has some additional constraints beyond what is normally
18supported by JSON parsers, so a specialized parser may be required that
19is able to:
20
21* Preserve the relative ordering of properties defined in an object.
22* Preserve multiple definitions of the same property name.
23* Preserve numeric values to the precision required by the number type
24 in [the HCL syntax-agnostic information model](../spec.md).
25* Retain source location information for parsed tokens/constructs in order
26 to produce good error messages.
27
28## Structural Elements
29
30[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
31abstract container for attribute definitions and child blocks. A body is
32represented in JSON as either a single JSON object or a JSON array of objects.
33
34Body processing is in terms of JSON object properties, visited in the order
35they appear in the input. Where a body is represented by a single JSON object,
36the properties of that object are visited in order. Where a body is
37represented by a JSON array, each of its elements are visited in order and
38each element has its properties visited in order. If any element of the array
39is not a JSON object then the input is erroneous.
40
41When a body is being processed in the _dynamic attributes_ mode, the allowance
42of a JSON array in the previous paragraph does not apply and instead a single
43JSON object is always required.
44
45As defined in the language-agnostic model, body processing is in terms
46of a schema which provides context for interpreting the body's content. For
47JSON bodies, the schema is crucial to allow differentiation of attribute
48definitions and block definitions, both of which are represented via object
49properties.
50
51The special property name `"//"`, when used in an object representing a HCL
52body, is parsed and ignored. A property with this name can be used to
53include human-readable comments. (This special property name is _not_
54processed in this way for any _other_ HCL constructs that are represented as
55JSON objects.)
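
For example, a body object might carry a comment alongside its real content
(a minimal illustration; the property names other than `"//"` are arbitrary):

```json
{
  "//": "This property is ignored, so it can hold a human-readable comment.",
  "foo": "bar"
}
```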
56
57### Attributes
58
59Where the given schema describes an attribute with a given name, the object
60property with the matching name — if present — serves as the attribute's
61definition.
62
63When a body is being processed in the _dynamic attributes_ mode, each object
64property serves as an attribute definition for the attribute whose name
65matches the property name.
66
67The value of an attribute definition property is interpreted as an _expression_,
68as described in a later section.
69
70Given a schema that calls for an attribute named "foo", a JSON object like
71the following provides a definition for that attribute:
72
73```json
74{
75 "foo": "bar baz"
76}
77```
78
79### Blocks
80
81Where the given schema describes a block with a given type name, each object
82property with the matching name serves as a definition of zero or more blocks
83of that type.
84
85Processing of child blocks is in terms of nested JSON objects and arrays.
86If the schema defines one or more _labels_ for the block type, a nested JSON
87object or JSON array of objects is required for each labelling level. These
88are flattened to a single ordered sequence of object properties using the
89same algorithm as for body content as defined above. Each object property
90serves as a label value at the corresponding level.
91
92After any labelling levels, the next nested value is either a JSON object
93representing a single block body, or a JSON array of JSON objects that each
94represent a single block body. Use of an array accommodates the definition
95of multiple blocks that have identical type and labels.
96
97Given a schema that calls for a block type named "foo" with no labels, the
98following JSON objects are all valid definitions of zero or more blocks of this
99type:
100
101```json
102{
103 "foo": {
104 "child_attr": "baz"
105 }
106}
107```
108
109```json
110{
111 "foo": [
112 {
113 "child_attr": "baz"
114 },
115 {
116 "child_attr": "boz"
117 }
118 ]
119}
120```
121```json
122{
123 "foo": []
124}
125```
126
127The first of these defines a single child block of type "foo". The second
128defines _two_ such blocks. The final example shows a degenerate definition
129of zero blocks, though generators should prefer to omit the property entirely
130in this scenario.
131
132Given a schema that calls for a block type named "foo" with _two_ labels, the
133extra label levels must be represented as objects or arrays of objects as in
134the following examples:
135
136```json
137{
138 "foo": {
139 "bar": {
140 "baz": {
141 "child_attr": "baz"
142 },
143 "boz": {
144 "child_attr": "baz"
145 }
146 },
147 "boz": {
148 "baz": {
149 "child_attr": "baz"
150      }
151 }
152 }
153}
154```
155
156```json
157{
158 "foo": {
159 "bar": {
160 "baz": {
161 "child_attr": "baz"
162 },
163 "boz": {
164 "child_attr": "baz"
165 }
166 },
167 "boz": {
168 "baz": [
169 {
170 "child_attr": "baz"
171 },
172 {
173 "child_attr": "boz"
174 }
175 ]
176 }
177 }
178}
179```
180
181```json
182{
183 "foo": [
184 {
185 "bar": {
186 "baz": {
187 "child_attr": "baz"
188 },
189 "boz": {
190 "child_attr": "baz"
191 }
192        }
193 },
194 {
195 "bar": {
196 "baz": [
197 {
198 "child_attr": "baz"
199 },
200 {
201 "child_attr": "boz"
202 }
203 ]
204 }
205 }
206 ]
207}
208```
209
210```json
211{
212 "foo": {
213 "bar": {
214 "baz": {
215 "child_attr": "baz"
216 },
217 "boz": {
218 "child_attr": "baz"
219 }
220 },
221 "bar": {
222 "baz": [
223 {
224 "child_attr": "baz"
225 },
226 {
227 "child_attr": "boz"
228 }
229 ]
230 }
231 }
232}
233```
234
235Arrays can be introduced at either the label definition or block body
236definition levels to define multiple definitions of the same block type
237or labels while preserving order.
238
239A JSON HCL parser _must_ support duplicate definitions of the same property
240name within a single object, preserving all of them and the relative ordering
241between them. The array-based forms are also required so that JSON HCL
242configurations can be produced with JSON producing libraries that are not
243able to preserve property definition order and multiple definitions of
244the same property.
245
246## Expressions
247
248JSON lacks a native expression syntax, so the HCL JSON syntax instead defines
249a mapping for each of the JSON value types, including a special mapping for
250strings that allows optional use of arbitrary expressions.
251
252### Objects
253
254When interpreted as an expression, a JSON object represents a value of a HCL
255object type.
256
257Each property of the JSON object represents an attribute of the HCL object type.
258The property name string given in the JSON input is interpreted as a string
259expression as described below, and its result is converted to string as defined
260by the syntax-agnostic information model. If such a conversion is not possible,
261an error is produced and evaluation fails.
262
263An instance of the constructed object type is then created, whose values
264are interpreted by again recursively applying the mapping rules defined in
265this section to each of the property values.
266
267If any evaluated property name strings produce null values, an error is
268produced and evaluation fails. If any produce _unknown_ values, the _entire
269object's_ result is an unknown value of the dynamic pseudo-type, signalling
270that the type of the object cannot be determined.
271
272It is an error to define the same property name multiple times within a single
273JSON object interpreted as an expression. In full expression mode, this
274constraint applies to the name expression results after conversion to string,
275rather than the raw string that may contain interpolation expressions.
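
For example, in full expression mode a property name may itself contain an
interpolation sequence. In the following sketch, `env` stands for an
illustrative variable assumed to be in the evaluation scope, so the resulting
object has a single attribute whose name depends on that variable's value:

```json
{
  "${env}_count": 3
}
```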
276
277### Arrays
278
279When interpreted as an expression, a JSON array represents a value of a HCL
280tuple type.
281
282Each element of the JSON array represents an element of the HCL tuple type.
283The tuple type is constructed by enumerating the JSON array elements, creating
284for each an element whose type is the result of recursively applying the
285expression mapping rules. Correspondence is preserved between the array element
286indices and the tuple element indices.
287
288An instance of the constructed tuple type is then created, whose values are
289interpreted by again recursively applying the mapping rules defined in this
290section.
291
292### Numbers
293
294When interpreted as an expression, a JSON number represents a HCL number value.
295
296HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
297be able to translate exactly the value given to a number of corresponding
298precision, within the constraints set by the HCL syntax-agnostic information
299model.
300
301In practice, off-the-shelf JSON serializers often do not support customizing the
302processing of numbers, and instead force processing as 32-bit or 64-bit
303floating point values.
304
305A _producer_ of JSON HCL that uses such a serializer can provide numeric values
306as JSON strings where they have precision too great for representation in the
307serializer's chosen numeric type in situations where the result will be
308converted to number (using the standard conversion rules) by a calling
309application.
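
For example, a producer limited to 64-bit floating point numbers might emit a
high-precision value as a string, relying on the calling application to
convert it back to a number (the attribute name here is illustrative):

```json
{
  "huge_number": "9007199254740993"
}
```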
310
311Alternatively, for expressions that are evaluated in full expression mode an
312embedded template interpolation can be used to faithfully represent a number,
313such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
314syntax expression evaluator.
315
316### Boolean Values
317
318The JSON boolean values `true` and `false`, when interpreted as expressions,
319represent the corresponding HCL boolean values.
320
321### The Null Value
322
323The JSON value `null`, when interpreted as an expression, represents a
324HCL null value of the dynamic pseudo-type.
325
326### Strings
327
328When interpreted as an expression, a JSON string may be interpreted in one of
329two ways depending on the evaluation mode.
330
331If evaluating in literal-only mode (as defined by the syntax-agnostic
332information model) the literal string is interpreted directly as a HCL string
333value, by directly using the exact sequence of unicode characters represented.
334Template interpolations and directives MUST NOT be processed in this mode,
335allowing any characters that appear as introduction sequences to pass through
336literally:
337
338```json
339"Hello world! Template sequences like ${ are not intepreted here."
340```
341
342When evaluating in full expression mode (again, as defined by the syntax-
343agnostic information model) the literal string is instead interpreted as a
344_standalone template_ in the HCL Native Syntax. The expression evaluation
345result is then the direct result of evaluating that template with the current
346variable scope and function table.
347
348```json
349"Hello, ${name}! Template sequences are interpreted in full expression mode."
350```
351
352In particular the _Template Interpolation Unwrapping_ requirement from the
353HCL native syntax specification must be implemented, allowing the use of
354single-interpolation templates to represent expressions that would not
355otherwise be representable in JSON, such as the following example where
356the result must be a number, rather than a string representation of a number:
357
358```json
359"${ a + b }"
360```
361
362## Static Analysis
363
364The HCL static analysis operations are implemented for JSON values that
365represent expressions, as described in the following sections.
366
367Due to the limited expressive power of the JSON syntax alone, the use of these
368static analysis functions rather than normal expression evaluation provides
369additional context for how a JSON value is to be interpreted, which means
370that static analysis can result in a different interpretation of a given
371expression than normal evaluation would.
372
373### Static List
374
375An expression interpreted as a static list must be a JSON array. Each of the
376values in the array is interpreted as an expression and returned.
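
For example, the following array could be interpreted as a static list of
three expressions (`b` stands for some variable available at evaluation time):

```json
["a", "${b}", 3]
```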
377
378### Static Map
379
380An expression interpreted as a static map must be a JSON object. Each of the
381key/value pairs in the object is presented as a pair of expressions. Since
382object property names are always strings, evaluating the key expression with
383a non-`nil` evaluation context will evaluate any template sequences given
384in the property name.
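
For example, the following object could be interpreted as a static map of two
key/value expression pairs, where the second key contains a template sequence
(`key_name` is an illustrative variable):

```json
{
  "first": "a",
  "${key_name}": "b"
}
```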
385
386### Static Call
387
388An expression interpreted as a static call must be a string. The content of
389the string is interpreted as a native syntax expression (not a _template_,
390unlike normal evaluation) and then the static call analysis is delegated to
391that expression.
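
For example, the following string could be analyzed as a static call, with the
call detection delegated to the native syntax expression it contains (the
function name is purely illustrative):

```json
"max(1, 2)"
```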
392
393If the original expression is not a string or its contents cannot be parsed
394as a native syntax expression then static call analysis is not supported.
395
396### Static Traversal
397
398An expression interpreted as a static traversal must be a string. The content
399of the string is interpreted as a native syntax expression (not a _template_,
400unlike normal evaluation) and then static traversal analysis is delegated
401to that expression.
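
For example, the following string could be analyzed as a static traversal
(the variable and attribute names are illustrative):

```json
"foo.bar[0]"
```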
402
403If the original expression is not a string or its contents cannot be parsed
404as a native syntax expression then static traversal analysis is not supported.
405
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
new file mode 100644
index 0000000..28dcf52
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
@@ -0,0 +1,616 @@
1package json
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10)
11
12// body is the implementation of "Body" used for files processed with the JSON
13// parser.
14type body struct {
15 val node
16
17 // If non-nil, the keys of this map cause the corresponding attributes to
18 // be treated as non-existing. This is used when Body.PartialContent is
19 // called, to produce the "remaining content" Body.
20 hiddenAttrs map[string]struct{}
21}
22
23// expression is the implementation of "Expression" used for files processed
24// with the JSON parser.
25type expression struct {
26 src node
27}
28
29func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
30 content, newBody, diags := b.PartialContent(schema)
31
32 hiddenAttrs := newBody.(*body).hiddenAttrs
33
34 var nameSuggestions []string
35 for _, attrS := range schema.Attributes {
36 if _, ok := hiddenAttrs[attrS.Name]; !ok {
37 // Only suggest an attribute name if we didn't use it already.
38 nameSuggestions = append(nameSuggestions, attrS.Name)
39 }
40 }
41 for _, blockS := range schema.Blocks {
42 // Blocks can appear multiple times, so we'll suggest their type
43 // names regardless of whether they've already been used.
44 nameSuggestions = append(nameSuggestions, blockS.Type)
45 }
46
47 jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil)
48 diags = append(diags, attrDiags...)
49
50 for _, attr := range jsonAttrs {
51 k := attr.Name
52 if k == "//" {
53 // Ignore "//" keys in objects representing bodies, to allow
54 // their use as comments.
55 continue
56 }
57
58 if _, ok := hiddenAttrs[k]; !ok {
59 suggestion := nameSuggestion(k, nameSuggestions)
60 if suggestion != "" {
61 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
62 }
63
64 diags = append(diags, &hcl.Diagnostic{
65 Severity: hcl.DiagError,
66 Summary: "Extraneous JSON object property",
67 Detail: fmt.Sprintf("No attribute or block type is named %q.%s", k, suggestion),
68 Subject: &attr.NameRange,
69 Context: attr.Range().Ptr(),
70 })
71 }
72 }
73
74 return content, diags
75}
76
77func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
78 var diags hcl.Diagnostics
79
80 jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil)
81 diags = append(diags, attrDiags...)
82
83 usedNames := map[string]struct{}{}
84 if b.hiddenAttrs != nil {
85 for k := range b.hiddenAttrs {
86 usedNames[k] = struct{}{}
87 }
88 }
89
90 content := &hcl.BodyContent{
91 Attributes: map[string]*hcl.Attribute{},
92 Blocks: nil,
93
94 MissingItemRange: b.MissingItemRange(),
95 }
96
97 // Create some more convenient data structures for our work below.
98 attrSchemas := map[string]hcl.AttributeSchema{}
99 blockSchemas := map[string]hcl.BlockHeaderSchema{}
100 for _, attrS := range schema.Attributes {
101 attrSchemas[attrS.Name] = attrS
102 }
103 for _, blockS := range schema.Blocks {
104 blockSchemas[blockS.Type] = blockS
105 }
106
107 for _, jsonAttr := range jsonAttrs {
108 attrName := jsonAttr.Name
109 if _, used := b.hiddenAttrs[attrName]; used {
110 continue
111 }
112
113 if attrS, defined := attrSchemas[attrName]; defined {
114 if existing, exists := content.Attributes[attrName]; exists {
115 diags = append(diags, &hcl.Diagnostic{
116 Severity: hcl.DiagError,
117 Summary: "Duplicate attribute definition",
118 Detail: fmt.Sprintf("The attribute %q was already defined at %s.", attrName, existing.Range),
119 Subject: &jsonAttr.NameRange,
120 Context: jsonAttr.Range().Ptr(),
121 })
122 continue
123 }
124
125 content.Attributes[attrS.Name] = &hcl.Attribute{
126 Name: attrS.Name,
127 Expr: &expression{src: jsonAttr.Value},
128 Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()),
129 NameRange: jsonAttr.NameRange,
130 }
131 usedNames[attrName] = struct{}{}
132
133 } else if blockS, defined := blockSchemas[attrName]; defined {
134 bv := jsonAttr.Value
135 blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks)
136 diags = append(diags, blockDiags...)
137 usedNames[attrName] = struct{}{}
138 }
139
140 // We ignore anything that isn't defined because that's the
141 // PartialContent contract. The Content method will catch leftovers.
142 }
143
144 // Make sure we got all the required attributes.
145 for _, attrS := range schema.Attributes {
146 if !attrS.Required {
147 continue
148 }
149 if _, defined := content.Attributes[attrS.Name]; !defined {
150 diags = append(diags, &hcl.Diagnostic{
151 Severity: hcl.DiagError,
152 Summary: "Missing required attribute",
153 Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name),
154 Subject: b.MissingItemRange().Ptr(),
155 })
156 }
157 }
158
159 unusedBody := &body{
160 val: b.val,
161 hiddenAttrs: usedNames,
162 }
163
164 return content, unusedBody, diags
165}
166
167// JustAttributes for JSON bodies interprets all properties of the wrapped
168// JSON object as attributes and returns them.
169func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
170 var diags hcl.Diagnostics
171 attrs := make(map[string]*hcl.Attribute)
172
173 obj, ok := b.val.(*objectVal)
174 if !ok {
175 diags = append(diags, &hcl.Diagnostic{
176 Severity: hcl.DiagError,
177 Summary: "Incorrect JSON value type",
178 Detail: "A JSON object is required here, defining the attributes for this block.",
179 Subject: b.val.StartRange().Ptr(),
180 })
181 return attrs, diags
182 }
183
184 for _, jsonAttr := range obj.Attrs {
185 name := jsonAttr.Name
186 if name == "//" {
187 // Ignore "//" keys in objects representing bodies, to allow
188 // their use as comments.
189 continue
190 }
191
192 if _, hidden := b.hiddenAttrs[name]; hidden {
193 continue
194 }
195
196 if existing, exists := attrs[name]; exists {
197 diags = append(diags, &hcl.Diagnostic{
198 Severity: hcl.DiagError,
199 Summary: "Duplicate attribute definition",
200 Detail: fmt.Sprintf("The attribute %q was already defined at %s.", name, existing.Range),
201 Subject: &jsonAttr.NameRange,
202 })
203 continue
204 }
205
206 attrs[name] = &hcl.Attribute{
207 Name: name,
208 Expr: &expression{src: jsonAttr.Value},
209 Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()),
210 NameRange: jsonAttr.NameRange,
211 }
212 }
213
214 // Aside from the duplicate definitions reported above, every JSON value
215 // can be a valid attribute value, so no further diagnostics are possible here.
216 return attrs, diags
217}
218
219func (b *body) MissingItemRange() hcl.Range {
220 switch tv := b.val.(type) {
221 case *objectVal:
222 return tv.CloseRange
223 case *arrayVal:
224 return tv.OpenRange
225 default:
226 // Should not happen in correct operation, but might show up if the
227 // input is invalid and we are producing partial results.
228 return tv.StartRange()
229 }
230}
231
232func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) {
233 if len(labelsLeft) > 0 {
234 labelName := labelsLeft[0]
235 jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName)
236 diags = append(diags, attrDiags...)
237
238 if len(jsonAttrs) == 0 {
239 diags = diags.Append(&hcl.Diagnostic{
240 Severity: hcl.DiagError,
241 Summary: "Missing block label",
242 Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName),
243 Subject: v.StartRange().Ptr(),
244 })
245 return
246 }
247 labelsUsed := append(labelsUsed, "")
248 labelRanges := append(labelRanges, hcl.Range{})
249 for _, p := range jsonAttrs {
250 pk := p.Name
251 labelsUsed[len(labelsUsed)-1] = pk
252 labelRanges[len(labelRanges)-1] = p.NameRange
253 diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...)
254 }
255 return
256 }
257
258 // By the time we get here, we've peeled off all the labels and we're ready
259 // to deal with the block's actual content.
260
261 // need to copy the label slices because their underlying arrays will
262 // continue to be mutated after we return.
263 labels := make([]string, len(labelsUsed))
264 copy(labels, labelsUsed)
265 labelR := make([]hcl.Range, len(labelRanges))
266 copy(labelR, labelRanges)
267
268 switch tv := v.(type) {
269 case *objectVal:
270 // Single instance of the block
271 *blocks = append(*blocks, &hcl.Block{
272 Type: typeName,
273 Labels: labels,
274 Body: &body{
275 val: tv,
276 },
277
278 DefRange: tv.OpenRange,
279 TypeRange: *typeRange,
280 LabelRanges: labelR,
281 })
282 case *arrayVal:
283 // Multiple instances of the block
284 for _, av := range tv.Values {
285 *blocks = append(*blocks, &hcl.Block{
286 Type: typeName,
287 Labels: labels,
288 Body: &body{
289 val: av, // might be mistyped; we'll find out when content is requested for this body
290 },
291
292 DefRange: tv.OpenRange,
293 TypeRange: *typeRange,
294 LabelRanges: labelR,
295 })
296 }
297 default:
298 diags = diags.Append(&hcl.Diagnostic{
299 Severity: hcl.DiagError,
300 Summary: "Incorrect JSON value type",
301 Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName),
302 Subject: v.StartRange().Ptr(),
303 })
304 }
305 return
306}
307
308// collectDeepAttrs takes either a single object or an array of objects and
309// flattens it into a list of object attributes, collecting attributes from
310// all of the objects in a given array.
311//
312// Ordering is preserved, so a list of objects that each have one property
313// will result in those properties being returned in the same order as the
314// objects appeared in the array.
315//
316// This is appropriate for use only for objects representing bodies or labels
317// within a block.
318//
319// The labelName argument, if non-null, is used to tailor returned error
320// messages to refer to block labels rather than attributes and child blocks.
321// It has no other effect.
322func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) {
323 var diags hcl.Diagnostics
324 var attrs []*objectAttr
325
326 switch tv := v.(type) {
327
328 case *objectVal:
329 attrs = append(attrs, tv.Attrs...)
330
331 case *arrayVal:
332 for _, ev := range tv.Values {
333 switch tev := ev.(type) {
334 case *objectVal:
335 attrs = append(attrs, tev.Attrs...)
336 default:
337 if labelName != nil {
338 diags = append(diags, &hcl.Diagnostic{
339 Severity: hcl.DiagError,
340 Summary: "Incorrect JSON value type",
341 Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName),
342 Subject: ev.StartRange().Ptr(),
343 })
344 } else {
345 diags = append(diags, &hcl.Diagnostic{
346 Severity: hcl.DiagError,
347 Summary: "Incorrect JSON value type",
348 Detail: "A JSON object is required here, to define attributes and child blocks.",
349 Subject: ev.StartRange().Ptr(),
350 })
351 }
352 }
353 }
354
355 default:
356 if labelName != nil {
357 diags = append(diags, &hcl.Diagnostic{
358 Severity: hcl.DiagError,
359 Summary: "Incorrect JSON value type",
360 Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName),
361 Subject: v.StartRange().Ptr(),
362 })
363 } else {
364 diags = append(diags, &hcl.Diagnostic{
365 Severity: hcl.DiagError,
366 Summary: "Incorrect JSON value type",
367 Detail: "Either a JSON object or JSON array of objects is required here, to define attributes and child blocks.",
368 Subject: v.StartRange().Ptr(),
369 })
370 }
371 }
372
373 return attrs, diags
374}
375
376func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
377 switch v := e.src.(type) {
378 case *stringVal:
379 if ctx != nil {
380 // Parse string contents as a HCL native language expression.
381 // We only do this if we have a context, so passing a nil context
382 // is how the caller specifies that interpolations are not allowed
383 // and that the string should just be returned verbatim.
384 templateSrc := v.Value
385 expr, diags := hclsyntax.ParseTemplate(
386 []byte(templateSrc),
387 v.SrcRange.Filename,
388
389 // This won't produce _exactly_ the right result, since
390 // the hclsyntax parser can't "see" any escapes we removed
391 // while parsing JSON, but it's better than nothing.
392 hcl.Pos{
393 Line: v.SrcRange.Start.Line,
394
395 // skip over the opening quote mark
396 Byte: v.SrcRange.Start.Byte + 1,
397 Column: v.SrcRange.Start.Column + 1,
398 },
399 )
400 if diags.HasErrors() {
401 return cty.DynamicVal, diags
402 }
403 val, evalDiags := expr.Value(ctx)
404 diags = append(diags, evalDiags...)
405 return val, diags
406 }
407
408 return cty.StringVal(v.Value), nil
409 case *numberVal:
410 return cty.NumberVal(v.Value), nil
411 case *booleanVal:
412 return cty.BoolVal(v.Value), nil
413 case *arrayVal:
414 vals := []cty.Value{}
415 for _, jsonVal := range v.Values {
416 val, _ := (&expression{src: jsonVal}).Value(ctx)
417 vals = append(vals, val)
418 }
419 return cty.TupleVal(vals), nil
420 case *objectVal:
421 var diags hcl.Diagnostics
422 attrs := map[string]cty.Value{}
423 attrRanges := map[string]hcl.Range{}
424 known := true
425 for _, jsonAttr := range v.Attrs {
426 // In this one context we allow keys to contain interpolation
427 // expressions too, assuming we're evaluating in interpolation
428 // mode. This achieves parity with the native syntax where
429 // object expressions can have dynamic keys, while block contents
430 // may not.
431 name, nameDiags := (&expression{src: &stringVal{
432 Value: jsonAttr.Name,
433 SrcRange: jsonAttr.NameRange,
434 }}).Value(ctx)
435 val, valDiags := (&expression{src: jsonAttr.Value}).Value(ctx)
436 diags = append(diags, nameDiags...)
437 diags = append(diags, valDiags...)
438
439 var err error
440 name, err = convert.Convert(name, cty.String)
441 if err != nil {
442 diags = append(diags, &hcl.Diagnostic{
443 Severity: hcl.DiagError,
444 Summary: "Invalid object key expression",
445 Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err),
446 Subject: &jsonAttr.NameRange,
447 })
448 continue
449 }
450 if name.IsNull() {
451 diags = append(diags, &hcl.Diagnostic{
452 Severity: hcl.DiagError,
453 Summary: "Invalid object key expression",
454 Detail: "Cannot use null value as an object key.",
455 Subject: &jsonAttr.NameRange,
456 })
457 continue
458 }
459 if !name.IsKnown() {
460 // This is a bit of a weird case, since our usual rules require
461 // us to tolerate unknowns and just represent the result as
462 // best we can but if we don't know the key then we can't
463 // know the type of our object at all, and thus we must turn
464 // the whole thing into cty.DynamicVal. This is consistent with
465 // how this situation is handled in the native syntax.
466 // We'll keep iterating so we can collect other errors in
467 // subsequent attributes.
468 known = false
469 continue
470 }
471 nameStr := name.AsString()
472 if _, defined := attrs[nameStr]; defined {
473 diags = append(diags, &hcl.Diagnostic{
474 Severity: hcl.DiagError,
475 Summary: "Duplicate object attribute",
476 Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]),
477 Subject: &jsonAttr.NameRange,
478 })
479 continue
480 }
481 attrs[nameStr] = val
482 attrRanges[nameStr] = jsonAttr.NameRange
483 }
484 if !known {
485 // We encountered an unknown key somewhere along the way, so
486 // we can't know what our type will eventually be.
487 return cty.DynamicVal, diags
488 }
489 return cty.ObjectVal(attrs), diags
490 default:
491 // Default to DynamicVal so that ASTs containing invalid nodes can
492 // still be partially-evaluated.
493 return cty.DynamicVal, nil
494 }
495}
496
497func (e *expression) Variables() []hcl.Traversal {
498 var vars []hcl.Traversal
499
500 switch v := e.src.(type) {
501 case *stringVal:
502 templateSrc := v.Value
503 expr, diags := hclsyntax.ParseTemplate(
504 []byte(templateSrc),
505 v.SrcRange.Filename,
506
507 // This won't produce _exactly_ the right result, since
508 // the hclsyntax parser can't "see" any escapes we removed
509 // while parsing JSON, but it's better than nothing.
510 hcl.Pos{
511 Line: v.SrcRange.Start.Line,
512
513 // skip over the opening quote mark
514 Byte: v.SrcRange.Start.Byte + 1,
515 Column: v.SrcRange.Start.Column + 1,
516 },
517 )
518 if diags.HasErrors() {
519 return vars
520 }
521 return expr.Variables()
522
523 case *arrayVal:
524 for _, jsonVal := range v.Values {
525 vars = append(vars, (&expression{src: jsonVal}).Variables()...)
526 }
527 case *objectVal:
528 for _, jsonAttr := range v.Attrs {
529 vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...)
530 }
531 }
532
533 return vars
534}
535
536func (e *expression) Range() hcl.Range {
537 return e.src.Range()
538}
539
540func (e *expression) StartRange() hcl.Range {
541 return e.src.StartRange()
542}
543
544// Implementation for hcl.AbsTraversalForExpr.
545func (e *expression) AsTraversal() hcl.Traversal {
546 // In JSON-based syntax a traversal is given as a string containing
547 // traversal syntax as defined by hclsyntax.ParseTraversalAbs.
548
549 switch v := e.src.(type) {
550 case *stringVal:
551 traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start)
552 if diags.HasErrors() {
553 return nil
554 }
555 return traversal
556 default:
557 return nil
558 }
559}
560
561// Implementation for hcl.ExprCall.
562func (e *expression) ExprCall() *hcl.StaticCall {
563 // In JSON-based syntax a static call is given as a string containing
564 // an expression in the native syntax that also supports ExprCall.
565
566 switch v := e.src.(type) {
567 case *stringVal:
568 expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start)
569 if diags.HasErrors() {
570 return nil
571 }
572
573 call, diags := hcl.ExprCall(expr)
574 if diags.HasErrors() {
575 return nil
576 }
577
578 return call
579 default:
580 return nil
581 }
582}
583
584// Implementation for hcl.ExprList.
585func (e *expression) ExprList() []hcl.Expression {
586 switch v := e.src.(type) {
587 case *arrayVal:
588 ret := make([]hcl.Expression, len(v.Values))
589 for i, node := range v.Values {
590 ret[i] = &expression{src: node}
591 }
592 return ret
593 default:
594 return nil
595 }
596}
597
598// Implementation for hcl.ExprMap.
599func (e *expression) ExprMap() []hcl.KeyValuePair {
600 switch v := e.src.(type) {
601 case *objectVal:
602 ret := make([]hcl.KeyValuePair, len(v.Attrs))
603 for i, jsonAttr := range v.Attrs {
604 ret[i] = hcl.KeyValuePair{
605 Key: &expression{src: &stringVal{
606 Value: jsonAttr.Name,
607 SrcRange: jsonAttr.NameRange,
608 }},
609 Value: &expression{src: jsonAttr.Value},
610 }
611 }
612 return ret
613 default:
614 return nil
615 }
616}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go
new file mode 100644
index 0000000..bbcce5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go
@@ -0,0 +1,29 @@
1// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT.
2
3package json
4
5import "strconv"
6
7const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF"
8
9var _tokenType_map = map[tokenType]string{
10 0: _tokenType_name[0:12],
11 44: _tokenType_name[12:22],
12 58: _tokenType_name[22:32],
13 61: _tokenType_name[32:43],
14 75: _tokenType_name[43:55],
15 78: _tokenType_name[55:66],
16 83: _tokenType_name[66:77],
17 91: _tokenType_name[77:88],
18 93: _tokenType_name[88:99],
19 123: _tokenType_name[99:110],
20 125: _tokenType_name[110:121],
21 9220: _tokenType_name[121:129],
22}
23
24func (i tokenType) String() string {
25 if str, ok := _tokenType_map[i]; ok {
26 return str
27 }
28 return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")"
29}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/vendor/github.com/hashicorp/hcl2/hcl/merged.go
new file mode 100644
index 0000000..ca2b728
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/merged.go
@@ -0,0 +1,226 @@
1package hcl
2
3import (
4 "fmt"
5)
6
7// MergeFiles combines the given files to produce a single body that contains
8// configuration from all of the given files.
9//
10// The ordering of the given files decides the order in which contained
11// elements will be returned. If any top-level attributes are defined with
12// the same name across multiple files, a diagnostic will be produced from
13// the Content and PartialContent methods describing this error in a
14// user-friendly way.
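//
// A minimal usage sketch (f1 and f2 stand for *File values already obtained
// from a parser, and schema is an application-defined *BodySchema):
//
//	merged := MergeFiles([]*File{f1, f2})
//	content, diags := merged.Content(schema)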
15func MergeFiles(files []*File) Body {
16 var bodies []Body
17 for _, file := range files {
18 bodies = append(bodies, file.Body)
19 }
20 return MergeBodies(bodies)
21}
22
23// MergeBodies is like MergeFiles except it deals directly with bodies, rather
24// than with entire files.
25func MergeBodies(bodies []Body) Body {
26 if len(bodies) == 0 {
27 // Swap out for our singleton empty body, to reduce the number of
28 // empty slices we have hanging around.
29 return emptyBody
30 }
31
32 // If any of the given bodies are already merged bodies, we'll unpack
33 // to flatten to a single mergedBodies, since that's conceptually simpler.
34 // This also, as a side-effect, eliminates any empty bodies, since
35 // empties are merged bodies with no inner bodies.
36 var newLen int
37 var flatten bool
38 for _, body := range bodies {
39 if children, merged := body.(mergedBodies); merged {
40 newLen += len(children)
41 flatten = true
42 } else {
43 newLen++
44 }
45 }
46
47 if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside
48 return mergedBodies(bodies)
49 }
50
51 if newLen == 0 {
52 // Don't allocate a new empty when we already have one
53 return emptyBody
54 }
55
56 new := make([]Body, 0, newLen)
57 for _, body := range bodies {
58 if children, merged := body.(mergedBodies); merged {
59 new = append(new, children...)
60 } else {
61 new = append(new, body)
62 }
63 }
64 return mergedBodies(new)
65}
66
67var emptyBody = mergedBodies([]Body{})
68
69// EmptyBody returns a body with no content. This body can be used as a
70// placeholder when a body is required but no body content is available.
71func EmptyBody() Body {
72 return emptyBody
73}
74
75type mergedBodies []Body
76
77// Content returns the content produced by applying the given schema to all
78// of the merged bodies and merging the result.
79//
80// Although required attributes _are_ supported, they should be used sparingly
81// with merged bodies since in this case there is no contextual information
82// with which to return good diagnostics. Applications working with merged
83// bodies may wish to mark all attributes as optional and then check for
84// required attributes afterwards, to produce better diagnostics.
85func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) {
86 // the returned body will always be empty in this case, because mergedContent
87 // will only ever call Content on the child bodies.
88 content, _, diags := mb.mergedContent(schema, false)
89 return content, diags
90}
91
92func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) {
93 return mb.mergedContent(schema, true)
94}
95
96func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) {
97 attrs := make(map[string]*Attribute)
98 var diags Diagnostics
99
100 for _, body := range mb {
101 thisAttrs, thisDiags := body.JustAttributes()
102
103 if len(thisDiags) != 0 {
104 diags = append(diags, thisDiags...)
105 }
106
107 if thisAttrs != nil {
108 for name, attr := range thisAttrs {
109 if existing := attrs[name]; existing != nil {
110 diags = diags.Append(&Diagnostic{
111 Severity: DiagError,
112 Summary: "Duplicate attribute",
113 Detail: fmt.Sprintf(
114 "Attribute %q was already assigned at %s",
115 name, existing.NameRange.String(),
116 ),
117 Subject: &attr.NameRange,
118 })
119 continue
120 }
121
122 attrs[name] = attr
123 }
124 }
125 }
126
127 return attrs, diags
128}
129
130func (mb mergedBodies) MissingItemRange() Range {
131 if len(mb) == 0 {
132 // Nothing useful to return here, so we'll return some garbage.
133 return Range{
134 Filename: "<empty>",
135 }
136 }
137
138 // arbitrarily use the first body's missing item range
139 return mb[0].MissingItemRange()
140}
141
142func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) {
143 // We need to produce a new schema with none of the attributes marked as
144 // required, since _any one_ of our bodies can contribute an attribute value.
145 // We'll separately check that all required attributes are present at
146 // the end.
147 mergedSchema := &BodySchema{
148 Blocks: schema.Blocks,
149 }
150 for _, attrS := range schema.Attributes {
151 mergedAttrS := attrS
152 mergedAttrS.Required = false
153 mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
154 }
155
156 var mergedLeftovers []Body
157 content := &BodyContent{
158 Attributes: map[string]*Attribute{},
159 }
160
161 var diags Diagnostics
162 for _, body := range mb {
163 var thisContent *BodyContent
164 var thisLeftovers Body
165 var thisDiags Diagnostics
166
167 if partial {
168 thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
169 } else {
170 thisContent, thisDiags = body.Content(mergedSchema)
171 }
172
173 if thisLeftovers != nil {
174 mergedLeftovers = append(mergedLeftovers, thisLeftovers)
175 }
176 if len(thisDiags) != 0 {
177 diags = append(diags, thisDiags...)
178 }
179
180 if thisContent.Attributes != nil {
181 for name, attr := range thisContent.Attributes {
182 if existing := content.Attributes[name]; existing != nil {
183 diags = diags.Append(&Diagnostic{
184 Severity: DiagError,
185 Summary: "Duplicate attribute",
186 Detail: fmt.Sprintf(
187 "Attribute %q was already assigned at %s",
188 name, existing.NameRange.String(),
189 ),
190 Subject: &attr.NameRange,
191 })
192 continue
193 }
194 content.Attributes[name] = attr
195 }
196 }
197
198 if len(thisContent.Blocks) != 0 {
199 content.Blocks = append(content.Blocks, thisContent.Blocks...)
200 }
201 }
202
203 // Finally, we check for required attributes.
204 for _, attrS := range schema.Attributes {
205 if !attrS.Required {
206 continue
207 }
208
209 if content.Attributes[attrS.Name] == nil {
210 // We don't have any context here to produce a good diagnostic,
211 // which is why we warn in the Content docstring to minimize the
212 // use of required attributes on merged bodies.
213 diags = diags.Append(&Diagnostic{
214 Severity: DiagError,
215 Summary: "Missing required attribute",
216 Detail: fmt.Sprintf(
217 "The attribute %q is required, but was not assigned.",
218 attrS.Name,
219 ),
220 })
221 }
222 }
223
224 leftoverBody := MergeBodies(mergedLeftovers)
225 return content, leftoverBody, diags
226}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
new file mode 100644
index 0000000..f4e30b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
@@ -0,0 +1,147 @@
1package hcl
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8)
9
10// Index is a helper function that performs the same operation as the index
11// operator in the HCL expression language. That is, the result is the
12// same as it would be for collection[key] in a configuration expression.
13//
14// This is exported so that applications can perform indexing in a manner
15// consistent with how the language does it, including handling of null and
16// unknown values, etc.
17//
18// Diagnostics are produced if the given combination of values is not valid.
19// Therefore a pointer to a source range must be provided to use in diagnostics,
20// though nil can be provided if the calling application is going to
21// ignore the subject of the returned diagnostics anyway.
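//
// A minimal usage sketch (the values shown are illustrative):
//
//	coll := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
//	elem, diags := Index(coll, cty.NumberIntVal(1), nil)
//	// elem is cty.StringVal("b") when diags contains no errors.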
22func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) {
23 if collection.IsNull() {
24 return cty.DynamicVal, Diagnostics{
25 {
26 Severity: DiagError,
27 Summary: "Attempt to index null value",
28 Detail: "This value is null, so it does not have any indices.",
29 Subject: srcRange,
30 },
31 }
32 }
33 if key.IsNull() {
34 return cty.DynamicVal, Diagnostics{
35 {
36 Severity: DiagError,
37 Summary: "Invalid index",
38 Detail: "Can't use a null value as an indexing key.",
39 Subject: srcRange,
40 },
41 }
42 }
43 ty := collection.Type()
44 kty := key.Type()
45 if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType {
46 return cty.DynamicVal, nil
47 }
48
49 switch {
50
51 case ty.IsListType() || ty.IsTupleType() || ty.IsMapType():
52 var wantType cty.Type
53 switch {
54 case ty.IsListType() || ty.IsTupleType():
55 wantType = cty.Number
56 case ty.IsMapType():
57 wantType = cty.String
58 default:
59 // should never happen
60 panic("don't know what key type we want")
61 }
62
63 key, keyErr := convert.Convert(key, wantType)
64 if keyErr != nil {
65 return cty.DynamicVal, Diagnostics{
66 {
67 Severity: DiagError,
68 Summary: "Invalid index",
69 Detail: fmt.Sprintf(
70 "The given key does not identify an element in this collection value: %s.",
71 keyErr.Error(),
72 ),
73 Subject: srcRange,
74 },
75 }
76 }
77
78 has := collection.HasIndex(key)
79 if !has.IsKnown() {
80 if ty.IsTupleType() {
81 return cty.DynamicVal, nil
82 } else {
83 return cty.UnknownVal(ty.ElementType()), nil
84 }
85 }
86 if has.False() {
87 return cty.DynamicVal, Diagnostics{
88 {
89 Severity: DiagError,
90 Summary: "Invalid index",
91 Detail: "The given key does not identify an element in this collection value.",
92 Subject: srcRange,
93 },
94 }
95 }
96
97 return collection.Index(key), nil
98
99 case ty.IsObjectType():
100 key, keyErr := convert.Convert(key, cty.String)
101 if keyErr != nil {
102 return cty.DynamicVal, Diagnostics{
103 {
104 Severity: DiagError,
105 Summary: "Invalid index",
106 Detail: fmt.Sprintf(
107 "The given key does not identify an element in this collection value: %s.",
108 keyErr.Error(),
109 ),
110 Subject: srcRange,
111 },
112 }
113 }
114 if !collection.IsKnown() {
115 return cty.DynamicVal, nil
116 }
117 if !key.IsKnown() {
118 return cty.DynamicVal, nil
119 }
120
121 attrName := key.AsString()
122
123 if !ty.HasAttribute(attrName) {
124 return cty.DynamicVal, Diagnostics{
125 {
126 Severity: DiagError,
127 Summary: "Invalid index",
128 Detail: "The given key does not identify an element in this collection value.",
129 Subject: srcRange,
130 },
131 }
132 }
133
134 return collection.GetAttr(attrName), nil
135
136 default:
137 return cty.DynamicVal, Diagnostics{
138 {
139 Severity: DiagError,
140 Summary: "Invalid index",
141 Detail: "This value does not have any indices.",
142 Subject: srcRange,
143 },
144 }
145 }
146
147}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go
new file mode 100644
index 0000000..1a4b329
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go
@@ -0,0 +1,262 @@
1package hcl
2
3import "fmt"
4
5// Pos represents a single position in a source file, by addressing the
6// start byte of a unicode character encoded in UTF-8.
7//
8// Pos is generally used only in the context of a Range, which then defines
9// which source file the position is within.
10type Pos struct {
11 // Line is the source code line where this position points. Lines are
12 // counted starting at 1 and incremented for each newline character
13 // encountered.
14 Line int
15
16 // Column is the source code column where this position points, in
17 // unicode characters, with counting starting at 1.
18 //
19 // Column counts characters as they appear visually, so for example a
20 // latin letter with a combining diacritic mark counts as one character.
21 // This is intended for rendering visual markers against source code in
22 // contexts where these diacritics would be rendered in a single character
23 // cell. Technically speaking, Column is counting grapheme clusters as
24 // used in unicode normalization.
25 Column int
26
27 // Byte is the byte offset into the file where the indicated character
28 // begins. This is a zero-based offset to the first byte of the first
29 // UTF-8 codepoint sequence in the character, and thus gives a position
30 // that can be resolved _without_ awareness of Unicode characters.
31 Byte int
32}
33
34// Range represents a span of characters between two positions in a source
35// file.
36//
37// This struct is usually used by value in types that represent AST nodes,
38// but by pointer in types that refer to the positions of other objects,
39// such as in diagnostics.
40type Range struct {
41 // Filename is the name of the file into which this range's positions
42 // point.
43 Filename string
44
45 // Start and End represent the bounds of this range. Start is inclusive
46 // and End is exclusive.
47 Start, End Pos
48}
49
50// RangeBetween returns a new range that spans from the beginning of the
51// start range to the end of the end range.
52//
53// The result is meaningless if the two ranges do not belong to the same
54// source file or if the end range appears before the start range.
55func RangeBetween(start, end Range) Range {
56 return Range{
57 Filename: start.Filename,
58 Start: start.Start,
59 End: end.End,
60 }
61}
62
63// RangeOver returns a new range that covers both of the given ranges and
64// possibly additional content between them if the two ranges do not overlap.
65//
66// If either range is empty then it is ignored. The result is empty if both
67// given ranges are empty.
68//
69// The result is meaningless if the two ranges do not belong to the same
70// source file.
71func RangeOver(a, b Range) Range {
72 if a.Empty() {
73 return b
74 }
75 if b.Empty() {
76 return a
77 }
78
79 var start, end Pos
80 if a.Start.Byte < b.Start.Byte {
81 start = a.Start
82 } else {
83 start = b.Start
84 }
85 if a.End.Byte > b.End.Byte {
86 end = a.End
87 } else {
88 end = b.End
89 }
90 return Range{
91 Filename: a.Filename,
92 Start: start,
93 End: end,
94 }
95}
96
97// ContainsOffset returns true if and only if the given byte offset is within
98// the receiving Range.
99func (r Range) ContainsOffset(offset int) bool {
100 return offset >= r.Start.Byte && offset < r.End.Byte
101}
102
103// Ptr returns a pointer to a copy of the receiver. This is a convenience when
104// using ranges in places where pointers are required, such as in Diagnostic, but
105// the range in question is returned from a method. Go would otherwise not
106// allow one to take the address of a function call.
107func (r Range) Ptr() *Range {
108 return &r
109}
110
111// String returns a compact string representation of the receiver.
112// Callers should generally prefer to present a range more visually,
113// e.g. via markers directly on the relevant portion of source code.
114func (r Range) String() string {
115 if r.Start.Line == r.End.Line {
116 return fmt.Sprintf(
117 "%s:%d,%d-%d",
118 r.Filename,
119 r.Start.Line, r.Start.Column,
120 r.End.Column,
121 )
122 } else {
123 return fmt.Sprintf(
124 "%s:%d,%d-%d,%d",
125 r.Filename,
126 r.Start.Line, r.Start.Column,
127 r.End.Line, r.End.Column,
128 )
129 }
130}
131
132func (r Range) Empty() bool {
133 return r.Start.Byte == r.End.Byte
134}
135
136// CanSliceBytes returns true if SliceBytes could return an accurate
137// sub-slice of the given slice.
138//
139// This effectively tests whether the start and end offsets of the range
140// are within the bounds of the slice, and thus whether SliceBytes can be
141// trusted to produce an accurate start and end position within that slice.
142func (r Range) CanSliceBytes(b []byte) bool {
143 switch {
144 case r.Start.Byte < 0 || r.Start.Byte > len(b):
145 return false
146 case r.End.Byte < 0 || r.End.Byte > len(b):
147 return false
148 case r.End.Byte < r.Start.Byte:
149 return false
150 default:
151 return true
152 }
153}
154
155// SliceBytes returns a sub-slice of the given slice that is covered by the
156// receiving range, assuming that the given slice is the source code of the
157// file indicated by r.Filename.
158//
159// If the receiver refers to any byte offsets that are outside of the slice
160// then the result is constrained to the overlapping portion only, to avoid
161// a panic. Use CanSliceBytes to determine if the result is guaranteed to
162// be an accurate span of the requested range.
163func (r Range) SliceBytes(b []byte) []byte {
164 start := r.Start.Byte
165 end := r.End.Byte
166 if start < 0 {
167 start = 0
168 } else if start > len(b) {
169 start = len(b)
170 }
171 if end < 0 {
172 end = 0
173 } else if end > len(b) {
174 end = len(b)
175 }
176 if end < start {
177 end = start
178 }
179 return b[start:end]
180}
181
182// Overlaps returns true if the receiver and the other given range share any
183// characters in common.
184func (r Range) Overlaps(other Range) bool {
185 switch {
186 case r.Filename != other.Filename:
187 // If the ranges are in different files then they can't possibly overlap
188 return false
189 case r.Empty() || other.Empty():
190 // Empty ranges can never overlap
191 return false
192 case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte):
193 return true
194 case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte):
195 return true
196 default:
197 return false
198 }
199}
200
201// Overlap finds a range that is either identical to or a sub-range of both
202// the receiver and the other given range. It returns an empty range
203// within the receiver if there is no overlap between the two ranges.
204//
205// A non-empty result is either identical to or a subset of the receiver.
206func (r Range) Overlap(other Range) Range {
207 if !r.Overlaps(other) {
208 // Start == End indicates an empty range
209 return Range{
210 Filename: r.Filename,
211 Start: r.Start,
212 End: r.Start,
213 }
214 }
215
216 var start, end Pos
217 if r.Start.Byte > other.Start.Byte {
218 start = r.Start
219 } else {
220 start = other.Start
221 }
222 if r.End.Byte < other.End.Byte {
223 end = r.End
224 } else {
225 end = other.End
226 }
227
228 return Range{
229 Filename: r.Filename,
230 Start: start,
231 End: end,
232 }
233}
234
235// PartitionAround finds the portion of the given range that overlaps with
236// the receiver and returns three ranges: the portion of the receiver that
237// precedes the overlap, the overlap itself, and then the portion of the
238// receiver that comes after the overlap.
239//
240// If the two ranges do not overlap then all three returned ranges are empty.
241//
242// If the given range aligns with or extends beyond either extent of the
243// receiver then the corresponding outer range will be empty.
244func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
245 overlap = r.Overlap(other)
246 if overlap.Empty() {
247 return overlap, overlap, overlap
248 }
249
250 before = Range{
251 Filename: r.Filename,
252 Start: r.Start,
253 End: overlap.Start,
254 }
255 after = Range{
256 Filename: r.Filename,
257 Start: overlap.End,
258 End: r.End,
259 }
260
261 return before, overlap, after
262}
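The methods above compose naturally when rendering source snippets for diagnostics. The following is a minimal sketch of that pattern, assuming the vendored package is imported under its path in this tree; the sample source text, filename, and byte offsets are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
)

func main() {
	src := []byte("foo = 1\nbar = 2\n")

	// Hypothetical range covering the whole second line, "bar = 2\n".
	line := hcl.Range{
		Filename: "example.tf",
		Start:    hcl.Pos{Byte: 8, Line: 2, Column: 1},
		End:      hcl.Pos{Byte: 16, Line: 3, Column: 1},
	}
	// Hypothetical range covering just the token "bar" on that line.
	subject := hcl.Range{
		Filename: "example.tf",
		Start:    hcl.Pos{Byte: 8, Line: 2, Column: 1},
		End:      hcl.Pos{Byte: 11, Line: 2, Column: 4},
	}

	if line.CanSliceBytes(src) {
		before, overlap, after := line.PartitionAround(subject)
		// Prints the line with the subject bracketed: "[bar] = 2"
		fmt.Printf("%s[%s]%s", before.SliceBytes(src), overlap.SliceBytes(src), after.SliceBytes(src))
	}
}
```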
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
new file mode 100644
index 0000000..7c8f2df
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
@@ -0,0 +1,148 @@
1package hcl
2
3import (
4 "bufio"
5 "bytes"
6
7 "github.com/apparentlymart/go-textseg/textseg"
8)
9
10// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc
11// and visit a source range for each token matched.
12//
13// For example, this can be used with bufio.ScanLines to find the source range
14// for each line in the file, skipping over the actual newline characters, which
15// may be useful when printing source code snippets as part of diagnostic
16// messages.
17//
18// The line and column information in the returned ranges is produced by
19// counting newline characters and grapheme clusters respectively, which
20// mimics the behavior we expect from a parser when producing ranges.
21type RangeScanner struct {
22 filename string
23 b []byte
24 cb bufio.SplitFunc
25
26 pos Pos // position of next byte to process in b
27 cur Range // latest range
28 tok []byte // slice of b that is covered by cur
29 err error // error from last scan, if any
30}
31
32// NewRangeScanner creates a new RangeScanner for the given buffer, producing
33// ranges for the given filename.
34//
35// Since ranges have grapheme-cluster granularity rather than byte granularity,
36// the scanner will produce incorrect results if the given SplitFunc creates
37// tokens between grapheme cluster boundaries. In particular, it is incorrect
38// to use RangeScanner with bufio.ScanRunes because it will produce tokens
39// around individual UTF-8 sequences, which will split any multi-sequence
40// grapheme clusters.
41func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner {
42 return &RangeScanner{
43 filename: filename,
44 b: b,
45 cb: cb,
46 pos: Pos{
47 Byte: 0,
48 Line: 1,
49 Column: 1,
50 },
51 }
52}
53
54func (sc *RangeScanner) Scan() bool {
55 if sc.pos.Byte >= len(sc.b) || sc.err != nil {
56 // All done
57 return false
58 }
59
60 // Since we're operating on an in-memory buffer, we always pass the whole
61 // remainder of the buffer to our SplitFunc and set isEOF to let it know
62 // that it has the whole thing.
63 advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true)
64
65 // Since we are setting isEOF to true this should never happen, but
66 // if it does we will just abort and assume the SplitFunc is misbehaving.
67 if advance == 0 && token == nil && err == nil {
68 return false
69 }
70
71 if err != nil {
72 sc.err = err
73 sc.cur = Range{
74 Filename: sc.filename,
75 Start: sc.pos,
76 End: sc.pos,
77 }
78 sc.tok = nil
79 return false
80 }
81
82 sc.tok = token
83 start := sc.pos
84 end := sc.pos
85 new := sc.pos
86
87	// adv is a slice covering the token itself plus any additional bytes
88	// that the SplitFunc asked us to skip over via its advance return
89	// value, so the position bookkeeping below also accounts for skipped
90	// characters such as delimiters.
91 adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance]
92
93 // We now need to scan over our token to count the grapheme clusters
94 // so we can correctly advance Column, and count the newlines so we
95 // can correctly advance Line.
96 advR := bytes.NewReader(adv)
97 gsc := bufio.NewScanner(advR)
98 advanced := 0
99 gsc.Split(textseg.ScanGraphemeClusters)
100 for gsc.Scan() {
101 gr := gsc.Bytes()
102 new.Byte += len(gr)
103 new.Column++
104
105 // We rely here on the fact that \r\n is considered a grapheme cluster
106 // and so we don't need to worry about miscounting additional lines
107 // on files with Windows-style line endings.
108 if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') {
109 new.Column = 1
110 new.Line++
111 }
112
113 if advanced < len(token) {
114 // If we've not yet found the end of our token then we'll
115 // also push our "end" marker along.
116 // (if advance > len(token) then we'll stop moving "end" early
117 // so that the caller only sees the range covered by token.)
118 end = new
119 }
120 advanced += len(gr)
121 }
122
123 sc.cur = Range{
124 Filename: sc.filename,
125 Start: start,
126 End: end,
127 }
128 sc.pos = new
129 return true
130}
131
132// Range returns a range that covers the latest token obtained after a call
133// to Scan returns true.
134func (sc *RangeScanner) Range() Range {
135 return sc.cur
136}
137
138// Bytes returns the slice of the input buffer that is covered by the range
139// that would be returned by Range.
140func (sc *RangeScanner) Bytes() []byte {
141 return sc.tok
142}
143
144// Err can be called after Scan returns false to determine if the latest read
145// resulted in an error, and obtain that error if so.
146func (sc *RangeScanner) Err() error {
147 return sc.err
148}
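As a usage sketch, the scanner can be paired with bufio.ScanLines from the standard library to report the position of every line in a buffer, which is the use case the doc comment above mentions. The source text and filename below are invented for the example.

```go
package main

import (
	"bufio"
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
)

func main() {
	src := []byte("first line\nsecond line\n")

	sc := hcl.NewRangeScanner(src, "example.tf", bufio.ScanLines)
	for sc.Scan() {
		rng := sc.Range()
		// The token excludes the newline, but the scanner still advances
		// past it, so the next range starts on the following line.
		fmt.Printf("line %d, columns %d-%d: %q\n",
			rng.Start.Line, rng.Start.Column, rng.End.Column, sc.Bytes())
	}
	if err := sc.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
```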
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/schema.go b/vendor/github.com/hashicorp/hcl2/hcl/schema.go
new file mode 100644
index 0000000..891257a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/schema.go
@@ -0,0 +1,21 @@
1package hcl
2
3// BlockHeaderSchema represents the shape of a block header, and is
4// used for matching blocks within bodies.
5type BlockHeaderSchema struct {
6 Type string
7 LabelNames []string
8}
9
10// AttributeSchema represents the requirements for an attribute, and is used
11// for matching attributes within bodies.
12type AttributeSchema struct {
13 Name string
14 Required bool
15}
16
17// BodySchema represents the desired shallow structure of a body.
18type BodySchema struct {
19 Attributes []AttributeSchema
20 Blocks []BlockHeaderSchema
21}
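As a small sketch of how these types fit together, an application expecting an optional top-level attribute plus labelled blocks might describe that shape as follows; the attribute name, block type, and label names here are invented for the example.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
)

func main() {
	// Hypothetical schema: one optional attribute and one block type that
	// carries two labels, e.g. resource "aws_instance" "web" { ... }.
	schema := &hcl.BodySchema{
		Attributes: []hcl.AttributeSchema{
			{Name: "name", Required: false},
		},
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "resource", LabelNames: []string{"type", "name"}},
		},
	}
	fmt.Printf("%+v\n", schema)
}
```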
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
new file mode 100644
index 0000000..58257bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
@@ -0,0 +1,691 @@
1# HCL Syntax-Agnostic Information Model
2
3This is the specification for the general information model (abstract types and
4semantics) for HCL. HCL is a system for defining configuration languages for
5applications. The HCL information model is designed to support multiple
6concrete syntaxes for configuration, each with a mapping to the model defined
7in this specification.
8
9The two primary syntaxes intended for use in conjunction with this model are
10[the HCL native syntax](./hclsyntax/spec.md) and [the JSON syntax](./json/spec.md).
11In principle other syntaxes are possible as long as either their language model
12is sufficiently rich to express the concepts described in this specification
13or the language targets a well-defined subset of the specification.
14
15## Structural Elements
16
17The primary structural element is the _body_, which is a container representing
18a set of zero or more _attributes_ and a set of zero or more _blocks_.
19
20A _configuration file_ is the top-level object, and will usually be produced
21by reading a file from disk and parsing it as a particular syntax. A
22configuration file has its own _body_, representing the top-level attributes
23and blocks.
24
25An _attribute_ is a name and value pair associated with a body. Attribute names
26are unique within a given body. Attribute values are provided as _expressions_,
27which are discussed in detail in a later section.
28
29A _block_ is a nested structure that has a _type name_, zero or more string
30_labels_ (e.g. identifiers), and a nested body.
31
32Together the structural elements create a hierarchical data structure, with
33attributes intended to represent the direct properties of a particular object
34in the calling application, and blocks intended to represent child objects
35of a particular object.
36
37## Body Content
38
39To support the expression of the HCL concepts in languages whose information
40model is a subset of HCL's, such as JSON, a _body_ is an opaque container
41whose content can only be accessed by providing information on the expected
42structure of the content.
43
44The specification for each syntax must describe how its physical constructs
45are mapped onto body content given a schema. For syntaxes that have
46first-class syntax distinguishing attributes and blocks this can be relatively
47straightforward, while more detailed mapping rules may be required in syntaxes
48where the representation of attributes vs. blocks is ambiguous.
49
50### Schema-driven Processing
51
52Schema-driven processing is the primary way to access body content.
53A _body schema_ is a description of what is expected within a particular body,
54which can then be used to extract the _body content_, which then provides
55access to the specific attributes and blocks requested.
56
57A _body schema_ consists of a list of _attribute schemata_ and
58_block header schemata_:
59
60* An _attribute schema_ provides the name of an attribute and whether its
61 presence is required.
62
63* A _block header schema_ provides a block type name and the semantic names
64 assigned to each of the labels of that block type, if any.
65
66Within a schema, it is an error to request the same attribute name twice or
67to request a block type whose name is also an attribute name. While this can
68in principle be supported in some syntaxes, in other syntaxes the attribute
69and block namespaces are combined and so an attribute cannot coexist with
70a block whose type name is identical to the attribute name.
71
72The result of applying a body schema to a body is _body content_, which
73consists of an _attribute map_ and a _block sequence_:
74
75* The _attribute map_ is a map data structure whose keys are attribute names
76 and whose values are _expressions_ that represent the corresponding attribute
77 values.
78
79* The _block sequence_ is an ordered sequence of blocks, with each specifying
80 a block _type name_, the sequence of _labels_ specified for the block,
81 and the body object (not body _content_) representing the block's own body.
82
83After obtaining _body content_, the calling application may continue processing
84by evaluating attribute expressions and/or recursively applying further
85schema-driven processing to the child block bodies.
86
87**Note:** The _body schema_ is intentionally minimal, to reduce the set of
88mapping rules that must be defined for each syntax. Higher-level utility
89libraries may be provided to assist in the construction of a schema and
90perform additional processing, such as automatically evaluating attribute
91expressions and assigning their result values into a data structure, or
92recursively applying a schema to child blocks. Such utilities are not part of
93this core specification and will vary depending on the capabilities and idiom
94of the implementation language.
95
96### _Dynamic Attributes_ Processing
97
98The _schema-driven_ processing model is useful when the expected structure
99of a body is known a priori by the calling application. Some blocks are
100instead more free-form, such as a user-provided set of arbitrary key/value
101pairs.
102
103The alternative _dynamic attributes_ processing mode allows for this more
104ad-hoc approach. Processing in this mode behaves as if a schema had been
105constructed without any _block header schemata_ and with an attribute
106schema for each distinct key provided within the physical representation
107of the body.
108
109The means by which _distinct keys_ are identified is dependent on the
110physical syntax; this processing mode assumes that the syntax has a way
111to enumerate keys provided by the author and identify expressions that
112correspond with those keys, but does not define the means by which this is
113done.
114
115The result of _dynamic attributes_ processing is an _attribute map_ as
116defined in the previous section. No _block sequence_ is produced in this
117processing mode.
118
119### Partial Processing of Body Content
120
121Under _schema-driven processing_, by default the given schema is assumed
122to be exhaustive, such that any attribute or block not matched by schema
123elements is considered an error. This allows feedback about unsupported
124attributes and blocks (such as typos) to be provided.
125
126An alternative is _partial processing_, where any additional elements within
127the body are not considered an error.
128
129Under partial processing, the result is both body content as described
130above _and_ a new body that represents any body elements that remain after
131the schema has been processed.
132
133Specifically:
134
135* Any attribute whose name is specified in the schema is returned in body
136 content and elided from the new body.
137
138* Any block whose type is specified in the schema is returned in body content
139 and elided from the new body.
140
141* Any attribute or block _not_ meeting the above conditions is placed into
142 the new body, unmodified.
143
144The new body can then be recursively processed using any of the body
145processing models. This facility allows different subsets of body content
146to be processed by different parts of the calling application.
147
148Processing a body in two steps — first partial processing of a source body,
149then exhaustive processing of the returned body — is equivalent to single-step
150processing with a schema that is the union of the schemata used
151across the two steps.
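In the Go package added elsewhere in this change (structure.go), partial processing corresponds to the Body.PartialContent method and exhaustive processing to Body.Content. The sketch below shows the two-step pattern described above; the function name and the two schemas are hypothetical, and the body is assumed to come from a parser.

```go
package example

import "github.com/hashicorp/hcl2/hcl"

// processInTwoSteps is a sketch of the two-step pattern described above.
func processInTwoSteps(body hcl.Body, firstSchema, secondSchema *hcl.BodySchema) hcl.Diagnostics {
	// Step 1: partial processing consumes only what firstSchema describes
	// and returns a new body holding everything that remains.
	firstContent, remain, diags := body.PartialContent(firstSchema)
	_ = firstContent // a real caller would inspect its Attributes and Blocks here

	// Step 2: exhaustive processing of the remainder; anything not covered
	// by secondSchema is reported through the returned diagnostics.
	secondContent, moreDiags := remain.Content(secondSchema)
	_ = secondContent
	return append(diags, moreDiags...)
}
```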
152
153## Expressions
154
155Attribute values are represented by _expressions_. Depending on the concrete
156syntax in use, an expression may just be a literal value or it may describe
157a computation in terms of literal values, variables, and functions.
158
159Each syntax defines its own representation of expressions. For syntaxes based
160in languages that do not have any non-literal expression syntax, it is
161recommended to embed the template language from
162[the native syntax](./hclsyntax/spec.md) e.g. as a post-processing step on
163string literals.
164
165### Expression Evaluation
166
167In order to obtain a concrete value, each expression must be _evaluated_.
168Evaluation is performed in terms of an evaluation context, which
169consists of the following:
170
171* An _evaluation mode_, which is defined below.
172* A _variable scope_, which provides a set of named variables for use in
173 expressions.
174* A _function table_, which provides a set of named functions for use in
175 expressions.
176
177The _evaluation mode_ allows for two different interpretations of an
178expression:
179
180* In _literal-only mode_, variables and functions are not available and it
181 is assumed that the calling application's intent is to treat the attribute
182 value as a literal.
183
184* In _full expression mode_, variables and functions are defined and it is
185 assumed that the calling application wishes to provide a full expression
186 language for definition of the attribute value.
187
188The actual behavior of these two modes depends on the syntax in use. For
189languages with first-class expression syntax, these two modes may be considered
190equivalent, with _literal-only mode_ simply not defining any variables or
191functions. For languages that embed arbitrary expressions via string templates,
192_literal-only mode_ may disable such processing, allowing literal strings to
193pass through without interpretation as templates.
194
195Since literal-only mode does not support variables and functions, it is an
196error for the calling application to enable this mode and yet provide a
197variable scope and/or function table.
198
199## Values and Value Types
200
201The result of expression evaluation is a _value_. Each value has a _type_,
202which is dynamically determined during evaluation. The _variable scope_ in
203the evaluation context is a map from variable name to value, using the same
204definition of value.
205
206The type system for HCL values is intended to be of a level of abstraction
207suitable for configuration of various applications. A well-defined,
208implementation-language-agnostic type system is defined to allow for
209consistent processing of configuration across many implementation languages.
210Concrete implementations may provide additional functionality to lower
211HCL values and types to corresponding native language types, which may then
212impose additional constraints on the values outside of the scope of this
213specification.
214
215Two values are _equal_ if and only if they have identical types and their
216values are equal according to the rules of their shared type.
217
218### Primitive Types
219
220The primitive types are _string_, _bool_, and _number_.
221
222A _string_ is a sequence of Unicode characters. Two strings are equal if
223NFC normalization ([UAX#15](http://unicode.org/reports/tr15/))
224of each string produces two identical sequences of characters.
225NFC normalization ensures that, for example, a precomposed combination of a
226latin letter and a diacritic compares equal with the letter followed by
227a combining diacritic.
228
229The _bool_ type has only two non-null values: _true_ and _false_. Two bool
230values are equal if and only if they are either both true or both false.
231
232A _number_ is an arbitrary-precision floating point value. An implementation
233_must_ make the full-precision values available to the calling application
234for interpretation into any suitable number representation. An implementation
235may in practice implement numbers with limited precision so long as the
236following constraints are met:
237
238* Integers are represented with at least 256 bits.
239* Non-integer numbers are represented as floating point values with a
240 mantissa of at least 256 bits and a signed binary exponent of at least
241 16 bits.
242* An error is produced if an integer value given in source cannot be
243 represented precisely.
244* An error is produced if a non-integer value cannot be represented due to
245 overflow.
246* A non-integer number is rounded to the nearest possible value when a
247 value is of too high a precision to be represented.
248
249The _number_ type also requires representation of both positive and negative
250infinity. A "not a number" (NaN) value is _not_ provided nor used.
251
252Two number values are equal if they are numerically equal to the precision
253associated with the number. Positive infinity and negative infinity are
254equal to themselves but not to each other. Positive infinity is greater than
255any other number value, and negative infinity is less than any other number
256value.
257
258Some syntaxes may be unable to represent numeric literals of arbitrary
259precision. This must be defined in the syntax specification as part of its
260description of mapping numeric literals to HCL values.
261
262### Structural Types
263
264_Structural types_ are types that are constructed by combining other types.
265Each distinct combination of other types is itself a distinct type. There
266are two structural type _kinds_:
267
268* _Object types_ are constructed of a set of named attributes, each of which
269 has a type. Attribute names are always strings. (_Object_ attributes are a
270 distinct idea from _body_ attributes, though calling applications
271 may choose to blur the distinction by use of common naming schemes.)
272* _Tuple types_ are constructed of a sequence of elements, each of which
273 has a type.
274
275Values of structural types are compared for equality in terms of their
276attributes or elements. A structural type value is equal to another if and
277only if all of the corresponding attributes or elements are equal.
278
279Two structural types are identical if they are of the same kind and
280have attributes or elements with identical types.
281
282### Collection Types
283
284_Collection types_ are types that combine together an arbitrary number of
285values of some other single type. There are three collection type _kinds_:
286
287* _List types_ represent ordered sequences of values of their element type.
288* _Map types_ represent values of their element type accessed via string keys.
289* _Set types_ represent unordered sets of distinct values of their element type.
290
291For each of these kinds and each distinct element type there is a distinct
292collection type. For example, "list of string" is a distinct type from
293"set of string", and "list of number" is a distinct type from "list of string".
294
295Values of collection types are compared for equality in terms of their
296elements. A collection type value is equal to another if and only if both
297have the same number of elements and their corresponding elements are equal.
298
299Two collection types are identical if they are of the same kind and have
300the same element type.
301
302### Null values
303
304Each type has a null value. The null value of a type represents the absence
305of a value, but with type information retained to allow for type checking.
306
307Null values are used primarily to represent the conditional absence of a
308body attribute. In a syntax with a conditional operator, one of the result
309values of that conditional may be null to indicate that the attribute should be
310considered not present in that case.
311
312Calling applications _should_ consider an attribute with a null value as
313equivalent to the value not being present at all.
314
315A null value of a particular type is equal to itself.
316
317### Unknown Values and the Dynamic Pseudo-type
318
319An _unknown value_ is a placeholder for a value that is not yet known.
320Operations on unknown values themselves return unknown values that have a
321type appropriate to the operation. For example, adding together two unknown
322numbers yields an unknown number, while comparing two unknown values of any
323type for equality yields an unknown bool.
324
325Each type has a distinct unknown value. For example, an unknown _number_ is
326a distinct value from an unknown _string_.
327
328_The dynamic pseudo-type_ is a placeholder for a type that is not yet known.
329The only values of this type are its null value and its unknown value. It is
330referred to as a _pseudo-type_ because it should not be considered a type in
331its own right, but rather as a placeholder for a type yet to be established.
332The unknown value of the dynamic pseudo-type is referred to as _the dynamic
333value_.
334
335Operations on values of the dynamic pseudo-type behave as if it is a value
336of the expected type, optimistically assuming that once the value and type
337are known they will be valid for the operation. For example, adding together
338a number and the dynamic value produces an unknown number.
339
340Unknown values and the dynamic pseudo-type can be used as a mechanism for
341partial type checking and semantic checking: by evaluating an expression with
342all variables set to an unknown value, the expression can be evaluated to
343produce an unknown value of a given type, or produce an error if any operation
344is provably invalid with only type information.
345
346Unknown values and the dynamic pseudo-type must never be returned from
347operations unless at least one operand is unknown or dynamic. Calling
348applications are guaranteed that unless the global scope includes unknown
349values, or the function table includes functions that return unknown values,
350no expression will evaluate to an unknown value. The calling application is
351thus in total control over the use and meaning of unknown values.
352
353The dynamic pseudo-type is identical only to itself.
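The Go packages in this change represent values with the go-cty library (imported as github.com/zclconf/go-cty/cty in the files below). The following sketch illustrates the behaviour described above; it assumes the cty helpers used here (UnknownVal, DynamicVal, NumberIntVal and friends) behave as named, and is not part of this specification.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Adding an unknown number to a known number yields an unknown number.
	sum := cty.UnknownVal(cty.Number).Add(cty.NumberIntVal(1))
	fmt.Println(sum.IsKnown(), sum.Type().FriendlyName()) // false number

	// Comparing a known value with an unknown one yields an unknown bool.
	eq := cty.StringVal("a").Equals(cty.UnknownVal(cty.String))
	fmt.Println(eq.IsKnown(), eq.Type().FriendlyName()) // false bool

	// cty.DynamicVal is the unknown value of the dynamic pseudo-type.
	fmt.Println(cty.DynamicVal.Type().Equals(cty.DynamicPseudoType)) // true
}
```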
354
355### Capsule Types
356
357A _capsule type_ is a custom type defined by the calling application. A value
358of a capsule type is considered opaque to HCL, but may be accepted
359by functions provided by the calling application.
360
361A particular capsule type is identical only to itself. The equality of two
362values of the same capsule type is defined by the calling application. No
363other operations are supported for values of capsule types.
364
365Support for capsule types in an HCL implementation is optional. Capsule types
366are intended to allow calling applications to pass through values that are
367not part of the standard type system. For example, an application that
368deals with raw binary data may define a capsule type representing a byte
369array, and provide functions that produce or operate on byte arrays.
370
371### Type Specifications
372
373In certain situations it is necessary to define expectations about the
374type of a value. Whereas two _types_ have a commutative _identity_ relationship,
375a type has a non-commutative _matches_ relationship with a _type specification_.
376A type specification is, in practice, just a different interpretation of a
377type such that:
378
379* Any type _matches_ any type that it is identical to.
380
381* Any type _matches_ the dynamic pseudo-type.
382
383For example, given a type specification "list of dynamic pseudo-type", the
384concrete types "list of string" and "list of map" match, but the
385type "set of string" does not.
386
387## Functions and Function Calls
388
389The evaluation context used to evaluate an expression includes a function
390table, which represents an application-defined set of named functions
391available for use in expressions.
392
393Each syntax defines whether function calls are supported and how they are
394physically represented in source code, but the semantics of function calls are
395defined here to ensure consistent results across syntaxes and to allow
396applications to provide functions that are interoperable with all syntaxes.
397
398A _function_ is defined from the following elements:
399
400* Zero or more _positional parameters_, each with a name used for documentation,
401  a type specification for expected argument values, and flags for whether
402  each of null values, unknown values, and values of the dynamic pseudo-type
403  is accepted.
404
405* Zero or one _variadic parameter_, with the same structure as the _positional_
406 parameters, which if present collects any additional arguments provided at
407 the function call site.
408
409* A _result type definition_, which specifies the value type returned for each
410 valid sequence of argument values.
411
412* A _result value definition_, which specifies the value returned for each
413 valid sequence of argument values.
414
415A _function call_, regardless of source syntax, consists of a sequence of
416argument values. The argument values are each mapped to a corresponding
417parameter as follows:
418
419* For each of the function's positional parameters in sequence, take the next
420 argument. If there are no more arguments, the call is erroneous.
421
422* If the function has a variadic parameter, take all remaining arguments that
423  were not yet assigned to a positional parameter and collect them into
424 a sequence of variadic arguments that each correspond to the variadic
425 parameter.
426
427* If the function has _no_ variadic parameter, it is an error if any arguments
428 remain after taking one argument for each positional parameter.
429
430After mapping each argument to a parameter, semantic checking proceeds
431for each argument:
432
433* If the argument value corresponding to a parameter does not match the
434 parameter's type specification, the call is erroneous.
435
436* If the argument value corresponding to a parameter is null and the parameter
437 is not specified as accepting nulls, the call is erroneous.
438
439* If the argument value corresponding to a parameter is the dynamic value
440 and the parameter is not specified as accepting values of the dynamic
441 pseudo-type, the call is valid but its _result type_ is forced to be the
442 dynamic pseudo type.
443
444* If neither of the above conditions holds for any argument, the call is
445  valid and the function's result type definition is used to determine the
446 call's _result type_. A function _may_ vary its result type depending on
447 the argument _values_ as well as the argument _types_; for example, a
448 function that decodes a JSON value will return a different result type
449 depending on the data structure described by the given JSON source code.
450
451If semantic checking succeeds without error, the call is _executed_:
452
453* For each argument, if its value is unknown and its corresponding parameter
454 is not specified as accepting unknowns, the _result value_ is forced to be an
455 unknown value of the result type.
456
457* If the previous condition does not apply, the function's result value
458 definition is used to determine the call's _result value_.
459
460The result of a function call expression is either an error, if one of the
461erroneous conditions above applies, or the _result value_.
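In the Go ecosystem these call semantics are implemented by the go-cty function package rather than by the hcl package in this diff; the sketch below assumes that package's API (function.New, function.Spec, function.Parameter, StaticReturnType) and only illustrates how the parameter flags described above appear in code.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// upperFunc is a sketch of a one-parameter function. The parameter leaves
// AllowNull, AllowUnknown and AllowDynamicType at their false defaults, so
// the checking and forcing rules described above all apply to it.
var upperFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "str", Type: cty.String},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
	},
})

func main() {
	result, err := upperFunc.Call([]cty.Value{cty.StringVal("hello")})
	if err != nil {
		fmt.Println("call failed:", err)
		return
	}
	fmt.Println(result.AsString()) // HELLO
}
```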
462
463## Type Conversions and Unification
464
465Values given in configuration may not always match the expectations of the
466operations applied to them or to the calling application. In such situations,
467automatic type conversion is attempted as a convenience to the user.
468
469Along with conversions to a _specified_ type, it is sometimes necessary to
470ensure that a selection of values are all of the _same_ type, without any
471constraint on which type that is. This is the process of _type unification_,
472which attempts to find the most general type that all of the given types can
473be converted to.
474
475Both type conversions and unification are defined in the syntax-agnostic
476model to ensure consistency of behavior between syntaxes.
477
478Type conversions are broadly characterized into two categories: _safe_ and
479_unsafe_. A conversion is "safe" if any distinct value of the source type
480has a corresponding distinct value in the target type. A conversion is
481"unsafe" if either the target type values are _not_ distinct (information
482may be lost in conversion) or if some values of the source type do not have
483any corresponding value in the target type. An unsafe conversion may result
484in an error.
485
486A given type can always be converted to itself, which is a no-op.
487
488### Conversion of Null Values
489
490All null values are safely convertible to a null value of any other type,
491regardless of other type-specific rules specified in the sections below.
492
493### Conversion to and from the Dynamic Pseudo-type
494
495Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds,
496producing an unknown value of the target type.
497
498Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
499is the input value, verbatim. This is the only situation where the conversion
500result value is not of the given target type.
501
502### Primitive Type Conversions
503
504Bidirectional conversions are available between the string and number types,
505and between the string and boolean types.
506
507The bool value true corresponds to the string containing the characters "true",
508while the bool value false corresponds to the string containing the characters
509"false". Conversion from bool to string is safe, while the converse is
510unsafe. The strings "1" and "0" are alternative string representations
511of true and false respectively. It is an error to convert a string other than
512the four in this paragraph to type bool.
513
514A number value is converted to string by translating its integer portion
515into a sequence of decimal digits (`0` through `9`), and then if it has a
516non-zero fractional part, a period `.` followed by a sequence of decimal
517digits representing its fractional part. No exponent portion is included.
518The number is converted at its full precision. Conversion from number to
519string is safe.
520
521A string is converted to a number value by reversing the above mapping.
522No exponent portion is allowed. Conversion from string to number is unsafe.
523It is an error to convert a string that does not comply with the expected
524syntax to type number.
525
526No direct conversion is available between the bool and number types.
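For Go callers, these rules are implemented by the go-cty convert package rather than within hcl itself. The sketch below assumes that package's Convert function and exercises the number/string and string/bool rules described above.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Safe conversion: number to string always succeeds.
	s, err := convert.Convert(cty.NumberIntVal(5), cty.String)
	fmt.Println(s.AsString(), err) // 5 <nil>

	// Unsafe conversion: string to bool succeeds only for the accepted
	// spellings of true and false.
	b, err := convert.Convert(cty.StringVal("true"), cty.Bool)
	fmt.Println(b.True(), err) // true <nil>

	// Any other string fails with a conversion error.
	_, err = convert.Convert(cty.StringVal("certainly"), cty.Bool)
	fmt.Println(err != nil) // true
}
```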
527
528### Collection and Structural Type Conversions
529
530Conversion from set types to list types is _safe_, as long as their
531element types are safely convertible. If the element types are _unsafely_
532convertible, then the collection conversion is also unsafe. Each set element
533becomes a corresponding list element, in an undefined order. Although no
534particular ordering is required, implementations _should_ produce list
535elements in a consistent order for a given input set, as a convenience
536to calling applications.
537
538Conversion from list types to set types is _unsafe_, as long as their element
539types are convertible. Each distinct list item becomes a distinct set item.
540If two list items are equal, one of the two is lost in the conversion.
541
542Conversion from tuple types to list types is permitted if all of the
543tuple element types are convertible to the target list element type.
544The safety of the conversion depends on the safety of each of the element
545conversions. Each element in turn is converted to the list element type,
546producing a list of identical length.
547
548Conversion from tuple types to set types is permitted, behaving as if the
549tuple type was first converted to a list of the same element type and then
550that list converted to the target set type.
551
552Conversion from object types to map types is permitted if all of the object
553attribute types are convertible to the target map element type. The safety
554of the conversion depends on the safety of each of the attribute conversions.
555Each attribute in turn is converted to the map element type, and map element
556keys are set to the name of each corresponding object attribute.
557
558Conversion from list and set types to tuple types is permitted, following
559the opposite steps as the converse conversions. Such conversions are _unsafe_.
560It is an error to convert a list or set to a tuple type whose number of
561elements does not match the list or set length.
562
563Conversion from map types to object types is permitted if each map key
564corresponds to an attribute in the target object type. It is an error to
565convert from a map value whose set of keys does not exactly match the target
566type's attributes. The conversion takes the opposite steps of the converse
567conversion.
568
569Conversion from one object type to another is permitted as long as the
570common attribute names have convertible types. Any attribute present in the
571target type but not in the source type is populated with a null value of
572the appropriate type.
573
574Conversion from one tuple type to another is permitted as long as the
575tuples have the same length and the elements have convertible types.
576
577### Type Unification
578
579Type unification is an operation that takes a list of types and attempts
580to find a single type to which they can all be converted. Since some
581type pairs have bidirectional conversions, preference is given to _safe_
582conversions. In technical terms, all possible types are arranged into
583a lattice, from which a most general supertype is selected where possible.
584
585The type resulting from type unification may be one of the input types, or
586it may be an entirely new type produced by combination of two or more
587input types.
588
589The following rules do not guarantee a valid result. In addition to these
590rules, unification fails if any of the given types are not convertible
591(per the above rules) to the selected result type.
592
593The following unification rules apply transitively. That is, if a rule is
594defined from A to B, and one from B to C, then A can unify to C.
595
596Number and bool types both unify with string by preferring string.
597
598Two collection types of the same kind unify according to the unification
599of their element types.
600
601List and set types unify by preferring the list type.
602
603Map and object types unify by preferring the object type.
604
605List, set and tuple types unify by preferring the tuple type.
606
607The dynamic pseudo-type unifies with any other type by selecting that other
608type. The dynamic pseudo-type is the result type only if _all_ input types
609are the dynamic pseudo-type.
610
611Two object types unify by constructing a new type whose attributes are
612the union of those of the two input types. Any common attributes themselves
613have their types unified.
614
615Two tuple types of the same length unify by constructing a new type of the
616same length whose elements are the unification of the corresponding elements
617in the two input types.
618
619## Static Analysis
620
621In most applications, full expression evaluation is sufficient for understanding
622the provided configuration. However, some specialized applications require more
623direct access to the physical structures in the expressions, which can for
624example allow the construction of new language constructs in terms of the
625existing syntax elements.
626
627Since static analysis operates on the physical structure of configuration, the
628details will vary depending on syntax. Each syntax must decide which of its
629physical structures corresponds to the following analyses, producing error
630diagnostics if they are applied to inappropriate expressions.
631
632The following are the required static analysis functions:
633
634* **Static List**: Require list/tuple construction syntax to be used and
635 return a list of expressions for each of the elements given.
636
637* **Static Map**: Require map/object construction syntax to be used and
638 return a list of key/value pairs -- both expressions -- for each of
639 the elements given. The usual constraint that a map key must be a string
640 must not apply to this analysis, thus allowing applications to interpret
641 arbitrary keys as they see fit.
642
643* **Static Call**: Require function call syntax to be used and return an
644 object describing the called function name and a list of expressions
645 representing each of the call arguments.
646
647* **Static Traversal**: Require a reference to a symbol in the variable
648 scope and return a description of the path from the root scope to the
649 accessed attribute or index.
650
651The intent of a calling application using these features is to require a more
652rigid interpretation of the configuration than in expression evaluation.
653Syntax implementations should make use of the extra contextual information
654provided in order to make an intuitive mapping onto the constructs of the
655underlying syntax, possibly interpreting the expression slightly differently
656than it would be interpreted in normal evaluation.
657
658Each syntax must define which of its expression elements each of the analyses
659above applies to, and how those analyses behave given those expression elements.
660
661## Implementation Considerations
662
663Implementations of this specification are free to adopt any strategy that
664produces behavior consistent with the specification. This non-normative
665section describes some possible implementation strategies that are consistent
666with the goals of this specification.
667
668### Language-agnosticism
669
670The language-agnosticism of this specification assumes that certain behaviors
671are implemented separately for each syntax:
672
673* Matching of a body schema with the physical elements of a body in the
674  source language, to determine correspondence between physical constructs
675 and schema elements.
676
677* Implementing the _dynamic attributes_ body processing mode by either
678 interpreting all physical constructs as attributes or producing an error
679 if non-attribute constructs are present.
680
681* Providing an evaluation function for all possible expressions that produces
682 a value given an evaluation context.
683
684* Providing the static analysis functionality described above in a manner that
685 makes sense within the convention of the syntax.
686
687The suggested implementation strategy is to use an implementation language's
688closest concept to an _abstract type_, _virtual type_ or _interface type_
689to represent both Body and Expression. Each language-specific implementation
690can then provide an implementation of each of these types wrapping AST nodes
691or other physical constructs from the language parser.
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
new file mode 100644
index 0000000..98ada87
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
@@ -0,0 +1,40 @@
1package hcl
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7type staticExpr struct {
8 val cty.Value
9 rng Range
10}
11
12// StaticExpr returns an Expression that always evaluates to the given value.
13//
14// This is useful to substitute default values for expressions that are
15// not explicitly given in configuration and thus would otherwise have no
16// Expression to return.
17//
18// Since expressions are expected to have a source range, the caller must
19// provide one. Ideally this should be a real source range, but it can
20// be a synthetic one (with an empty-string filename) if no suitable range
21// is available.
22func StaticExpr(val cty.Value, rng Range) Expression {
23 return staticExpr{val, rng}
24}
25
26func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) {
27 return e.val, nil
28}
29
30func (e staticExpr) Variables() []Traversal {
31 return nil
32}
33
34func (e staticExpr) Range() Range {
35 return e.rng
36}
37
38func (e staticExpr) StartRange() Range {
39 return e.rng
40}
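As a usage sketch, an application that wants to fall back to a default when the user omitted an attribute might do something like the following; the default value and the empty range are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A synthetic range (empty filename) is acceptable when no real source
	// range is available, per the doc comment above.
	expr := hcl.StaticExpr(cty.StringVal("default-name"), hcl.Range{})

	// A static expression never consults the context, so nil is fine here.
	val, diags := expr.Value(nil)
	if diags.HasErrors() {
		fmt.Println("unexpected diagnostics")
		return
	}
	fmt.Println(val.AsString()) // default-name
}
```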
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
new file mode 100644
index 0000000..b336f30
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
@@ -0,0 +1,151 @@
1package hcl
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// File is the top-level node that results from parsing an HCL file.
8type File struct {
9 Body Body
10 Bytes []byte
11
12 // Nav is used to integrate with the "hcled" editor integration package,
13 // and with diagnostic information formatters. It is not for direct use
14 // by a calling application.
15 Nav interface{}
16}
17
18// Block represents a nested block within a Body.
19type Block struct {
20 Type string
21 Labels []string
22 Body Body
23
24 DefRange Range // Range that can be considered the "definition" for seeking in an editor
25 TypeRange Range // Range for the block type declaration specifically.
26 LabelRanges []Range // Ranges for the label values specifically.
27}
28
29// Blocks is a sequence of Block.
30type Blocks []*Block
31
32// Attributes is a set of attributes keyed by their names.
33type Attributes map[string]*Attribute
34
35// Body is a container for attributes and blocks. It serves as the primary
36// unit of hierarchical structure within configuration.
37//
38// The content of a body cannot be meaningfully interpreted without a schema,
39// so Body represents the raw body content and has methods that allow the
40// content to be extracted in terms of a given schema.
41type Body interface {
42 // Content verifies that the entire body content conforms to the given
43 // schema and then returns it, and/or returns diagnostics. The returned
44 // body content is valid if non-nil, regardless of whether Diagnostics
45 // are provided, but diagnostics should still be eventually shown to
46 // the user.
47 Content(schema *BodySchema) (*BodyContent, Diagnostics)
48
49 // PartialContent is like Content except that it permits the configuration
50 // to contain additional blocks or attributes not specified in the
51 // schema. If any are present, the returned Body is non-nil and contains
52 // the remaining items from the body that were not selected by the schema.
53 PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics)
54
55 // JustAttributes attempts to interpret all of the contents of the body
56 // as attributes, allowing for the contents to be accessed without a priori
57 // knowledge of the structure.
58 //
59 // The behavior of this method depends on the body's source language.
60 // Some languages, like JSON, can't distinguish between attributes and
61	// blocks without schema hints, but for languages that _can_, error
62 // diagnostics will be generated if any blocks are present in the body.
63 //
64 // Diagnostics may be produced for other reasons too, such as duplicate
65 // declarations of the same attribute.
66 JustAttributes() (Attributes, Diagnostics)
67
68 // MissingItemRange returns a range that represents where a missing item
69 // might hypothetically be inserted. This is used when producing
70 // diagnostics about missing required attributes or blocks. Not all bodies
71 // will have an obvious single insertion point, so the result here may
72 // be rather arbitrary.
73 MissingItemRange() Range
74}
75
76// BodyContent is the result of applying a BodySchema to a Body.
77type BodyContent struct {
78 Attributes Attributes
79 Blocks Blocks
80
81 MissingItemRange Range
82}
83
84// Attribute represents an attribute from within a body.
85type Attribute struct {
86 Name string
87 Expr Expression
88
89 Range Range
90 NameRange Range
91}
92
93// Expression is a literal value or an expression provided in the
94// configuration, which can be evaluated within a scope to produce a value.
95type Expression interface {
96 // Value returns the value resulting from evaluating the expression
97 // in the given evaluation context.
98 //
99 // The context may be nil, in which case the expression may contain
100 // only constants and diagnostics will be produced for any non-constant
101 // sub-expressions. (The exact definition of this depends on the source
102 // language.)
103 //
104 // The context may instead be set but have either its Variables or
105 // Functions maps set to nil, in which case only use of these features
106 // will return diagnostics.
107 //
108 // Different diagnostics are provided depending on whether the given
109 // context maps are nil or empty. In the former case, the message
110 // tells the user that variables/functions are not permitted at all,
111 // while in the latter case usage will produce a "not found" error for
112 // the specific symbol in question.
113 Value(ctx *EvalContext) (cty.Value, Diagnostics)
114
115 // Variables returns a list of variables referenced in the receiving
116 // expression. These are expressed as absolute Traversals, so may include
117 // additional information about how the variable is used, such as
118 // attribute lookups, which the calling application can potentially use
119 // to only selectively populate the scope.
120 Variables() []Traversal
121
122 Range() Range
123 StartRange() Range
124}
125
126// OfType filters the receiving block sequence by block type name,
127// returning a new block sequence including only the blocks of the
128// requested type.
129func (els Blocks) OfType(typeName string) Blocks {
130 ret := make(Blocks, 0)
131 for _, el := range els {
132 if el.Type == typeName {
133 ret = append(ret, el)
134 }
135 }
136 return ret
137}
138
139// ByType transforms the receiving block sequence into a map from type
140// name to block sequences of only that type.
141func (els Blocks) ByType() map[string]Blocks {
142 ret := make(map[string]Blocks)
143 for _, el := range els {
144 ty := el.Type
145 if ret[ty] == nil {
146 ret[ty] = make(Blocks, 0, 1)
147 }
148 ret[ty] = append(ret[ty], el)
149 }
150 return ret
151}
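Putting the pieces above together, a typical decode step looks roughly like the sketch below. The schema contents and the "resource" block type are invented for the example, and the body is assumed to come from one of the concrete syntax parsers.

```go
package example

import "github.com/hashicorp/hcl2/hcl"

// decodeResources is a sketch combining Content with the Blocks helpers.
func decodeResources(body hcl.Body) (hcl.Blocks, hcl.Diagnostics) {
	schema := &hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "resource", LabelNames: []string{"type", "name"}},
		},
	}

	content, diags := body.Content(schema)
	if diags.HasErrors() {
		return nil, diags
	}

	// OfType filters to a single block type; ByType would instead group
	// every block in the content by its type name.
	return content.Blocks.OfType("resource"), diags
}
```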
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
new file mode 100644
index 0000000..24f4c91
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
@@ -0,0 +1,352 @@
1package hcl
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7)
8
9// A Traversal is a description of traversing through a value via a
10// series of operations such as attribute lookup, index lookup, etc.
11//
12// It is used to look up values in scopes, for example.
13//
14// The traversal operations are implementations of interface Traverser.
15// This is a closed set of implementations, so the interface cannot be
16// implemented from outside this package.
17//
18// A traversal can be absolute (its first value is a symbol name) or relative
19// (starts from an existing value).
20type Traversal []Traverser
21
22// TraversalJoin appends a relative traversal to an absolute traversal to
23// produce a new absolute traversal.
24func TraversalJoin(abs Traversal, rel Traversal) Traversal {
25 if abs.IsRelative() {
26 panic("first argument to TraversalJoin must be absolute")
27 }
28 if !rel.IsRelative() {
29 panic("second argument to TraversalJoin must be relative")
30 }
31
32 ret := make(Traversal, len(abs)+len(rel))
33 copy(ret, abs)
34 copy(ret[len(abs):], rel)
35 return ret
36}
37
38// TraverseRel applies the receiving traversal to the given value, returning
39// the resulting value. This is supported only for relative traversals,
40// and will panic if applied to an absolute traversal.
41func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
42 if !t.IsRelative() {
43 panic("can't use TraverseRel on an absolute traversal")
44 }
45
46 current := val
47 var diags Diagnostics
48 for _, tr := range t {
49 var newDiags Diagnostics
50 current, newDiags = tr.TraversalStep(current)
51 diags = append(diags, newDiags...)
52 if newDiags.HasErrors() {
53 return cty.DynamicVal, diags
54 }
55 }
56 return current, diags
57}
58
59// TraverseAbs applies the receiving traversal to the given eval context,
60// returning the resulting value. This is supported only for absolute
61// traversals, and will panic if applied to a relative traversal.
62func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
63 if t.IsRelative() {
64 panic("can't use TraverseAbs on a relative traversal")
65 }
66
67 split := t.SimpleSplit()
68 root := split.Abs[0].(TraverseRoot)
69 name := root.Name
70
71 thisCtx := ctx
72 hasNonNil := false
73 for thisCtx != nil {
74 if thisCtx.Variables == nil {
75 thisCtx = thisCtx.parent
76 continue
77 }
78 hasNonNil = true
79 val, exists := thisCtx.Variables[name]
80 if exists {
81 return split.Rel.TraverseRel(val)
82 }
83 thisCtx = thisCtx.parent
84 }
85
86 if !hasNonNil {
87 return cty.DynamicVal, Diagnostics{
88 {
89 Severity: DiagError,
90 Summary: "Variables not allowed",
91 Detail: "Variables may not be used here.",
92 Subject: &root.SrcRange,
93 },
94 }
95 }
96
97 suggestions := make([]string, 0, len(ctx.Variables))
98 thisCtx = ctx
99 for thisCtx != nil {
100 for k := range thisCtx.Variables {
101 suggestions = append(suggestions, k)
102 }
103 thisCtx = thisCtx.parent
104 }
105 suggestion := nameSuggestion(name, suggestions)
106 if suggestion != "" {
107 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
108 }
109
110 return cty.DynamicVal, Diagnostics{
111 {
112 Severity: DiagError,
113 Summary: "Unknown variable",
114 Detail: fmt.Sprintf("There is no variable named %q.%s", name, suggestion),
115 Subject: &root.SrcRange,
116 },
117 }
118}
119
120// IsRelative returns true if the receiver is a relative traversal, or false
121// otherwise.
122func (t Traversal) IsRelative() bool {
123 if len(t) == 0 {
124 return true
125 }
126 if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot {
127 return false
128 }
129 return true
130}
131
132// SimpleSplit returns a TraversalSplit where the name lookup is the absolute
133// part and the remainder is the relative part. Supported only for
134// absolute traversals, and will panic if applied to a relative traversal.
135//
136// This can be used by applications that have a relatively-simple variable
137// namespace where only the top-level is directly populated in the scope, with
138// everything else handled by relative lookups from those initial values.
139func (t Traversal) SimpleSplit() TraversalSplit {
140 if t.IsRelative() {
141 panic("can't use SimpleSplit on a relative traversal")
142 }
143 return TraversalSplit{
144 Abs: t[0:1],
145 Rel: t[1:],
146 }
147}
148
149// RootName returns the root name for an absolute traversal. Will panic if
150// called on a relative traversal.
151func (t Traversal) RootName() string {
152 if t.IsRelative() {
153 panic("can't use RootName on a relative traversal")
154
155 }
156 return t[0].(TraverseRoot).Name
157}
158
159// SourceRange returns the source range for the traversal.
160func (t Traversal) SourceRange() Range {
161 if len(t) == 0 {
162 // Nothing useful to return here, but we'll return something
163 // that's correctly-typed at least.
164 return Range{}
165 }
166
167 return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange())
168}
169
170// TraversalSplit represents a pair of traversals, the first of which is
171// an absolute traversal and the second of which is relative to the first.
172//
173// This is used by calling applications that only populate prefixes of the
174// traversals in the scope, with Abs representing the part coming from the
175// scope and Rel representing the remaining steps once that part is
176// retrieved.
177type TraversalSplit struct {
178 Abs Traversal
179 Rel Traversal
180}
181
182// TraverseAbs traverses from a scope to the value resulting from the
183// absolute traversal.
184func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
185 return t.Abs.TraverseAbs(ctx)
186}
187
188// TraverseRel traverses from a given value, assumed to be the result of
189// TraverseAbs on some scope, to a final result for the entire split traversal.
190func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
191 return t.Rel.TraverseRel(val)
192}
193
194// Traverse is a convenience function to apply TraverseAbs followed by
195// TraverseRel.
196func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
197 v1, diags := t.TraverseAbs(ctx)
198 if diags.HasErrors() {
199 return cty.DynamicVal, diags
200 }
201 v2, newDiags := t.TraverseRel(v1)
202 diags = append(diags, newDiags...)
203 return v2, diags
204}
205
206// Join concatenates together the Abs and Rel parts to produce a single
207// absolute traversal.
208func (t TraversalSplit) Join() Traversal {
209 return TraversalJoin(t.Abs, t.Rel)
210}
211
212// RootName returns the root name for the absolute part of the split.
213func (t TraversalSplit) RootName() string {
214 return t.Abs.RootName()
215}
216
217// A Traverser is a step within a Traversal.
218type Traverser interface {
219 TraversalStep(cty.Value) (cty.Value, Diagnostics)
220 SourceRange() Range
221 isTraverserSigil() isTraverser
222}
223
224// Embed this in a struct to declare it as a Traverser
225type isTraverser struct {
226}
227
228func (tr isTraverser) isTraverserSigil() isTraverser {
229 return isTraverser{}
230}
231
232// TraverseRoot looks up a root name in a scope. It is used as the first step
233// of an absolute Traversal, and cannot itself be traversed directly.
234type TraverseRoot struct {
235 isTraverser
236 Name string
237 SrcRange Range
238}
239
240// TraversalStep on a TraverseRoot immediately panics, because absolute
241// traversals cannot be directly traversed.
242func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
243 panic("Cannot traverse an absolute traversal")
244}
245
246func (tn TraverseRoot) SourceRange() Range {
247 return tn.SrcRange
248}
249
250// TraverseAttr looks up an attribute in its initial value.
251type TraverseAttr struct {
252 isTraverser
253 Name string
254 SrcRange Range
255}
256
257func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
258 if val.IsNull() {
259 return cty.DynamicVal, Diagnostics{
260 {
261 Severity: DiagError,
262 Summary: "Attempt to get attribute from null value",
263 Detail: "This value is null, so it does not have any attributes.",
264 Subject: &tn.SrcRange,
265 },
266 }
267 }
268
269 ty := val.Type()
270 switch {
271 case ty.IsObjectType():
272 if !ty.HasAttribute(tn.Name) {
273 return cty.DynamicVal, Diagnostics{
274 {
275 Severity: DiagError,
276 Summary: "Unsupported attribute",
277 Detail: fmt.Sprintf("This object does not have an attribute named %q.", tn.Name),
278 Subject: &tn.SrcRange,
279 },
280 }
281 }
282
283 if !val.IsKnown() {
284 return cty.UnknownVal(ty.AttributeType(tn.Name)), nil
285 }
286
287 return val.GetAttr(tn.Name), nil
288 case ty.IsMapType():
289 if !val.IsKnown() {
290 return cty.UnknownVal(ty.ElementType()), nil
291 }
292
293 idx := cty.StringVal(tn.Name)
294 if val.HasIndex(idx).False() {
295 return cty.DynamicVal, Diagnostics{
296 {
297 Severity: DiagError,
298 Summary: "Missing map element",
299 Detail: fmt.Sprintf("This map does not have an element with the key %q.", tn.Name),
300 Subject: &tn.SrcRange,
301 },
302 }
303 }
304
305 return val.Index(idx), nil
306 case ty == cty.DynamicPseudoType:
307 return cty.DynamicVal, nil
308 default:
309 return cty.DynamicVal, Diagnostics{
310 {
311 Severity: DiagError,
312 Summary: "Unsupported attribute",
313 Detail: "This value does not have any attributes.",
314 Subject: &tn.SrcRange,
315 },
316 }
317 }
318}
319
320func (tn TraverseAttr) SourceRange() Range {
321 return tn.SrcRange
322}
323
324// TraverseIndex applies the index operation to its initial value.
325type TraverseIndex struct {
326 isTraverser
327 Key cty.Value
328 SrcRange Range
329}
330
331func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
332 return Index(val, tn.Key, &tn.SrcRange)
333}
334
335func (tn TraverseIndex) SourceRange() Range {
336 return tn.SrcRange
337}
338
339// TraverseSplat applies the splat operation to its initial value.
340type TraverseSplat struct {
341 isTraverser
342 Each Traversal
343 SrcRange Range
344}
345
346func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
347 panic("TraverseSplat not yet implemented")
348}
349
350func (tn TraverseSplat) SourceRange() Range {
351 return tn.SrcRange
352}
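An illustrative caller-side sketch of the split-traversal types above (editorial, not part of the vendored file; absPart, relPart, and ctx are assumed to have been obtained elsewhere, e.g. from a parsed expression and an application-built scope):

	// Resolve the scope-level prefix first, then apply the remaining
	// steps to the value it produced.
	split := hcl.TraversalSplit{Abs: absPart, Rel: relPart}
	rootVal, diags := split.TraverseAbs(ctx)
	if !diags.HasErrors() {
		finalVal, moreDiags := split.TraverseRel(rootVal)
		diags = append(diags, moreDiags...)
		_ = finalVal
	}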
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
new file mode 100644
index 0000000..5f52946
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -0,0 +1,121 @@
1package hcl
2
3// AbsTraversalForExpr attempts to interpret the given expression as
4// an absolute traversal, or returns error diagnostic(s) if that is
5// not possible for the given expression.
6//
7// A particular Expression implementation can support this function by
8// offering a method called AsTraversal that takes no arguments and
9// returns either a valid absolute traversal or nil to indicate that
10// no traversal is possible. Alternatively, an implementation can support
11// UnwrapExpression to delegate handling of this function to a wrapped
12// Expression object.
13//
14// In most cases the calling application is interested in the value
15// that results from an expression, but in rarer cases the application
16// needs to see the name of the variable and subsequent
17// attributes/indexes itself, for example to allow users to give references
18// to the variables themselves rather than to their values. An implementer
19// of this function should at least support attribute and index steps.
20func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
21 type asTraversal interface {
22 AsTraversal() Traversal
23 }
24
25 physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
26 _, supported := expr.(asTraversal)
27 return supported
28 })
29
30 if asT, supported := physExpr.(asTraversal); supported {
31 if traversal := asT.AsTraversal(); traversal != nil {
32 return traversal, nil
33 }
34 }
35 return nil, Diagnostics{
36 &Diagnostic{
37 Severity: DiagError,
38 Summary: "Invalid expression",
39 Detail: "A static variable reference is required.",
40 Subject: expr.Range().Ptr(),
41 },
42 }
43}
44
45// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
46// a relative traversal instead. Due to the nature of HCL expressions, the
47// first element of the returned traversal is always a TraverseAttr, and
48// then it will be followed by zero or more further traversal steps.
49//
50// Any expression accepted by AbsTraversalForExpr is also accepted by
51// RelTraversalForExpr.
52func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
53 traversal, diags := AbsTraversalForExpr(expr)
54 if len(traversal) > 0 {
55 root := traversal[0].(TraverseRoot)
56 traversal[0] = TraverseAttr{
57 Name: root.Name,
58 SrcRange: root.SrcRange,
59 }
60 }
61 return traversal, diags
62}
63
64// ExprAsKeyword attempts to interpret the given expression as a static keyword,
65// returning the keyword string if possible, and the empty string if not.
66//
67// A static keyword, for the sake of this function, is a single identifier.
68// For example, the following attribute has an expression that would produce
69// the keyword "foo":
70//
71// example = foo
72//
73// This function is a variant of AbsTraversalForExpr, which uses the same
74// interface on the given expression. This helper constrains the result
75// further by requiring only a single root identifier.
76//
77// This function is intended to be used with the following idiom, to recognize
78// situations where one of a fixed set of keywords is required and arbitrary
79// expressions are not allowed:
80//
81// switch hcl.ExprAsKeyword(expr) {
82// case "allow":
83// // (take suitable action for keyword "allow")
84// case "deny":
85// // (take suitable action for keyword "deny")
86// default:
87// diags = append(diags, &hcl.Diagnostic{
88// // ... "invalid keyword" diagnostic message ...
89// })
90// }
91//
92// The above approach will generate the same message for both the use of an
93// unrecognized keyword and for not using a keyword at all, which is usually
94// reasonable if the message specifies that the given value must be a keyword
95// from that fixed list.
96//
97// Note that in the native syntax the keywords "true", "false", and "null" are
98// recognized as literal values during parsing and so these reserved words
99// cannot be accepted as keywords by this function.
100//
101// Since interpreting an expression as a keyword bypasses usual expression
102// evaluation, it should be used sparingly for situations where e.g. one of
103// a fixed set of keywords is used in a structural way in a special attribute
104// to affect the further processing of a block.
105func ExprAsKeyword(expr Expression) string {
106 type asTraversal interface {
107 AsTraversal() Traversal
108 }
109
110 physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
111 _, supported := expr.(asTraversal)
112 return supported
113 })
114
115 if asT, supported := physExpr.(asTraversal); supported {
116 if traversal := asT.AsTraversal(); len(traversal) == 1 {
117 return traversal.RootName()
118 }
119 }
120 return ""
121}
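A hedged usage sketch for the two helpers above (editorial, not part of the vendored file; attr.Expr stands in for any hcl.Expression obtained from decoded body content):

	// Recover the reference itself rather than its value, e.g. for an
	// attribute written as `target = aws_instance.web`.
	traversal, diags := hcl.AbsTraversalForExpr(attr.Expr)
	if !diags.HasErrors() {
		// RootName would report "aws_instance" for the example above.
		log.Printf("references %q", traversal.RootName())
	}

	// Keyword-style attributes such as `action = allow` can instead use:
	switch hcl.ExprAsKeyword(attr.Expr) {
	case "allow", "deny":
		// take the appropriate action for the recognized keyword
	default:
		// not a keyword from the fixed list; emit an error diagnostic
	}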
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go
new file mode 100644
index 0000000..7e652e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go
@@ -0,0 +1,21 @@
1package hcldec
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7type blockLabel struct {
8 Value string
9 Range hcl.Range
10}
11
12func labelsForBlock(block *hcl.Block) []blockLabel {
13 ret := make([]blockLabel, len(block.Labels))
14 for i := range block.Labels {
15 ret[i] = blockLabel{
16 Value: block.Labels[i],
17 Range: block.LabelRanges[i],
18 }
19 }
20 return ret
21}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go
new file mode 100644
index 0000000..6cf93fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go
@@ -0,0 +1,36 @@
1package hcldec
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) {
9 schema := ImpliedSchema(spec)
10
11 var content *hcl.BodyContent
12 var diags hcl.Diagnostics
13 var leftovers hcl.Body
14
15 if partial {
16 content, leftovers, diags = body.PartialContent(schema)
17 } else {
18 content, diags = body.Content(schema)
19 }
20
21 val, valDiags := spec.decode(content, blockLabels, ctx)
22 diags = append(diags, valDiags...)
23
24 return val, leftovers, diags
25}
26
27func impliedType(spec Spec) cty.Type {
28 return spec.impliedType()
29}
30
31func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range {
32 schema := ImpliedSchema(spec)
33 content, _, _ := body.PartialContent(schema)
34
35 return spec.sourceRange(content, blockLabels)
36}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go
new file mode 100644
index 0000000..23bfe54
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go
@@ -0,0 +1,12 @@
1// Package hcldec provides a higher-level API for unpacking the content of
2// HCL bodies, implemented in terms of the low-level "Content" API exposed
3// by the bodies themselves.
4//
5// It allows decoding an entire nested configuration in a single operation
6// by providing a description of the intended structure.
7//
8// For some applications it may be more convenient to use the "gohcl"
9// package, which has a similar purpose but decodes directly into native
10// Go data types. hcldec instead targets the cty type system, and thus allows
11// a cty-driven application to remain within that type system.
12package hcldec
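A hedged fragment showing the intended shape of use (editorial; the attribute names are invented and "body" is assumed to be an hcl.Body obtained from a parser such as hclparse):

	spec := hcldec.ObjectSpec{
		"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		"port": &hcldec.AttrSpec{Name: "port", Type: cty.Number},
	}

	// For a body containing `name = "example"` and `port = 8080`, this
	// produces a cty object value with those two attributes.
	val, diags := hcldec.Decode(body, spec, nil)
	if diags.HasErrors() {
		// report diagnostics and stop
	}
	_ = val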
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go
new file mode 100644
index 0000000..e2027cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go
@@ -0,0 +1,23 @@
1package hcldec
2
3import (
4 "encoding/gob"
5)
6
7func init() {
8 // Every Spec implementation should be registered with gob, so that
9 // specs can be sent over gob channels, such as using
10 // github.com/hashicorp/go-plugin with plugins that need to describe
11 // what shape of configuration they are expecting.
12 gob.Register(ObjectSpec(nil))
13 gob.Register(TupleSpec(nil))
14 gob.Register((*AttrSpec)(nil))
15 gob.Register((*LiteralSpec)(nil))
16 gob.Register((*ExprSpec)(nil))
17 gob.Register((*BlockSpec)(nil))
18 gob.Register((*BlockListSpec)(nil))
19 gob.Register((*BlockSetSpec)(nil))
20 gob.Register((*BlockMapSpec)(nil))
21 gob.Register((*BlockLabelSpec)(nil))
22 gob.Register((*DefaultSpec)(nil))
23}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go
new file mode 100644
index 0000000..5d1f10a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go
@@ -0,0 +1,78 @@
1package hcldec
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// Decode interprets the given body using the given specification and returns
9// the resulting value. If the given body is not valid per the spec, error
10// diagnostics are returned and the returned value is likely to be incomplete.
11//
12// The ctx argument may be nil, in which case any references to variables or
13// functions will produce error diagnostics.
14func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
15 val, _, diags := decode(body, nil, ctx, spec, false)
16 return val, diags
17}
18
19// PartialDecode is like Decode except that it permits "leftover" items in
20// the top-level body, which are returned as a new body to allow for
21// further processing.
22//
23// Any descendent block bodies are _not_ decoded partially and thus must
24// be fully described by the given specification.
25func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) {
26 return decode(body, nil, ctx, spec, true)
27}
28
29// ImpliedType returns the value type that should result from decoding the
30// given spec.
31func ImpliedType(spec Spec) cty.Type {
32 return impliedType(spec)
33}
34
35// SourceRange interprets the given body using the given specification and
36// then returns the source range of the value that would be used to
37// fulfill the spec.
38//
39// This can be used if application-level validation detects value errors, to
40// obtain a reasonable SourceRange to use for generated diagnostics. It works
41// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...)
42// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will
43// be less useful the broader the specification, so e.g. a spec that returns
44// the entirety of all of the blocks of a given type is likely to be
45// _particularly_ arbitrary and useless.
46//
47// If the given body is not valid per the given spec, the result is best-effort
48// and may not actually be something ideal. It's expected that an application
49// will already have used Decode or PartialDecode earlier and thus had an
50// opportunity to detect and report spec violations.
51func SourceRange(body hcl.Body, spec Spec) hcl.Range {
52 return sourceRange(body, nil, spec)
53}
54
55// ChildBlockTypes returns a map of all of the child block types declared
56// by the given spec, with block type names as keys and the associated
57// nested body specs as values.
58func ChildBlockTypes(spec Spec) map[string]Spec {
59 ret := map[string]Spec{}
60
61 // visitSameBodyChildren walks through the spec structure, calling
62 // the given callback for each descendent spec encountered. We are
63 // interested in the specs that reference attributes and blocks.
64 var visit visitFunc
65 visit = func(s Spec) {
66 if bs, ok := s.(blockSpec); ok {
67 for _, blockS := range bs.blockHeaderSchemata() {
68 ret[blockS.Type] = bs.nestedSpec()
69 }
70 }
71
72 s.visitSameBodyChildren(visit)
73 }
74
75 visit(spec)
76
77 return ret
78}
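A hedged fragment illustrating PartialDecode, where content not claimed by the spec is returned for a later pass (editorial; otherSpec is an assumed second specification):

	val, remain, diags := hcldec.PartialDecode(body, spec, nil)
	// "remain" holds the attributes and blocks the first spec did not
	// mention, and can itself be decoded with a different spec later.
	moreVal, moreDiags := hcldec.Decode(remain, otherSpec, nil)
	diags = append(diags, moreDiags...)
	_, _ = val, moreVal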
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go
new file mode 100644
index 0000000..b57bd96
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go
@@ -0,0 +1,36 @@
1package hcldec
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// ImpliedSchema returns the *hcl.BodySchema implied by the given specification.
8// This is the schema that the Decode function will use internally to
9// access the content of a given body.
10func ImpliedSchema(spec Spec) *hcl.BodySchema {
11 var attrs []hcl.AttributeSchema
12 var blocks []hcl.BlockHeaderSchema
13
14 // visitSameBodyChildren walks through the spec structure, calling
15 // the given callback for each descendent spec encountered. We are
16 // interested in the specs that reference attributes and blocks.
17 var visit visitFunc
18 visit = func(s Spec) {
19 if as, ok := s.(attrSpec); ok {
20 attrs = append(attrs, as.attrSchemata()...)
21 }
22
23 if bs, ok := s.(blockSpec); ok {
24 blocks = append(blocks, bs.blockHeaderSchemata()...)
25 }
26
27 s.visitSameBodyChildren(visit)
28 }
29
30 visit(spec)
31
32 return &hcl.BodySchema{
33 Attributes: attrs,
34 Blocks: blocks,
35 }
36}
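For illustration, a hypothetical spec and the body schema it implies (a sketch, not part of the vendored sources):

	spec := hcldec.ObjectSpec{
		"region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: true},
		"listener": &hcldec.BlockSpec{
			TypeName: "listener",
			Nested: hcldec.ObjectSpec{
				"port": &hcldec.AttrSpec{Name: "port", Type: cty.Number},
			},
		},
	}
	schema := hcldec.ImpliedSchema(spec)
	// schema.Attributes includes {Name: "region", Required: true}
	// schema.Blocks includes {Type: "listener"}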
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
new file mode 100644
index 0000000..25cafcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
@@ -0,0 +1,998 @@
1package hcldec
2
3import (
4 "bytes"
5 "fmt"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10 "github.com/zclconf/go-cty/cty/function"
11)
12
13// A Spec is a description of how to decode a hcl.Body to a cty.Value.
14//
15// The various other types in this package whose names end in "Spec" are
16// the spec implementations. The most common top-level spec is ObjectSpec,
17// which decodes body content into a cty.Value of an object type.
18type Spec interface {
19 // Perform the decode operation on the given body, in the context of
20 // the given block (which might be null), using the given eval context.
21 //
22 // "block" is provided only by the nested calls performed by the spec
23 // types that work on block bodies.
24 decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
25
26 // Return the cty.Type that should be returned when decoding a body with
27 // this spec.
28 impliedType() cty.Type
29
30 // Call the given callback once for each of the nested specs that would
31 // get decoded with the same body and block as the receiver. This should
32 // not descend into the nested specs used when decoding blocks.
33 visitSameBodyChildren(cb visitFunc)
34
35 // Determine the source range of the value that would be returned for the
36 // spec in the given content, in the context of the given block
37 // (which might be null). If the corresponding item is missing, return
38 // a place where it might be inserted.
39 sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range
40}
41
42type visitFunc func(spec Spec)
43
44// An ObjectSpec is a Spec that produces a cty.Value of an object type whose
45// attributes correspond to the keys of the spec map.
46type ObjectSpec map[string]Spec
47
48// attrSpec is implemented by specs that require attributes from the body.
49type attrSpec interface {
50 attrSchemata() []hcl.AttributeSchema
51}
52
53// blockSpec is implemented by specs that require blocks from the body.
54type blockSpec interface {
55 blockHeaderSchemata() []hcl.BlockHeaderSchema
56 nestedSpec() Spec
57}
58
59// specNeedingVariables is implemented by specs that can use variables
60// from the EvalContext, to declare which variables they need.
61type specNeedingVariables interface {
62 variablesNeeded(content *hcl.BodyContent) []hcl.Traversal
63}
64
65func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) {
66 for _, c := range s {
67 cb(c)
68 }
69}
70
71func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
72 vals := make(map[string]cty.Value, len(s))
73 var diags hcl.Diagnostics
74
75 for k, spec := range s {
76 var kd hcl.Diagnostics
77 vals[k], kd = spec.decode(content, blockLabels, ctx)
78 diags = append(diags, kd...)
79 }
80
81 return cty.ObjectVal(vals), diags
82}
83
84func (s ObjectSpec) impliedType() cty.Type {
85 if len(s) == 0 {
86 return cty.EmptyObject
87 }
88
89 attrTypes := make(map[string]cty.Type)
90 for k, childSpec := range s {
91 attrTypes[k] = childSpec.impliedType()
92 }
93 return cty.Object(attrTypes)
94}
95
96func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
97 // This is not great, but the best we can do. In practice, it's rather
98 // strange to ask for the source range of an entire top-level body, since
99 // that's already readily available to the caller.
100 return content.MissingItemRange
101}
102
103// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose
104// elements correspond to the elements of the spec slice.
105type TupleSpec []Spec
106
107func (s TupleSpec) visitSameBodyChildren(cb visitFunc) {
108 for _, c := range s {
109 cb(c)
110 }
111}
112
113func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
114 vals := make([]cty.Value, len(s))
115 var diags hcl.Diagnostics
116
117 for i, spec := range s {
118 var ed hcl.Diagnostics
119 vals[i], ed = spec.decode(content, blockLabels, ctx)
120 diags = append(diags, ed...)
121 }
122
123 return cty.TupleVal(vals), diags
124}
125
126func (s TupleSpec) impliedType() cty.Type {
127 if len(s) == 0 {
128 return cty.EmptyTuple
129 }
130
131 attrTypes := make([]cty.Type, len(s))
132 for i, childSpec := range s {
133 attrTypes[i] = childSpec.impliedType()
134 }
135 return cty.Tuple(attrTypes)
136}
137
138func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
139 // This is not great, but the best we can do. In practice, it's rather
140 // strange to ask for the source range of an entire top-level body, since
141 // that's already readily available to the caller.
142 return content.MissingItemRange
143}
144
145// An AttrSpec is a Spec that evaluates a particular attribute expression in
146// the body and returns its resulting value converted to the requested type,
147// or produces a diagnostic if the type is incorrect.
148type AttrSpec struct {
149 Name string
150 Type cty.Type
151 Required bool
152}
153
154func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) {
155 // leaf node
156}
157
158// specNeedingVariables implementation
159func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
160 attr, exists := content.Attributes[s.Name]
161 if !exists {
162 return nil
163 }
164
165 return attr.Expr.Variables()
166}
167
168// attrSpec implementation
169func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema {
170 return []hcl.AttributeSchema{
171 {
172 Name: s.Name,
173 Required: s.Required,
174 },
175 }
176}
177
178func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
179 attr, exists := content.Attributes[s.Name]
180 if !exists {
181 return content.MissingItemRange
182 }
183
184 return attr.Expr.Range()
185}
186
187func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
188 attr, exists := content.Attributes[s.Name]
189 if !exists {
190 // We don't need to check required and emit a diagnostic here, because
191 // that would already have happened when building "content".
192 return cty.NullVal(s.Type), nil
193 }
194
195 val, diags := attr.Expr.Value(ctx)
196
197 convVal, err := convert.Convert(val, s.Type)
198 if err != nil {
199 diags = append(diags, &hcl.Diagnostic{
200 Severity: hcl.DiagError,
201 Summary: "Incorrect attribute value type",
202 Detail: fmt.Sprintf(
203 "Inappropriate value for attribute %q: %s.",
204 s.Name, err.Error(),
205 ),
206 Subject: attr.Expr.StartRange().Ptr(),
207 Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(),
208 })
209 // We'll return an unknown value of the _correct_ type so that the
210 // incomplete result can still be used for some analysis use-cases.
211 val = cty.UnknownVal(s.Type)
212 } else {
213 val = convVal
214 }
215
216 return val, diags
217}
218
219func (s *AttrSpec) impliedType() cty.Type {
220 return s.Type
221}
222
223// A LiteralSpec is a Spec that produces the given literal value, ignoring
224// the given body.
225type LiteralSpec struct {
226 Value cty.Value
227}
228
229func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) {
230 // leaf node
231}
232
233func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
234 return s.Value, nil
235}
236
237func (s *LiteralSpec) impliedType() cty.Type {
238 return s.Value.Type()
239}
240
241func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
242 // No sensible range to return for a literal, so the caller had better
243 // ensure it doesn't cause any diagnostics.
244 return hcl.Range{
245 Filename: "<unknown>",
246 }
247}
248
249// An ExprSpec is a Spec that evaluates the given expression, ignoring the
250// given body.
251type ExprSpec struct {
252 Expr hcl.Expression
253}
254
255func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) {
256 // leaf node
257}
258
259// specNeedingVariables implementation
260func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
261 return s.Expr.Variables()
262}
263
264func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
265 return s.Expr.Value(ctx)
266}
267
268func (s *ExprSpec) impliedType() cty.Type {
269 // We can't know the type of our expression until we evaluate it
270 return cty.DynamicPseudoType
271}
272
273func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
274 return s.Expr.Range()
275}
276
277// A BlockSpec is a Spec that produces a cty.Value by decoding the contents
278// of a single nested block of a given type, using a nested spec.
279//
280// If the Required flag is not set, the nested block may be omitted, in which
281// case a null value is produced. If it _is_ set, an error diagnostic is
282// produced if there are no nested blocks of the given type.
283type BlockSpec struct {
284 TypeName string
285 Nested Spec
286 Required bool
287}
288
289func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) {
290 // leaf node ("Nested" does not use the same body)
291}
292
293// blockSpec implementation
294func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
295 return []hcl.BlockHeaderSchema{
296 {
297 Type: s.TypeName,
298 LabelNames: findLabelSpecs(s.Nested),
299 },
300 }
301}
302
303// blockSpec implementation
304func (s *BlockSpec) nestedSpec() Spec {
305 return s.Nested
306}
307
308// specNeedingVariables implementation
309func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
310 var childBlock *hcl.Block
311 for _, candidate := range content.Blocks {
312 if candidate.Type != s.TypeName {
313 continue
314 }
315
316 childBlock = candidate
317 break
318 }
319
320 if childBlock == nil {
321 return nil
322 }
323
324 return Variables(childBlock.Body, s.Nested)
325}
326
327func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
328 var diags hcl.Diagnostics
329
330 var childBlock *hcl.Block
331 for _, candidate := range content.Blocks {
332 if candidate.Type != s.TypeName {
333 continue
334 }
335
336 if childBlock != nil {
337 diags = append(diags, &hcl.Diagnostic{
338 Severity: hcl.DiagError,
339 Summary: fmt.Sprintf("Duplicate %s block", s.TypeName),
340 Detail: fmt.Sprintf(
341 "Only one block of type %q is allowed. Previous definition was at %s.",
342 s.TypeName, childBlock.DefRange.String(),
343 ),
344 Subject: &candidate.DefRange,
345 })
346 break
347 }
348
349 childBlock = candidate
350 }
351
352 if childBlock == nil {
353 if s.Required {
354 diags = append(diags, &hcl.Diagnostic{
355 Severity: hcl.DiagError,
356 Summary: fmt.Sprintf("Missing %s block", s.TypeName),
357 Detail: fmt.Sprintf(
358 "A block of type %q is required here.", s.TypeName,
359 ),
360 Subject: &content.MissingItemRange,
361 })
362 }
363 return cty.NullVal(s.Nested.impliedType()), diags
364 }
365
366 if s.Nested == nil {
367 panic("BlockSpec with no Nested Spec")
368 }
369 val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
370 diags = append(diags, childDiags...)
371 return val, diags
372}
373
374func (s *BlockSpec) impliedType() cty.Type {
375 return s.Nested.impliedType()
376}
377
378func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
379 var childBlock *hcl.Block
380 for _, candidate := range content.Blocks {
381 if candidate.Type != s.TypeName {
382 continue
383 }
384
385 childBlock = candidate
386 break
387 }
388
389 if childBlock == nil {
390 return content.MissingItemRange
391 }
392
393 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
394}
395
396// A BlockListSpec is a Spec that produces a cty list of the results of
397// decoding all of the nested blocks of a given type, using a nested spec.
398type BlockListSpec struct {
399 TypeName string
400 Nested Spec
401 MinItems int
402 MaxItems int
403}
404
405func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) {
406 // leaf node ("Nested" does not use the same body)
407}
408
409// blockSpec implementation
410func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
411 return []hcl.BlockHeaderSchema{
412 {
413 Type: s.TypeName,
414 LabelNames: findLabelSpecs(s.Nested),
415 },
416 }
417}
418
419// blockSpec implementation
420func (s *BlockListSpec) nestedSpec() Spec {
421 return s.Nested
422}
423
424// specNeedingVariables implementation
425func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
426 var ret []hcl.Traversal
427
428 for _, childBlock := range content.Blocks {
429 if childBlock.Type != s.TypeName {
430 continue
431 }
432
433 ret = append(ret, Variables(childBlock.Body, s.Nested)...)
434 }
435
436 return ret
437}
438
439func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
440 var diags hcl.Diagnostics
441
442 if s.Nested == nil {
443 panic("BlockListSpec with no Nested Spec")
444 }
445
446 var elems []cty.Value
447 var sourceRanges []hcl.Range
448 for _, childBlock := range content.Blocks {
449 if childBlock.Type != s.TypeName {
450 continue
451 }
452
453 val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
454 diags = append(diags, childDiags...)
455 elems = append(elems, val)
456 sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
457 }
458
459 if len(elems) < s.MinItems {
460 diags = append(diags, &hcl.Diagnostic{
461 Severity: hcl.DiagError,
462 Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName),
463 Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
464 Subject: &content.MissingItemRange,
465 })
466 } else if s.MaxItems > 0 && len(elems) > s.MaxItems {
467 diags = append(diags, &hcl.Diagnostic{
468 Severity: hcl.DiagError,
469 Summary: fmt.Sprintf("Too many %s blocks", s.TypeName),
470			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
471 Subject: &sourceRanges[s.MaxItems],
472 })
473 }
474
475 var ret cty.Value
476
477 if len(elems) == 0 {
478 ret = cty.ListValEmpty(s.Nested.impliedType())
479 } else {
480 ret = cty.ListVal(elems)
481 }
482
483 return ret, diags
484}
485
486func (s *BlockListSpec) impliedType() cty.Type {
487 return cty.List(s.Nested.impliedType())
488}
489
490func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
491 // We return the source range of the _first_ block of the given type,
492 // since they are not guaranteed to form a contiguous range.
493
494 var childBlock *hcl.Block
495 for _, candidate := range content.Blocks {
496 if candidate.Type != s.TypeName {
497 continue
498 }
499
500 childBlock = candidate
501 break
502 }
503
504 if childBlock == nil {
505 return content.MissingItemRange
506 }
507
508 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
509}
510
511// A BlockSetSpec is a Spec that produces a cty set of the results of
512// decoding all of the nested blocks of a given type, using a nested spec.
513type BlockSetSpec struct {
514 TypeName string
515 Nested Spec
516 MinItems int
517 MaxItems int
518}
519
520func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) {
521 // leaf node ("Nested" does not use the same body)
522}
523
524// blockSpec implementation
525func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
526 return []hcl.BlockHeaderSchema{
527 {
528 Type: s.TypeName,
529 LabelNames: findLabelSpecs(s.Nested),
530 },
531 }
532}
533
534// blockSpec implementation
535func (s *BlockSetSpec) nestedSpec() Spec {
536 return s.Nested
537}
538
539// specNeedingVariables implementation
540func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
541 var ret []hcl.Traversal
542
543 for _, childBlock := range content.Blocks {
544 if childBlock.Type != s.TypeName {
545 continue
546 }
547
548 ret = append(ret, Variables(childBlock.Body, s.Nested)...)
549 }
550
551 return ret
552}
553
554func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
555 var diags hcl.Diagnostics
556
557 if s.Nested == nil {
558 panic("BlockSetSpec with no Nested Spec")
559 }
560
561 var elems []cty.Value
562 var sourceRanges []hcl.Range
563 for _, childBlock := range content.Blocks {
564 if childBlock.Type != s.TypeName {
565 continue
566 }
567
568 val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
569 diags = append(diags, childDiags...)
570 elems = append(elems, val)
571 sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
572 }
573
574 if len(elems) < s.MinItems {
575 diags = append(diags, &hcl.Diagnostic{
576 Severity: hcl.DiagError,
577 Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName),
578 Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
579 Subject: &content.MissingItemRange,
580 })
581 } else if s.MaxItems > 0 && len(elems) > s.MaxItems {
582 diags = append(diags, &hcl.Diagnostic{
583 Severity: hcl.DiagError,
584 Summary: fmt.Sprintf("Too many %s blocks", s.TypeName),
585			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
586 Subject: &sourceRanges[s.MaxItems],
587 })
588 }
589
590 var ret cty.Value
591
592 if len(elems) == 0 {
593 ret = cty.SetValEmpty(s.Nested.impliedType())
594 } else {
595 ret = cty.SetVal(elems)
596 }
597
598 return ret, diags
599}
600
601func (s *BlockSetSpec) impliedType() cty.Type {
602 return cty.Set(s.Nested.impliedType())
603}
604
605func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
606 // We return the source range of the _first_ block of the given type,
607 // since they are not guaranteed to form a contiguous range.
608
609 var childBlock *hcl.Block
610 for _, candidate := range content.Blocks {
611 if candidate.Type != s.TypeName {
612 continue
613 }
614
615 childBlock = candidate
616 break
617 }
618
619 if childBlock == nil {
620 return content.MissingItemRange
621 }
622
623 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
624}
625
626// A BlockMapSpec is a Spec that produces a cty map of the results of
627// decoding all of the nested blocks of a given type, using a nested spec.
628//
629// One level of map structure is created for each of the given label names.
630// There must be at least one given label name.
631type BlockMapSpec struct {
632 TypeName string
633 LabelNames []string
634 Nested Spec
635}
636
637func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) {
638 // leaf node ("Nested" does not use the same body)
639}
640
641// blockSpec implementation
642func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
643 return []hcl.BlockHeaderSchema{
644 {
645 Type: s.TypeName,
646 LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
647 },
648 }
649}
650
651// blockSpec implementation
652func (s *BlockMapSpec) nestedSpec() Spec {
653 return s.Nested
654}
655
656// specNeedingVariables implementation
657func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
658 var ret []hcl.Traversal
659
660 for _, childBlock := range content.Blocks {
661 if childBlock.Type != s.TypeName {
662 continue
663 }
664
665 ret = append(ret, Variables(childBlock.Body, s.Nested)...)
666 }
667
668 return ret
669}
670
671func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
672 var diags hcl.Diagnostics
673
674 if s.Nested == nil {
675		panic("BlockMapSpec with no Nested Spec")
676 }
677
678 elems := map[string]interface{}{}
679 for _, childBlock := range content.Blocks {
680 if childBlock.Type != s.TypeName {
681 continue
682 }
683
684 childLabels := labelsForBlock(childBlock)
685 val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
686 targetMap := elems
687 for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
688 if _, exists := targetMap[key]; !exists {
689 targetMap[key] = make(map[string]interface{})
690 }
691 targetMap = targetMap[key].(map[string]interface{})
692 }
693
694 diags = append(diags, childDiags...)
695
696 key := childBlock.Labels[len(s.LabelNames)-1]
697 if _, exists := targetMap[key]; exists {
698 labelsBuf := bytes.Buffer{}
699 for _, label := range childBlock.Labels {
700 fmt.Fprintf(&labelsBuf, " %q", label)
701 }
702 diags = append(diags, &hcl.Diagnostic{
703 Severity: hcl.DiagError,
704 Summary: fmt.Sprintf("Duplicate %s block", s.TypeName),
705 Detail: fmt.Sprintf(
706 "A block for %s%s was already defined. The %s labels must be unique.",
707 s.TypeName, labelsBuf.String(), s.TypeName,
708 ),
709 Subject: &childBlock.DefRange,
710 })
711 continue
712 }
713
714 targetMap[key] = val
715 }
716
717 if len(elems) == 0 {
718 return cty.MapValEmpty(s.Nested.impliedType()), diags
719 }
720
721 var ctyMap func(map[string]interface{}, int) cty.Value
722 ctyMap = func(raw map[string]interface{}, depth int) cty.Value {
723 vals := make(map[string]cty.Value, len(raw))
724 if depth == 1 {
725 for k, v := range raw {
726 vals[k] = v.(cty.Value)
727 }
728 } else {
729 for k, v := range raw {
730 vals[k] = ctyMap(v.(map[string]interface{}), depth-1)
731 }
732 }
733 return cty.MapVal(vals)
734 }
735
736 return ctyMap(elems, len(s.LabelNames)), diags
737}
738
739func (s *BlockMapSpec) impliedType() cty.Type {
740 ret := s.Nested.impliedType()
741	for range s.LabelNames {
742 ret = cty.Map(ret)
743 }
744 return ret
745}
746
747func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
748 // We return the source range of the _first_ block of the given type,
749 // since they are not guaranteed to form a contiguous range.
750
751 var childBlock *hcl.Block
752 for _, candidate := range content.Blocks {
753 if candidate.Type != s.TypeName {
754 continue
755 }
756
757 childBlock = candidate
758 break
759 }
760
761 if childBlock == nil {
762 return content.MissingItemRange
763 }
764
765 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
766}
767
768// A BlockLabelSpec is a Spec that returns a cty.String representing the
769// label of the block its given body belongs to, if indeed its given body
770// belongs to a block. It is a programming error to use this in a non-block
771// context, so this spec will panic in that case.
772//
773// This spec only works in the nested spec within a BlockSpec, BlockListSpec,
774// BlockSetSpec or BlockMapSpec.
775//
776// The full set of label specs used against a particular block must have a
777// consecutive set of indices starting at zero. The maximum index found
778// defines how many labels the corresponding blocks must have in the source configuration.
779type BlockLabelSpec struct {
780 Index int
781 Name string
782}
783
784func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) {
785 // leaf node
786}
787
788func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
789 if s.Index >= len(blockLabels) {
790		panic("BlockLabelSpec used in non-block context")
791 }
792
793 return cty.StringVal(blockLabels[s.Index].Value), nil
794}
795
796func (s *BlockLabelSpec) impliedType() cty.Type {
797 return cty.String // labels are always strings
798}
799
800func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
801 if s.Index >= len(blockLabels) {
802		panic("BlockLabelSpec used in non-block context")
803 }
804
805 return blockLabels[s.Index].Range
806}
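// Illustrative sketch (editorial, not part of the original file): a label
// can be captured alongside ordinary attributes by combining BlockSpec with
// a BlockLabelSpec in its nested spec, for example:
//
//	spec := &hcldec.BlockSpec{
//		TypeName: "service",
//		Nested: hcldec.ObjectSpec{
//			"name": &hcldec.BlockLabelSpec{Index: 0, Name: "name"},
//			"port": &hcldec.AttrSpec{Name: "port", Type: cty.Number},
//		},
//	}
//
// Decoding a block like `service "web" { port = 8080 }` with this spec
// yields an object whose "name" attribute is the label string "web".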
807
808func findLabelSpecs(spec Spec) []string {
809 maxIdx := -1
810 var names map[int]string
811
812 var visit visitFunc
813 visit = func(s Spec) {
814 if ls, ok := s.(*BlockLabelSpec); ok {
815 if maxIdx < ls.Index {
816 maxIdx = ls.Index
817 }
818 if names == nil {
819 names = make(map[int]string)
820 }
821 names[ls.Index] = ls.Name
822 }
823 s.visitSameBodyChildren(visit)
824 }
825
826 visit(spec)
827
828 if maxIdx < 0 {
829 return nil // no labels at all
830 }
831
832 ret := make([]string, maxIdx+1)
833 for i := range ret {
834 name := names[i]
835 if name == "" {
836 // Should never happen if the spec is conformant, since we require
837 // consecutive indices starting at zero.
838 name = fmt.Sprintf("missing%02d", i)
839 }
840 ret[i] = name
841 }
842
843 return ret
844}
845
846// DefaultSpec is a spec that wraps two specs, evaluating the primary first
847// and then evaluating the default if the primary returns a null value.
848//
849// The two specifications must have the same implied result type for correct
850// operation. If not, the result is undefined.
851type DefaultSpec struct {
852 Primary Spec
853 Default Spec
854}
855
856func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) {
857 cb(s.Primary)
858 cb(s.Default)
859}
860
861func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
862 val, diags := s.Primary.decode(content, blockLabels, ctx)
863 if val.IsNull() {
864 var moreDiags hcl.Diagnostics
865 val, moreDiags = s.Default.decode(content, blockLabels, ctx)
866 diags = append(diags, moreDiags...)
867 }
868 return val, diags
869}
870
871func (s *DefaultSpec) impliedType() cty.Type {
872 return s.Primary.impliedType()
873}
874
875func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
876 // We can't tell from here which of the two specs will ultimately be used
877 // in our result, so we'll just assume the first. This is usually the right
878 // choice because the default is often a literal spec that doesn't have a
879 // reasonable source range to return anyway.
880 return s.Primary.sourceRange(content, blockLabels)
881}
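// Illustrative sketch (editorial, not part of the original file): an
// optional attribute with a fallback can be expressed by wrapping an
// AttrSpec in a DefaultSpec whose Default is a LiteralSpec:
//
//	spec := &hcldec.DefaultSpec{
//		Primary: &hcldec.AttrSpec{Name: "port", Type: cty.Number},
//		Default: &hcldec.LiteralSpec{Value: cty.NumberIntVal(8080)},
//	}
//
// If the body omits "port", the primary spec yields a null number and the
// literal 8080 is returned instead. Both specs imply cty.Number, keeping
// the overall implied type consistent as required above.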
882
883// TransformExprSpec is a spec that wraps another and then evaluates a given
884// hcl.Expression on the result.
885//
886// The implied type of this spec is determined by evaluating the expression
887// with an unknown value of the nested spec's implied type, which may cause
888// the result to be imprecise. This spec should not be used in situations where
889// precise result type information is needed.
890type TransformExprSpec struct {
891 Wrapped Spec
892 Expr hcl.Expression
893 TransformCtx *hcl.EvalContext
894 VarName string
895}
896
897func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) {
898 cb(s.Wrapped)
899}
900
901func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
902 wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
903 if diags.HasErrors() {
904 // We won't try to run our function in this case, because it'll probably
905 // generate confusing additional errors that will distract from the
906 // root cause.
907 return cty.UnknownVal(s.impliedType()), diags
908 }
909
910 chiCtx := s.TransformCtx.NewChild()
911 chiCtx.Variables = map[string]cty.Value{
912 s.VarName: wrappedVal,
913 }
914 resultVal, resultDiags := s.Expr.Value(chiCtx)
915 diags = append(diags, resultDiags...)
916 return resultVal, diags
917}
918
919func (s *TransformExprSpec) impliedType() cty.Type {
920 wrappedTy := s.Wrapped.impliedType()
921 chiCtx := s.TransformCtx.NewChild()
922 chiCtx.Variables = map[string]cty.Value{
923 s.VarName: cty.UnknownVal(wrappedTy),
924 }
925 resultVal, _ := s.Expr.Value(chiCtx)
926 return resultVal.Type()
927}
928
929func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
930 // We'll just pass through our wrapped range here, even though that's
931 // not super-accurate, because there's nothing better to return.
932 return s.Wrapped.sourceRange(content, blockLabels)
933}
934
935// TransformFuncSpec is a spec that wraps another and then evaluates a given
936// cty function with the result. The given function must expect exactly one
937// argument, where the result of the wrapped spec will be passed.
938//
939// The implied type of this spec is determined by type-checking the function
940// with an unknown value of the nested spec's implied type, which may cause
941// the result to be imprecise. This spec should not be used in situations where
942// precise result type information is needed.
943//
944// If the given function produces an error when run, this spec will produce
945// a non-user-actionable diagnostic message. It's the caller's responsibility
946// to ensure that the given function cannot fail for any non-error result
947// of the wrapped spec.
948type TransformFuncSpec struct {
949 Wrapped Spec
950 Func function.Function
951}
952
953func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) {
954 cb(s.Wrapped)
955}
956
957func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
958 wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
959 if diags.HasErrors() {
960 // We won't try to run our function in this case, because it'll probably
961 // generate confusing additional errors that will distract from the
962 // root cause.
963 return cty.UnknownVal(s.impliedType()), diags
964 }
965
966 resultVal, err := s.Func.Call([]cty.Value{wrappedVal})
967 if err != nil {
968 // This is not a good example of a diagnostic because it is reporting
969 // a programming error in the calling application, rather than something
970 // an end-user could act on.
971 diags = append(diags, &hcl.Diagnostic{
972 Severity: hcl.DiagError,
973 Summary: "Transform function failed",
974 Detail: fmt.Sprintf("Decoder transform returned an error: %s", err),
975 Subject: s.sourceRange(content, blockLabels).Ptr(),
976 })
977 return cty.UnknownVal(s.impliedType()), diags
978 }
979
980 return resultVal, diags
981}
982
983func (s *TransformFuncSpec) impliedType() cty.Type {
984 wrappedTy := s.Wrapped.impliedType()
985 resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy})
986 if err != nil {
987 // Should never happen with a correctly-configured spec
988 return cty.DynamicPseudoType
989 }
990
991 return resultTy
992}
993
994func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
995 // We'll just pass through our wrapped range here, even though that's
996 // not super-accurate, because there's nothing better to return.
997 return s.Wrapped.sourceRange(content, blockLabels)
998}
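A hedged sketch of TransformFuncSpec in use (editorial, not part of the vendored file; it assumes the UpperFunc function provided by github.com/zclconf/go-cty/cty/function/stdlib):

	spec := &hcldec.TransformFuncSpec{
		Wrapped: &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		Func:    stdlib.UpperFunc,
	}
	// Decoding a body containing `name = "web"` with this spec would
	// produce cty.StringVal("WEB"), since the function is applied to the
	// wrapped spec's result.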
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
new file mode 100644
index 0000000..427b0d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
@@ -0,0 +1,34 @@
1package hcldec
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// Variables processes the given body with the given spec and returns a
8// list of the variable traversals that would be required to decode
9// the same pairing of body and spec.
10//
11// This can be used to conditionally populate the variables in the EvalContext
12// passed to Decode, for applications where a static scope is insufficient.
13//
14// If the given body is not compliant with the given schema, the result may
15// be incomplete, but that's assumed to be okay because the eventual call
16// to Decode will produce error diagnostics anyway.
17func Variables(body hcl.Body, spec Spec) []hcl.Traversal {
18 schema := ImpliedSchema(spec)
19
20 content, _, _ := body.PartialContent(schema)
21
22 var vars []hcl.Traversal
23
24 if vs, ok := spec.(specNeedingVariables); ok {
25 vars = append(vars, vs.variablesNeeded(content)...)
26 }
27 spec.visitSameBodyChildren(func(s Spec) {
28 if vs, ok := s.(specNeedingVariables); ok {
29 vars = append(vars, vs.variablesNeeded(content)...)
30 }
31 })
32
33 return vars
34}
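A hedged fragment showing how Variables can drive construction of the EvalContext used for the real decode (editorial; lookupValue is an invented application-side helper):

	traversals := hcldec.Variables(body, spec)
	vars := map[string]cty.Value{}
	for _, tr := range traversals {
		// Resolve each referenced root name from application data.
		vars[tr.RootName()] = lookupValue(tr)
	}
	val, diags := hcldec.Decode(body, spec, &hcl.EvalContext{Variables: vars})
	_, _ = val, diags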
diff --git a/vendor/github.com/hashicorp/hcl2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go
new file mode 100644
index 0000000..6d47f12
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go
@@ -0,0 +1,123 @@
1package hclparse
2
3import (
4 "fmt"
5 "io/ioutil"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/hashicorp/hcl2/hcl/json"
10)
11
12// NOTE: This is the public interface for parsing. The actual parsers are
13// in other packages alongside this one, with this package just wrapping them
14// to provide a unified interface for the caller across all supported formats.
15
16// Parser is the main interface for parsing configuration files. As well as
17// parsing files, a parser also retains a registry of all of the files it
18// has parsed so that multiple attempts to parse the same file will return
19// the same object and so the collected files can be used when printing
20// diagnostics.
21//
22// Any diagnostics for parsing a file are only returned once on the first
23// call to parse that file. Callers are expected to collect up diagnostics
24// and present them together, so returning diagnostics for the same file
25// multiple times would create a confusing result.
26type Parser struct {
27 files map[string]*hcl.File
28}
29
30// NewParser creates a new parser, ready to parse configuration files.
31func NewParser() *Parser {
32 return &Parser{
33 files: map[string]*hcl.File{},
34 }
35}
36
37// ParseHCL parses the given buffer (which is assumed to have been loaded from
38// the given filename) as a native-syntax configuration file and returns the
39// hcl.File object representing it.
40func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
41 if existing := p.files[filename]; existing != nil {
42 return existing, nil
43 }
44
45 file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1})
46 p.files[filename] = file
47 return file, diags
48}
49
50// ParseHCLFile reads the given filename and parses it as a native-syntax HCL
51// configuration file. An error diagnostic is returned if the given file
52// cannot be read.
53func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) {
54 if existing := p.files[filename]; existing != nil {
55 return existing, nil
56 }
57
58 src, err := ioutil.ReadFile(filename)
59 if err != nil {
60 return nil, hcl.Diagnostics{
61 {
62 Severity: hcl.DiagError,
63 Summary: "Failed to read file",
64 Detail: fmt.Sprintf("The configuration file %q could not be read.", filename),
65 },
66 }
67 }
68
69 return p.ParseHCL(src, filename)
70}
71
72// ParseJSON parses the given JSON buffer (which is assumed to have been loaded
73// from the given filename) and returns the hcl.File object representing it.
74func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
75 if existing := p.files[filename]; existing != nil {
76 return existing, nil
77 }
78
79 file, diags := json.Parse(src, filename)
80 p.files[filename] = file
81 return file, diags
82}
83
84// ParseJSONFile reads the given filename and parses it as JSON, similarly to
85// ParseJSON. An error diagnostic is returned if the given file cannot be read.
86func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) {
87 if existing := p.files[filename]; existing != nil {
88 return existing, nil
89 }
90
91 file, diags := json.ParseFile(filename)
92 p.files[filename] = file
93 return file, diags
94}
95
96// AddFile allows a caller to record in a parser a file that was parsed some
97// other way, thus allowing it to be included in the registry of sources.
98func (p *Parser) AddFile(filename string, file *hcl.File) {
99 p.files[filename] = file
100}
101
102// Sources returns a map from filenames to the raw source code that was
103// read from them. This is intended to be used, for example, to print
104// diagnostics with contextual information.
105//
106// The arrays underlying the returned slices should not be modified.
107func (p *Parser) Sources() map[string][]byte {
108 ret := make(map[string][]byte)
109 for fn, f := range p.files {
110 ret[fn] = f.Bytes
111 }
112 return ret
113}
114
115// Files returns a map from filenames to the File objects produced from them.
116// This is intended to be used, for example, to print diagnostics with
117// contextual information.
118//
119// The returned map and all of the objects it refers to directly or indirectly
120// must not be modified.
121func (p *Parser) Files() map[string]*hcl.File {
122 return p.files
123}
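A hedged end-to-end sketch combining the parser with hcldec and the hcl package's diagnostic text writer (editorial; the file name and spec contents are invented):

	package main

	import (
		"os"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcldec"
		"github.com/hashicorp/hcl2/hclparse"
		"github.com/zclconf/go-cty/cty"
	)

	func main() {
		parser := hclparse.NewParser()
		file, diags := parser.ParseHCLFile("config.hcl")

		spec := hcldec.ObjectSpec{
			"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		}
		if file != nil {
			val, moreDiags := hcldec.Decode(file.Body, spec, nil)
			diags = append(diags, moreDiags...)
			_ = val
		}

		// The parser's file registry supplies source snippets for the
		// human-readable diagnostic output.
		wr := hcl.NewDiagnosticTextWriter(os.Stderr, parser.Files(), 78, false)
		wr.WriteDiagnostics(diags)
	}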
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
index bab86c6..86085de 100644
--- a/vendor/github.com/hashicorp/hil/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go
@@ -395,6 +395,12 @@ func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
395 pos.Column = pos.Column + 2 395 pos.Column = pos.Column + 2
396 litLen = litLen + 2 396 litLen = litLen + 2
397 continue 397 continue
398 } else if follow == '\\' {
399 // \\ escapes \
400 // so we will consume both characters here.
401 pos.Column = pos.Column + 2
402 litLen = litLen + 2
403 continue
398 } 404 }
399 } 405 }
400 } 406 }
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
index 5f4e89e..9d80c42 100644
--- a/vendor/github.com/hashicorp/terraform/config/append.go
+++ b/vendor/github.com/hashicorp/terraform/config/append.go
@@ -82,5 +82,11 @@ func Append(c1, c2 *Config) (*Config, error) {
82 c.Variables = append(c.Variables, c2.Variables...) 82 c.Variables = append(c.Variables, c2.Variables...)
83 } 83 }
84 84
85 if len(c1.Locals) > 0 || len(c2.Locals) > 0 {
86 c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals))
87 c.Locals = append(c.Locals, c1.Locals...)
88 c.Locals = append(c.Locals, c2.Locals...)
89 }
90
85 return c, nil 91 return c, nil
86} 92}
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
index 3f756dc..1772fd7 100644
--- a/vendor/github.com/hashicorp/terraform/config/config.go
+++ b/vendor/github.com/hashicorp/terraform/config/config.go
@@ -8,11 +8,11 @@ import (
8 "strconv" 8 "strconv"
9 "strings" 9 "strings"
10 10
11 "github.com/hashicorp/go-multierror" 11 hcl2 "github.com/hashicorp/hcl2/hcl"
12 "github.com/hashicorp/hil"
13 "github.com/hashicorp/hil/ast" 12 "github.com/hashicorp/hil/ast"
14 "github.com/hashicorp/terraform/helper/hilmapstructure" 13 "github.com/hashicorp/terraform/helper/hilmapstructure"
15 "github.com/hashicorp/terraform/plugin/discovery" 14 "github.com/hashicorp/terraform/plugin/discovery"
15 "github.com/hashicorp/terraform/tfdiags"
16 "github.com/mitchellh/reflectwalk" 16 "github.com/mitchellh/reflectwalk"
17) 17)
18 18
@@ -34,6 +34,7 @@ type Config struct {
34 ProviderConfigs []*ProviderConfig 34 ProviderConfigs []*ProviderConfig
35 Resources []*Resource 35 Resources []*Resource
36 Variables []*Variable 36 Variables []*Variable
37 Locals []*Local
37 Outputs []*Output 38 Outputs []*Output
38 39
39 // The fields below can be filled in by loaders for validation 40 // The fields below can be filled in by loaders for validation
@@ -55,6 +56,8 @@ type AtlasConfig struct {
55type Module struct { 56type Module struct {
56 Name string 57 Name string
57 Source string 58 Source string
59 Version string
60 Providers map[string]string
58 RawConfig *RawConfig 61 RawConfig *RawConfig
59} 62}
60 63
@@ -147,7 +150,7 @@ func (p *Provisioner) Copy() *Provisioner {
147 } 150 }
148} 151}
149 152
150// Variable is a variable defined within the configuration. 153// Variable is a module argument defined within the configuration.
151type Variable struct { 154type Variable struct {
152 Name string 155 Name string
153 DeclaredType string `mapstructure:"type"` 156 DeclaredType string `mapstructure:"type"`
@@ -155,6 +158,12 @@ type Variable struct {
155 Description string 158 Description string
156} 159}
157 160
161// Local is a local value defined within the configuration.
162type Local struct {
163 Name string
164 RawConfig *RawConfig
165}
166
158// Output is an output defined within the configuration. An output is 167// Output is an output defined within the configuration. An output is
159// resulting data that is highlighted by Terraform when finished. An 168// resulting data that is highlighted by Terraform when finished. An
160// output marked Sensitive will be output in a masked form following 169// output marked Sensitive will be output in a masked form following
@@ -222,7 +231,10 @@ func (r *Resource) Count() (int, error) {
222 231
223 v, err := strconv.ParseInt(count, 0, 0) 232 v, err := strconv.ParseInt(count, 0, 0)
224 if err != nil { 233 if err != nil {
225 return 0, err 234 return 0, fmt.Errorf(
235 "cannot parse %q as an integer",
236 count,
237 )
226 } 238 }
227 239
228 return int(v), nil 240 return int(v), nil
@@ -253,7 +265,9 @@ func (r *Resource) ProviderFullName() string {
253// the provider name is inferred from the resource type name. 265// the provider name is inferred from the resource type name.
254func ResourceProviderFullName(resourceType, explicitProvider string) string { 266func ResourceProviderFullName(resourceType, explicitProvider string) string {
255 if explicitProvider != "" { 267 if explicitProvider != "" {
256 return explicitProvider 268 // check for an explicit provider name, or return the original
269 parts := strings.SplitAfter(explicitProvider, "provider.")
270 return parts[len(parts)-1]
257 } 271 }
258 272
259 idx := strings.IndexRune(resourceType, '_') 273 idx := strings.IndexRune(resourceType, '_')
@@ -268,30 +282,35 @@ func ResourceProviderFullName(resourceType, explicitProvider string) string {
268} 282}
269 283
270// Validate does some basic semantic checking of the configuration. 284// Validate does some basic semantic checking of the configuration.
271func (c *Config) Validate() error { 285func (c *Config) Validate() tfdiags.Diagnostics {
272 if c == nil { 286 if c == nil {
273 return nil 287 return nil
274 } 288 }
275 289
276 var errs []error 290 var diags tfdiags.Diagnostics
277 291
278 for _, k := range c.unknownKeys { 292 for _, k := range c.unknownKeys {
279 errs = append(errs, fmt.Errorf( 293 diags = diags.Append(
280 "Unknown root level key: %s", k)) 294 fmt.Errorf("Unknown root level key: %s", k),
295 )
281 } 296 }
282 297
283 // Validate the Terraform config 298 // Validate the Terraform config
284 if tf := c.Terraform; tf != nil { 299 if tf := c.Terraform; tf != nil {
285 errs = append(errs, c.Terraform.Validate()...) 300 errs := c.Terraform.Validate()
301 for _, err := range errs {
302 diags = diags.Append(err)
303 }
286 } 304 }
287 305
288 vars := c.InterpolatedVariables() 306 vars := c.InterpolatedVariables()
289 varMap := make(map[string]*Variable) 307 varMap := make(map[string]*Variable)
290 for _, v := range c.Variables { 308 for _, v := range c.Variables {
291 if _, ok := varMap[v.Name]; ok { 309 if _, ok := varMap[v.Name]; ok {
292 errs = append(errs, fmt.Errorf( 310 diags = diags.Append(fmt.Errorf(
293 "Variable '%s': duplicate found. Variable names must be unique.", 311 "Variable '%s': duplicate found. Variable names must be unique.",
294 v.Name)) 312 v.Name,
313 ))
295 } 314 }
296 315
297 varMap[v.Name] = v 316 varMap[v.Name] = v
@@ -299,17 +318,19 @@ func (c *Config) Validate() error {
299 318
300 for k, _ := range varMap { 319 for k, _ := range varMap {
301 if !NameRegexp.MatchString(k) { 320 if !NameRegexp.MatchString(k) {
302 errs = append(errs, fmt.Errorf( 321 diags = diags.Append(fmt.Errorf(
303 "variable %q: variable name must match regular expresion %s", 322 "variable %q: variable name must match regular expression %s",
304 k, NameRegexp)) 323 k, NameRegexp,
324 ))
305 } 325 }
306 } 326 }
307 327
308 for _, v := range c.Variables { 328 for _, v := range c.Variables {
309 if v.Type() == VariableTypeUnknown { 329 if v.Type() == VariableTypeUnknown {
310 errs = append(errs, fmt.Errorf( 330 diags = diags.Append(fmt.Errorf(
311 "Variable '%s': must be a string or a map", 331 "Variable '%s': must be a string or a map",
312 v.Name)) 332 v.Name,
333 ))
313 continue 334 continue
314 } 335 }
315 336
@@ -330,9 +351,10 @@ func (c *Config) Validate() error {
330 if v.Default != nil { 351 if v.Default != nil {
331 if err := reflectwalk.Walk(v.Default, w); err == nil { 352 if err := reflectwalk.Walk(v.Default, w); err == nil {
332 if interp { 353 if interp {
333 errs = append(errs, fmt.Errorf( 354 diags = diags.Append(fmt.Errorf(
334 "Variable '%s': cannot contain interpolations", 355 "variable %q: default may not contain interpolations",
335 v.Name)) 356 v.Name,
357 ))
336 } 358 }
337 } 359 }
338 } 360 }
@@ -348,10 +370,11 @@ func (c *Config) Validate() error {
348 } 370 }
349 371
350 if _, ok := varMap[uv.Name]; !ok { 372 if _, ok := varMap[uv.Name]; !ok {
351 errs = append(errs, fmt.Errorf( 373 diags = diags.Append(fmt.Errorf(
352 "%s: unknown variable referenced: '%s'. define it with 'variable' blocks", 374 "%s: unknown variable referenced: '%s'; define it with a 'variable' block",
353 source, 375 source,
354 uv.Name)) 376 uv.Name,
377 ))
355 } 378 }
356 } 379 }
357 } 380 }
@@ -362,17 +385,19 @@ func (c *Config) Validate() error {
362 switch v := rawV.(type) { 385 switch v := rawV.(type) {
363 case *CountVariable: 386 case *CountVariable:
364 if v.Type == CountValueInvalid { 387 if v.Type == CountValueInvalid {
365 errs = append(errs, fmt.Errorf( 388 diags = diags.Append(fmt.Errorf(
366 "%s: invalid count variable: %s", 389 "%s: invalid count variable: %s",
367 source, 390 source,
368 v.FullKey())) 391 v.FullKey(),
392 ))
369 } 393 }
370 case *PathVariable: 394 case *PathVariable:
371 if v.Type == PathValueInvalid { 395 if v.Type == PathValueInvalid {
372 errs = append(errs, fmt.Errorf( 396 diags = diags.Append(fmt.Errorf(
373 "%s: invalid path variable: %s", 397 "%s: invalid path variable: %s",
374 source, 398 source,
375 v.FullKey())) 399 v.FullKey(),
400 ))
376 } 401 }
377 } 402 }
378 } 403 }
@@ -380,27 +405,35 @@ func (c *Config) Validate() error {
380 405
381 // Check that providers aren't declared multiple times and that their 406 // Check that providers aren't declared multiple times and that their
382 // version constraints, where present, are syntactically valid. 407 // version constraints, where present, are syntactically valid.
383 providerSet := make(map[string]struct{}) 408 providerSet := make(map[string]bool)
384 for _, p := range c.ProviderConfigs { 409 for _, p := range c.ProviderConfigs {
385 name := p.FullName() 410 name := p.FullName()
386 if _, ok := providerSet[name]; ok { 411 if _, ok := providerSet[name]; ok {
387 errs = append(errs, fmt.Errorf( 412 diags = diags.Append(fmt.Errorf(
388 "provider.%s: declared multiple times, you can only declare a provider once", 413 "provider.%s: multiple configurations present; only one configuration is allowed per provider",
389 name)) 414 name,
415 ))
390 continue 416 continue
391 } 417 }
392 418
393 if p.Version != "" { 419 if p.Version != "" {
394 _, err := discovery.ConstraintStr(p.Version).Parse() 420 _, err := discovery.ConstraintStr(p.Version).Parse()
395 if err != nil { 421 if err != nil {
396 errs = append(errs, fmt.Errorf( 422 diags = diags.Append(&hcl2.Diagnostic{
397 "provider.%s: invalid version constraint %q: %s", 423 Severity: hcl2.DiagError,
398 name, p.Version, err, 424 Summary: "Invalid provider version constraint",
399 )) 425 Detail: fmt.Sprintf(
426 "The value %q given for provider.%s is not a valid version constraint.",
427 p.Version, name,
428 ),
429 // TODO: include a "Subject" source reference in here,
430 // once the config loader is able to retain source
431 // location information.
432 })
400 } 433 }
401 } 434 }
402 435
403 providerSet[name] = struct{}{} 436 providerSet[name] = true
404 } 437 }
405 438
406 // Check that all references to modules are valid 439 // Check that all references to modules are valid
@@ -412,9 +445,10 @@ func (c *Config) Validate() error {
412 if _, ok := dupped[m.Id()]; !ok { 445 if _, ok := dupped[m.Id()]; !ok {
413 dupped[m.Id()] = struct{}{} 446 dupped[m.Id()] = struct{}{}
414 447
415 errs = append(errs, fmt.Errorf( 448 diags = diags.Append(fmt.Errorf(
416 "%s: module repeated multiple times", 449 "module %q: module repeated multiple times",
417 m.Id())) 450 m.Id(),
451 ))
418 } 452 }
419 453
420 // Already seen this module, just skip it 454 // Already seen this module, just skip it
@@ -428,21 +462,23 @@ func (c *Config) Validate() error {
428 "root": m.Source, 462 "root": m.Source,
429 }) 463 })
430 if err != nil { 464 if err != nil {
431 errs = append(errs, fmt.Errorf( 465 diags = diags.Append(fmt.Errorf(
432 "%s: module source error: %s", 466 "module %q: module source error: %s",
433 m.Id(), err)) 467 m.Id(), err,
468 ))
434 } else if len(rc.Interpolations) > 0 { 469 } else if len(rc.Interpolations) > 0 {
435 errs = append(errs, fmt.Errorf( 470 diags = diags.Append(fmt.Errorf(
436 "%s: module source cannot contain interpolations", 471 "module %q: module source cannot contain interpolations",
437 m.Id())) 472 m.Id(),
473 ))
438 } 474 }
439 475
440 // Check that the name matches our regexp 476 // Check that the name matches our regexp
441 if !NameRegexp.Match([]byte(m.Name)) { 477 if !NameRegexp.Match([]byte(m.Name)) {
442 errs = append(errs, fmt.Errorf( 478 diags = diags.Append(fmt.Errorf(
443 "%s: module name can only contain letters, numbers, "+ 479 "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores",
444 "dashes, and underscores", 480 m.Id(),
445 m.Id())) 481 ))
446 } 482 }
447 483
448 // Check that the configuration can all be strings, lists or maps 484 // Check that the configuration can all be strings, lists or maps
@@ -466,30 +502,47 @@ func (c *Config) Validate() error {
466 continue 502 continue
467 } 503 }
468 504
469 errs = append(errs, fmt.Errorf( 505 diags = diags.Append(fmt.Errorf(
470 "%s: variable %s must be a string, list or map value", 506 "module %q: argument %s must have a string, list, or map value",
471 m.Id(), k)) 507 m.Id(), k,
508 ))
472 } 509 }
473 510
474 // Check for invalid count variables 511 // Check for invalid count variables
475 for _, v := range m.RawConfig.Variables { 512 for _, v := range m.RawConfig.Variables {
476 switch v.(type) { 513 switch v.(type) {
477 case *CountVariable: 514 case *CountVariable:
478 errs = append(errs, fmt.Errorf( 515 diags = diags.Append(fmt.Errorf(
479 "%s: count variables are only valid within resources", m.Name)) 516 "module %q: count variables are only valid within resources",
517 m.Name,
518 ))
480 case *SelfVariable: 519 case *SelfVariable:
481 errs = append(errs, fmt.Errorf( 520 diags = diags.Append(fmt.Errorf(
482 "%s: self variables are only valid within resources", m.Name)) 521 "module %q: self variables are only valid within resources",
522 m.Name,
523 ))
483 } 524 }
484 } 525 }
485 526
486 // Update the raw configuration to only contain the string values 527 // Update the raw configuration to only contain the string values
487 m.RawConfig, err = NewRawConfig(raw) 528 m.RawConfig, err = NewRawConfig(raw)
488 if err != nil { 529 if err != nil {
489 errs = append(errs, fmt.Errorf( 530 diags = diags.Append(fmt.Errorf(
490 "%s: can't initialize configuration: %s", 531 "%s: can't initialize configuration: %s",
491 m.Id(), err)) 532 m.Id(), err,
533 ))
492 } 534 }
535
536 // check that all named providers actually exist
537 for _, p := range m.Providers {
538 if !providerSet[p] {
539 diags = diags.Append(fmt.Errorf(
540 "module %q: cannot pass non-existent provider %q",
541 m.Name, p,
542 ))
543 }
544 }
545
493 } 546 }
494 dupped = nil 547 dupped = nil
495 548
@@ -503,10 +556,10 @@ func (c *Config) Validate() error {
503 } 556 }
504 557
505 if _, ok := modules[mv.Name]; !ok { 558 if _, ok := modules[mv.Name]; !ok {
506 errs = append(errs, fmt.Errorf( 559 diags = diags.Append(fmt.Errorf(
507 "%s: unknown module referenced: %s", 560 "%s: unknown module referenced: %s",
508 source, 561 source, mv.Name,
509 mv.Name)) 562 ))
510 } 563 }
511 } 564 }
512 } 565 }
@@ -519,9 +572,10 @@ func (c *Config) Validate() error {
519 if _, ok := dupped[r.Id()]; !ok { 572 if _, ok := dupped[r.Id()]; !ok {
520 dupped[r.Id()] = struct{}{} 573 dupped[r.Id()] = struct{}{}
521 574
522 errs = append(errs, fmt.Errorf( 575 diags = diags.Append(fmt.Errorf(
523 "%s: resource repeated multiple times", 576 "%s: resource repeated multiple times",
524 r.Id())) 577 r.Id(),
578 ))
525 } 579 }
526 } 580 }
527 581
@@ -535,53 +589,42 @@ func (c *Config) Validate() error {
535 for _, v := range r.RawCount.Variables { 589 for _, v := range r.RawCount.Variables {
536 switch v.(type) { 590 switch v.(type) {
537 case *CountVariable: 591 case *CountVariable:
538 errs = append(errs, fmt.Errorf( 592 diags = diags.Append(fmt.Errorf(
539 "%s: resource count can't reference count variable: %s", 593 "%s: resource count can't reference count variable: %s",
540 n, 594 n, v.FullKey(),
541 v.FullKey())) 595 ))
542 case *SimpleVariable: 596 case *SimpleVariable:
543 errs = append(errs, fmt.Errorf( 597 diags = diags.Append(fmt.Errorf(
544 "%s: resource count can't reference variable: %s", 598 "%s: resource count can't reference variable: %s",
545 n, 599 n, v.FullKey(),
546 v.FullKey())) 600 ))
547 601
548 // Good 602 // Good
549 case *ModuleVariable: 603 case *ModuleVariable:
550 case *ResourceVariable: 604 case *ResourceVariable:
551 case *TerraformVariable: 605 case *TerraformVariable:
552 case *UserVariable: 606 case *UserVariable:
607 case *LocalVariable:
553 608
554 default: 609 default:
555 errs = append(errs, fmt.Errorf( 610 diags = diags.Append(fmt.Errorf(
556 "Internal error. Unknown type in count var in %s: %T", 611 "Internal error. Unknown type in count var in %s: %T",
557 n, v)) 612 n, v,
613 ))
558 } 614 }
559 } 615 }
560 616
561 // Interpolate with a fixed number to verify that its a number. 617 if !r.RawCount.couldBeInteger() {
562 r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { 618 diags = diags.Append(fmt.Errorf(
563 // Execute the node but transform the AST so that it returns 619 "%s: resource count must be an integer", n,
564 // a fixed value of "5" for all interpolations. 620 ))
565 result, err := hil.Eval(
566 hil.FixedValueTransform(
567 root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
568 nil)
569 if err != nil {
570 return "", err
571 }
572
573 return result.Value, nil
574 })
575 _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
576 if err != nil {
577 errs = append(errs, fmt.Errorf(
578 "%s: resource count must be an integer",
579 n))
580 } 621 }
581 r.RawCount.init() 622 r.RawCount.init()
582 623
583 // Validate DependsOn 624 // Validate DependsOn
584 errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...) 625 for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) {
626 diags = diags.Append(err)
627 }
585 628
586 // Verify provisioners 629 // Verify provisioners
587 for _, p := range r.Provisioners { 630 for _, p := range r.Provisioners {
@@ -595,9 +638,10 @@ func (c *Config) Validate() error {
595 } 638 }
596 639
597 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { 640 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
598 errs = append(errs, fmt.Errorf( 641 diags = diags.Append(fmt.Errorf(
599 "%s: connection info cannot contain splat variable "+ 642 "%s: connection info cannot contain splat variable referencing itself",
600 "referencing itself", n)) 643 n,
644 ))
601 break 645 break
602 } 646 }
603 } 647 }
@@ -609,9 +653,10 @@ func (c *Config) Validate() error {
609 } 653 }
610 654
611 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { 655 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
612 errs = append(errs, fmt.Errorf( 656 diags = diags.Append(fmt.Errorf(
613 "%s: connection info cannot contain splat variable "+ 657 "%s: connection info cannot contain splat variable referencing itself",
614 "referencing itself", n)) 658 n,
659 ))
615 break 660 break
616 } 661 }
617 } 662 }
@@ -619,21 +664,24 @@ func (c *Config) Validate() error {
619 // Check for invalid when/onFailure values, though this should be 664 // Check for invalid when/onFailure values, though this should be
620 // picked up by the loader we check here just in case. 665 // picked up by the loader we check here just in case.
621 if p.When == ProvisionerWhenInvalid { 666 if p.When == ProvisionerWhenInvalid {
622 errs = append(errs, fmt.Errorf( 667 diags = diags.Append(fmt.Errorf(
623 "%s: provisioner 'when' value is invalid", n)) 668 "%s: provisioner 'when' value is invalid", n,
669 ))
624 } 670 }
625 if p.OnFailure == ProvisionerOnFailureInvalid { 671 if p.OnFailure == ProvisionerOnFailureInvalid {
626 errs = append(errs, fmt.Errorf( 672 diags = diags.Append(fmt.Errorf(
627 "%s: provisioner 'on_failure' value is invalid", n)) 673 "%s: provisioner 'on_failure' value is invalid", n,
674 ))
628 } 675 }
629 } 676 }
630 677
631 // Verify ignore_changes contains valid entries 678 // Verify ignore_changes contains valid entries
632 for _, v := range r.Lifecycle.IgnoreChanges { 679 for _, v := range r.Lifecycle.IgnoreChanges {
633 if strings.Contains(v, "*") && v != "*" { 680 if strings.Contains(v, "*") && v != "*" {
634 errs = append(errs, fmt.Errorf( 681 diags = diags.Append(fmt.Errorf(
635 "%s: ignore_changes does not support using a partial string "+ 682 "%s: ignore_changes does not support using a partial string together with a wildcard: %s",
636 "together with a wildcard: %s", n, v)) 683 n, v,
684 ))
637 } 685 }
638 } 686 }
639 687
@@ -642,21 +690,24 @@ func (c *Config) Validate() error {
642 "root": r.Lifecycle.IgnoreChanges, 690 "root": r.Lifecycle.IgnoreChanges,
643 }) 691 })
644 if err != nil { 692 if err != nil {
645 errs = append(errs, fmt.Errorf( 693 diags = diags.Append(fmt.Errorf(
646 "%s: lifecycle ignore_changes error: %s", 694 "%s: lifecycle ignore_changes error: %s",
647 n, err)) 695 n, err,
696 ))
648 } else if len(rc.Interpolations) > 0 { 697 } else if len(rc.Interpolations) > 0 {
649 errs = append(errs, fmt.Errorf( 698 diags = diags.Append(fmt.Errorf(
650 "%s: lifecycle ignore_changes cannot contain interpolations", 699 "%s: lifecycle ignore_changes cannot contain interpolations",
651 n)) 700 n,
701 ))
652 } 702 }
653 703
654 // If it is a data source then it can't have provisioners 704 // If it is a data source then it can't have provisioners
655 if r.Mode == DataResourceMode { 705 if r.Mode == DataResourceMode {
656 if _, ok := r.RawConfig.Raw["provisioner"]; ok { 706 if _, ok := r.RawConfig.Raw["provisioner"]; ok {
657 errs = append(errs, fmt.Errorf( 707 diags = diags.Append(fmt.Errorf(
658 "%s: data sources cannot have provisioners", 708 "%s: data sources cannot have provisioners",
659 n)) 709 n,
710 ))
660 } 711 }
661 } 712 }
662 } 713 }
@@ -670,25 +721,50 @@ func (c *Config) Validate() error {
670 721
671 id := rv.ResourceId() 722 id := rv.ResourceId()
672 if _, ok := resources[id]; !ok { 723 if _, ok := resources[id]; !ok {
673 errs = append(errs, fmt.Errorf( 724 diags = diags.Append(fmt.Errorf(
674 "%s: unknown resource '%s' referenced in variable %s", 725 "%s: unknown resource '%s' referenced in variable %s",
675 source, 726 source,
676 id, 727 id,
677 rv.FullKey())) 728 rv.FullKey(),
729 ))
678 continue 730 continue
679 } 731 }
680 } 732 }
681 } 733 }
682 734
735 // Check that all locals are valid
736 {
737 found := make(map[string]struct{})
738 for _, l := range c.Locals {
739 if _, ok := found[l.Name]; ok {
740 diags = diags.Append(fmt.Errorf(
741 "%s: duplicate local. local value names must be unique",
742 l.Name,
743 ))
744 continue
745 }
746 found[l.Name] = struct{}{}
747
748 for _, v := range l.RawConfig.Variables {
749 if _, ok := v.(*CountVariable); ok {
750 diags = diags.Append(fmt.Errorf(
751 "local %s: count variables are only valid within resources", l.Name,
752 ))
753 }
754 }
755 }
756 }
757
683 // Check that all outputs are valid 758 // Check that all outputs are valid
684 { 759 {
685 found := make(map[string]struct{}) 760 found := make(map[string]struct{})
686 for _, o := range c.Outputs { 761 for _, o := range c.Outputs {
687 // Verify the output is new 762 // Verify the output is new
688 if _, ok := found[o.Name]; ok { 763 if _, ok := found[o.Name]; ok {
689 errs = append(errs, fmt.Errorf( 764 diags = diags.Append(fmt.Errorf(
690 "%s: duplicate output. output names must be unique.", 765 "output %q: an output of this name was already defined",
691 o.Name)) 766 o.Name,
767 ))
692 continue 768 continue
693 } 769 }
694 found[o.Name] = struct{}{} 770 found[o.Name] = struct{}{}
@@ -708,9 +784,10 @@ func (c *Config) Validate() error {
708 continue 784 continue
709 } 785 }
710 786
711 errs = append(errs, fmt.Errorf( 787 diags = diags.Append(fmt.Errorf(
712 "%s: value for 'sensitive' must be boolean", 788 "output %q: value for 'sensitive' must be boolean",
713 o.Name)) 789 o.Name,
790 ))
714 continue 791 continue
715 } 792 }
716 if k == "description" { 793 if k == "description" {
@@ -719,27 +796,78 @@ func (c *Config) Validate() error {
719 continue 796 continue
720 } 797 }
721 798
722 errs = append(errs, fmt.Errorf( 799 diags = diags.Append(fmt.Errorf(
723 "%s: value for 'description' must be string", 800 "output %q: value for 'description' must be string",
724 o.Name)) 801 o.Name,
802 ))
725 continue 803 continue
726 } 804 }
727 invalidKeys = append(invalidKeys, k) 805 invalidKeys = append(invalidKeys, k)
728 } 806 }
729 if len(invalidKeys) > 0 { 807 if len(invalidKeys) > 0 {
730 errs = append(errs, fmt.Errorf( 808 diags = diags.Append(fmt.Errorf(
731 "%s: output has invalid keys: %s", 809 "output %q: invalid keys: %s",
732 o.Name, strings.Join(invalidKeys, ", "))) 810 o.Name, strings.Join(invalidKeys, ", "),
811 ))
733 } 812 }
734 if !valueKeyFound { 813 if !valueKeyFound {
735 errs = append(errs, fmt.Errorf( 814 diags = diags.Append(fmt.Errorf(
736 "%s: output is missing required 'value' key", o.Name)) 815 "output %q: missing required 'value' argument", o.Name,
816 ))
737 } 817 }
738 818
739 for _, v := range o.RawConfig.Variables { 819 for _, v := range o.RawConfig.Variables {
740 if _, ok := v.(*CountVariable); ok { 820 if _, ok := v.(*CountVariable); ok {
741 errs = append(errs, fmt.Errorf( 821 diags = diags.Append(fmt.Errorf(
742 "%s: count variables are only valid within resources", o.Name)) 822 "output %q: count variables are only valid within resources",
823 o.Name,
824 ))
825 }
826 }
827
828 // Detect a common mistake of using a "count"ed resource in
829 // an output value without using the splat or index form.
830 // Prior to 0.11 this error was silently ignored, but outputs
831 // now have their errors checked like all other contexts.
832 //
833 // TODO: Remove this in 0.12.
834 for _, v := range o.RawConfig.Variables {
835 rv, ok := v.(*ResourceVariable)
836 if !ok {
837 continue
838 }
839
840 // If the variable seems to be treating the referenced
841 // resource as a singleton (no count specified) then
842 // we'll check to make sure it is indeed a singleton.
843 // It's a warning if not.
844
845 if rv.Multi || rv.Index != 0 {
846 // This reference is treating the resource as a
847 // multi-resource, so the warning doesn't apply.
848 continue
849 }
850
851 for _, r := range c.Resources {
852 if r.Id() != rv.ResourceId() {
853 continue
854 }
855
856 // We test specifically for the raw string "1" here
857 // because we _do_ want to generate this warning if
858 // the user has provided an expression that happens
859 // to return 1 right now, to catch situations where
860 // a count might dynamically be set to something
861 // other than 1 and thus splat syntax is still needed
862 // to be safe.
863 if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" {
864 diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf(
865 "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances",
866 o.Name,
867 r.Id(), rv.Field,
868 r.Id(), rv.Field,
869 )))
870 }
743 } 871 }
744 } 872 }
745 } 873 }
@@ -755,17 +883,15 @@ func (c *Config) Validate() error {
755 883
756 for _, v := range rc.Variables { 884 for _, v := range rc.Variables {
757 if _, ok := v.(*SelfVariable); ok { 885 if _, ok := v.(*SelfVariable); ok {
758 errs = append(errs, fmt.Errorf( 886 diags = diags.Append(fmt.Errorf(
759 "%s: cannot contain self-reference %s", source, v.FullKey())) 887 "%s: cannot contain self-reference %s",
888 source, v.FullKey(),
889 ))
760 } 890 }
761 } 891 }
762 } 892 }
763 893
764 if len(errs) > 0 { 894 return diags
765 return &multierror.Error{Errors: errs}
766 }
767
768 return nil
769} 895}
770 896
771// InterpolatedVariables is a helper that returns a mapping of all the interpolated 897// InterpolatedVariables is a helper that returns a mapping of all the interpolated
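
The change above replaces Validate's flat []error/multierror result with tfdiags.Diagnostics, so that warnings (such as the splat-syntax notice for counted resources) can travel alongside hard errors. Below is a minimal sketch of that accumulation pattern, not code from this change: the checkNames helper and its messages are invented, and it assumes the tfdiags helpers used in the diff (Append, SimpleWarning) plus HasErrors and Err for inspecting the result.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/tfdiags"
    )

    // checkNames collects one diagnostic per problem rather than failing fast,
    // mirroring how Config.Validate now gathers issues across a whole config.
    func checkNames(names []string) tfdiags.Diagnostics {
        var diags tfdiags.Diagnostics
        seen := map[string]struct{}{}
        for _, n := range names {
            if _, ok := seen[n]; ok {
                // A plain error becomes an error-severity diagnostic.
                diags = diags.Append(fmt.Errorf("output %q: duplicate name", n))
                continue
            }
            seen[n] = struct{}{}
            if n == "self" {
                // Warnings ride in the same collection as errors.
                diags = diags.Append(tfdiags.SimpleWarning(
                    fmt.Sprintf("output %q: name is easily confused with the 'self' keyword", n)))
            }
        }
        return diags
    }

    func main() {
        diags := checkNames([]string{"addr", "addr", "self"})
        if diags.HasErrors() {
            fmt.Println(diags.Err()) // folds all diagnostics into a single error value
        }
    }
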
diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go
index 0b3abbc..a6933c2 100644
--- a/vendor/github.com/hashicorp/terraform/config/config_string.go
+++ b/vendor/github.com/hashicorp/terraform/config/config_string.go
@@ -143,6 +143,46 @@ func outputsStr(os []*Output) string {
143 result += fmt.Sprintf(" %s: %s\n", kind, str) 143 result += fmt.Sprintf(" %s: %s\n", kind, str)
144 } 144 }
145 } 145 }
146
147 if o.Description != "" {
148 result += fmt.Sprintf(" description\n %s\n", o.Description)
149 }
150 }
151
152 return strings.TrimSpace(result)
153}
154
155func localsStr(ls []*Local) string {
156 ns := make([]string, 0, len(ls))
157 m := make(map[string]*Local)
158 for _, l := range ls {
159 ns = append(ns, l.Name)
160 m[l.Name] = l
161 }
162 sort.Strings(ns)
163
164 result := ""
165 for _, n := range ns {
166 l := m[n]
167
168 result += fmt.Sprintf("%s\n", n)
169
170 if len(l.RawConfig.Variables) > 0 {
171 result += fmt.Sprintf(" vars\n")
172 for _, rawV := range l.RawConfig.Variables {
173 kind := "unknown"
174 str := rawV.FullKey()
175
176 switch rawV.(type) {
177 case *ResourceVariable:
178 kind = "resource"
179 case *UserVariable:
180 kind = "user"
181 }
182
183 result += fmt.Sprintf(" %s: %s\n", kind, str)
184 }
185 }
146 } 186 }
147 187
148 return strings.TrimSpace(result) 188 return strings.TrimSpace(result)
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
new file mode 100644
index 0000000..2b1b0ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
@@ -0,0 +1,97 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
8var mapLabelNames = []string{"key"}
9
10// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body
11// using the facilities in the hcldec package.
12//
13// The returned specification is guaranteed to return a value of the same type
14// returned by method ImpliedType, but it may contain null or unknown values if
15// any of the block attributes are defined as optional and/or computed
16// respectively.
17func (b *Block) DecoderSpec() hcldec.Spec {
18 ret := hcldec.ObjectSpec{}
19 if b == nil {
20 return ret
21 }
22
23 for name, attrS := range b.Attributes {
24 switch {
25 case attrS.Computed && attrS.Optional:
26 // In this special case we use an unknown value as a default
27 // to get the intended behavior that the result is computed
28 // unless it has been explicitly set in config.
29 ret[name] = &hcldec.DefaultSpec{
30 Primary: &hcldec.AttrSpec{
31 Name: name,
32 Type: attrS.Type,
33 },
34 Default: &hcldec.LiteralSpec{
35 Value: cty.UnknownVal(attrS.Type),
36 },
37 }
38 case attrS.Computed:
39 ret[name] = &hcldec.LiteralSpec{
40 Value: cty.UnknownVal(attrS.Type),
41 }
42 default:
43 ret[name] = &hcldec.AttrSpec{
44 Name: name,
45 Type: attrS.Type,
46 Required: attrS.Required,
47 }
48 }
49 }
50
51 for name, blockS := range b.BlockTypes {
52 if _, exists := ret[name]; exists {
53 // This indicates an invalid schema, since it's not valid to
54 // define both an attribute and a block type of the same name.
55 // However, we don't raise this here since it's checked by
56 // InternalValidate.
57 continue
58 }
59
60 childSpec := blockS.Block.DecoderSpec()
61
62 switch blockS.Nesting {
63 case NestingSingle:
64 ret[name] = &hcldec.BlockSpec{
65 TypeName: name,
66 Nested: childSpec,
67 Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
68 }
69 case NestingList:
70 ret[name] = &hcldec.BlockListSpec{
71 TypeName: name,
72 Nested: childSpec,
73 MinItems: blockS.MinItems,
74 MaxItems: blockS.MaxItems,
75 }
76 case NestingSet:
77 ret[name] = &hcldec.BlockSetSpec{
78 TypeName: name,
79 Nested: childSpec,
80 MinItems: blockS.MinItems,
81 MaxItems: blockS.MaxItems,
82 }
83 case NestingMap:
84 ret[name] = &hcldec.BlockMapSpec{
85 TypeName: name,
86 Nested: childSpec,
87 LabelNames: mapLabelNames,
88 }
89 default:
90 // Invalid nesting type is just ignored. It's checked by
91 // InternalValidate.
92 continue
93 }
94 }
95
96 return ret
97}
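
DecoderSpec is what ties these schemas to hcldec: the returned spec can be handed to hcldec.Decode to turn an HCL body into a single cty.Value. The following is a rough sketch of that flow under stated assumptions; the attribute names and the use of hclparse to obtain a body are illustrative, not part of this change.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl2/hcldec"
        "github.com/hashicorp/hcl2/hclparse"
        "github.com/hashicorp/terraform/config/configschema"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        // A hand-written schema; real schemas come from provider plugins.
        schema := &configschema.Block{
            Attributes: map[string]*configschema.Attribute{
                "ami":           {Type: cty.String, Required: true},
                "instance_type": {Type: cty.String, Optional: true},
            },
        }

        // Parse a small body to decode against the schema.
        parser := hclparse.NewParser()
        f, diags := parser.ParseHCL([]byte("ami = \"ami-12345\"\n"), "example.tf")
        if diags.HasErrors() {
            panic(diags)
        }

        val, diags := hcldec.Decode(f.Body, schema.DecoderSpec(), nil)
        if diags.HasErrors() {
            panic(diags)
        }

        // val is an object value; instance_type is null because it was omitted.
        fmt.Printf("%#v\n", val.GetAttr("ami"))
    }
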
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go
new file mode 100644
index 0000000..caf8d73
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go
@@ -0,0 +1,14 @@
1// Package configschema contains types for describing the expected structure
2// of a configuration block whose shape is not known until runtime.
3//
4// For example, this is used to describe the expected contents of a resource
5// configuration block, which is defined by the corresponding provider plugin
6// and thus not compiled into Terraform core.
7//
8// A configschema primarily describes the shape of configuration, but it is
9// also suitable for use with other structures derived from the configuration,
10// such as the cached state of a resource or a resource diff.
11//
12// This package should not be confused with the package helper/schema, which
13// is the higher-level helper library used to implement providers themselves.
14package configschema
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
new file mode 100644
index 0000000..67324eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
@@ -0,0 +1,21 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// ImpliedType returns the cty.Type that would result from decoding a
9// configuration block using the receiving block schema.
10//
11// ImpliedType always returns a result, even if the given schema is
12// inconsistent. Code that creates configschema.Block objects should be
13// tested using the InternalValidate method to detect any inconsistencies
14// that would cause this method to fall back on defaults and assumptions.
15func (b *Block) ImpliedType() cty.Type {
16 if b == nil {
17 return cty.EmptyObject
18 }
19
20 return hcldec.ImpliedType(b.DecoderSpec())
21}
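
ImpliedType is the static counterpart to DecoderSpec: it reports the cty object type that decoding will produce. A small sketch with an invented schema, showing how a nested list block folds into that object type:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/config/configschema"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        schema := &configschema.Block{
            Attributes: map[string]*configschema.Attribute{
                "name": {Type: cty.String, Optional: true},
            },
            BlockTypes: map[string]*configschema.NestedBlock{
                "tag": {
                    Nesting: configschema.NestingList,
                    Block: configschema.Block{
                        Attributes: map[string]*configschema.Attribute{
                            "key":   {Type: cty.String, Required: true},
                            "value": {Type: cty.String, Required: true},
                        },
                    },
                },
            },
        }

        // Expected shape: object({name: string, tag: list(object({key: string, value: string}))})
        fmt.Println(schema.ImpliedType().GoString())
    }
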
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
new file mode 100644
index 0000000..33cbe88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
@@ -0,0 +1,92 @@
1package configschema
2
3import (
4 "fmt"
5 "regexp"
6
7 "github.com/zclconf/go-cty/cty"
8
9 multierror "github.com/hashicorp/go-multierror"
10)
11
12var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
13
14// InternalValidate returns an error if the receiving block and its child
15// schema definitions have any inconsistencies with the documented rules for
16// a valid schema.
17//
18// This is intended to be used within unit tests to detect when a given
19// schema is invalid.
20func (b *Block) InternalValidate() error {
21 if b == nil {
22 return fmt.Errorf("top-level block schema is nil")
23 }
24 return b.internalValidate("", nil)
25
26}
27
28func (b *Block) internalValidate(prefix string, err error) error {
29 for name, attrS := range b.Attributes {
30 if attrS == nil {
31 err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name))
32 continue
33 }
34 if !validName.MatchString(name) {
35 err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
36 }
37 if attrS.Optional == false && attrS.Required == false && attrS.Computed == false {
38 err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
39 }
40 if attrS.Optional && attrS.Required {
41 err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name))
42 }
43 if attrS.Computed && attrS.Required {
44 err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name))
45 }
46 if attrS.Type == cty.NilType {
47 err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name))
48 }
49 }
50
51 for name, blockS := range b.BlockTypes {
52 if blockS == nil {
53 err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name))
54 continue
55 }
56
57 if _, isAttr := b.Attributes[name]; isAttr {
58 err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name))
59 } else if !validName.MatchString(name) {
60 err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
61 }
62
63 if blockS.MinItems < 0 || blockS.MaxItems < 0 {
64 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name))
65 }
66
67 switch blockS.Nesting {
68 case NestingSingle:
69 switch {
70 case blockS.MinItems != blockS.MaxItems:
71 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name))
72 case blockS.MinItems < 0 || blockS.MinItems > 1:
73 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
74 }
75 case NestingList, NestingSet:
76 if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 {
77 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting))
78 }
79 case NestingMap:
80 if blockS.MinItems != 0 || blockS.MaxItems != 0 {
81 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name))
82 }
83 default:
84 err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting))
85 }
86
87 subPrefix := prefix + name + "."
88 err = blockS.Block.internalValidate(subPrefix, err)
89 }
90
91 return err
92}
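
As the doc comment says, InternalValidate is aimed at unit tests. A sketch of how a schema author might use it to catch an attribute that sets both Optional and Required, one of the conflicts checked above; the test name and schema contents are made up for illustration.

    package configschema_test

    import (
        "testing"

        "github.com/hashicorp/terraform/config/configschema"
        "github.com/zclconf/go-cty/cty"
    )

    func TestBlockInternalValidate(t *testing.T) {
        schema := &configschema.Block{
            Attributes: map[string]*configschema.Attribute{
                "name": {Type: cty.String, Optional: true, Required: true}, // conflicting flags
            },
        }

        if err := schema.InternalValidate(); err == nil {
            t.Fatal("expected an error for the Optional+Required conflict, got nil")
        }
    }
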
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
new file mode 100644
index 0000000..6cb9313
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
2
3package configschema
4
5import "strconv"
6
7const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap"
8
9var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62}
10
11func (i NestingMode) String() string {
12 if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
13 return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
14 }
15 return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
new file mode 100644
index 0000000..9a8ee55
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
@@ -0,0 +1,107 @@
1package configschema
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// Block represents a configuration block.
8//
9// "Block" here is a logical grouping construct, though it happens to map
10// directly onto the physical block syntax of Terraform's native configuration
11// syntax. It may be more a matter of convention in other syntaxes, such as
12// JSON.
13//
14// When converted to a value, a Block always becomes an instance of an object
15// type derived from its defined attributes and nested blocks
16type Block struct {
17 // Attributes describes any attributes that may appear directly inside
18 // the block.
19 Attributes map[string]*Attribute
20
21 // BlockTypes describes any nested block types that may appear directly
22 // inside the block.
23 BlockTypes map[string]*NestedBlock
24}
25
26// Attribute represents a configuration attribute, within a block.
27type Attribute struct {
28 // Type is a type specification that the attribute's value must conform to.
29 Type cty.Type
30
31 // Required, if set to true, specifies that an omitted or null value is
32 // not permitted.
33 Required bool
34
35 // Optional, if set to true, specifies that an omitted or null value is
36 // permitted. This field conflicts with Required.
37 Optional bool
38
39 // Computed, if set to true, specifies that the value comes from the
40 // provider rather than from configuration. If combined with Optional,
41 // then the config may optionally provide an overridden value.
42 Computed bool
43
44 // Sensitive, if set to true, indicates that an attribute may contain
45 // sensitive information.
46 //
47 // At present nothing is done with this information, but callers are
48 // encouraged to set it where appropriate so that it may be used in the
49 // future to help Terraform mask sensitive information. (Terraform
50 // currently achieves this in a limited sense via other mechanisms.)
51 Sensitive bool
52}
53
54// NestedBlock represents the embedding of one block within another.
55type NestedBlock struct {
56 // Block is the description of the block that's nested.
57 Block
58
59 // Nesting provides the nesting mode for the child block, which determines
60 // how many instances of the block are allowed, how many labels it expects,
61 // and how the resulting data will be converted into a data structure.
62 Nesting NestingMode
63
64 // MinItems and MaxItems set, for the NestingList and NestingSet nesting
65 // modes, lower and upper limits on the number of child blocks allowed
66 // of the given type. If both are left at zero, no limit is applied.
67 //
68 // As a special case, both values can be set to 1 for NestingSingle in
69 // order to indicate that a particular single block is required.
70 //
71 // These fields are ignored for other nesting modes and must both be left
72 // at zero.
73 MinItems, MaxItems int
74}
75
76// NestingMode is an enumeration of modes for nesting blocks inside other
77// blocks.
78type NestingMode int
79
80//go:generate stringer -type=NestingMode
81
82const (
83 nestingModeInvalid NestingMode = iota
84
85 // NestingSingle indicates that only a single instance of a given
86 // block type is permitted, with no labels, and its content should be
87 // provided directly as an object value.
88 NestingSingle
89
90 // NestingList indicates that multiple blocks of the given type are
91 // permitted, with no labels, and that their corresponding objects should
92 // be provided in a list.
93 NestingList
94
95 // NestingSet indicates that multiple blocks of the given type are
96 // permitted, with no labels, and that their corresponding objects should
97 // be provided in a set.
98 NestingSet
99
100 // NestingMap indicates that multiple blocks of the given type are
101 // permitted, each with a single label, and that their corresponding
102 // objects should be provided in a map whose keys are the labels.
103 //
104 // It's an error, therefore, to use the same label value on multiple
105 // blocks.
106 NestingMap
107)
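
The MinItems/MaxItems special case for NestingSingle described above is what DecoderSpec earlier in this diff turns into a Required block spec. A hedged sketch of the effect, assuming hcl2.EmptyBody and hcldec.Decode behave as documented; decoding an empty body should then report the missing required block.

    package main

    import (
        "fmt"

        hcl2 "github.com/hashicorp/hcl2/hcl"
        "github.com/hashicorp/hcl2/hcldec"
        "github.com/hashicorp/terraform/config/configschema"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        schema := &configschema.Block{
            BlockTypes: map[string]*configschema.NestedBlock{
                "connection": {
                    Nesting:  configschema.NestingSingle,
                    MinItems: 1, // together with MaxItems: 1, the single block is required
                    MaxItems: 1,
                    Block: configschema.Block{
                        Attributes: map[string]*configschema.Attribute{
                            "host": {Type: cty.String, Required: true},
                        },
                    },
                },
            },
        }

        // Decoding an empty body should yield a missing-block diagnostic.
        _, diags := hcldec.Decode(hcl2.EmptyBody(), schema.DecoderSpec(), nil)
        fmt.Println(diags.HasErrors()) // true
    }
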
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go
new file mode 100644
index 0000000..207d105
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go
@@ -0,0 +1,134 @@
1package config
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty/function/stdlib"
7
8 "github.com/hashicorp/hil/ast"
9 "github.com/hashicorp/terraform/config/hcl2shim"
10
11 hcl2 "github.com/hashicorp/hcl2/hcl"
12 "github.com/zclconf/go-cty/cty"
13 "github.com/zclconf/go-cty/cty/convert"
14 "github.com/zclconf/go-cty/cty/function"
15)
16
17// ---------------------------------------------------------------------------
18// This file contains some helper functions that are used to shim between
19// HCL2 concepts and HCL/HIL concepts, to help us mostly preserve the existing
20// public API that was built around HCL/HIL-oriented approaches.
21// ---------------------------------------------------------------------------
22
23func hcl2InterpolationFuncs() map[string]function.Function {
24 hcl2Funcs := map[string]function.Function{}
25
26 for name, hilFunc := range Funcs() {
27 hcl2Funcs[name] = hcl2InterpolationFuncShim(hilFunc)
28 }
29
30 // Some functions in the old world are dealt with inside langEvalConfig
31 // due to their legacy reliance on direct access to the symbol table.
32 // Since 0.7 they don't actually need it anymore and just ignore it,
33 // so we're cheating a bit here and exploiting that detail by passing nil.
34 hcl2Funcs["lookup"] = hcl2InterpolationFuncShim(interpolationFuncLookup(nil))
35 hcl2Funcs["keys"] = hcl2InterpolationFuncShim(interpolationFuncKeys(nil))
36 hcl2Funcs["values"] = hcl2InterpolationFuncShim(interpolationFuncValues(nil))
37
38 // As a bonus, we'll provide the JSON-handling functions from the cty
39 // function library since its "jsonencode" is more complete (doesn't force
40 // weird type conversions) and HIL's type system can't represent
41 // "jsondecode" at all. The result of jsondecode will eventually be forced
42 // to conform to the HIL type system on exit into the rest of Terraform due
43 // to our shimming right now, but it should be usable for decoding _within_
44 // an expression.
45 hcl2Funcs["jsonencode"] = stdlib.JSONEncodeFunc
46 hcl2Funcs["jsondecode"] = stdlib.JSONDecodeFunc
47
48 return hcl2Funcs
49}
50
51func hcl2InterpolationFuncShim(hilFunc ast.Function) function.Function {
52 spec := &function.Spec{}
53
54 for i, hilArgType := range hilFunc.ArgTypes {
55 spec.Params = append(spec.Params, function.Parameter{
56 Type: hcl2shim.HCL2TypeForHILType(hilArgType),
57 Name: fmt.Sprintf("arg%d", i+1), // HIL args don't have names, so we'll fudge it
58 })
59 }
60
61 if hilFunc.Variadic {
62 spec.VarParam = &function.Parameter{
63 Type: hcl2shim.HCL2TypeForHILType(hilFunc.VariadicType),
64 Name: "varargs", // HIL args don't have names, so we'll fudge it
65 }
66 }
67
68 spec.Type = func(args []cty.Value) (cty.Type, error) {
69 return hcl2shim.HCL2TypeForHILType(hilFunc.ReturnType), nil
70 }
71 spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) {
72 hilArgs := make([]interface{}, len(args))
73 for i, arg := range args {
74 hilV := hcl2shim.HILVariableFromHCL2Value(arg)
75
76 // Although the cty function system does automatic type conversions
77 // to match the argument types, cty doesn't distinguish int and
78 // float and so we may need to adjust here to ensure that the
79 // wrapped function gets exactly the Go type it was expecting.
80 var wantType ast.Type
81 if i < len(hilFunc.ArgTypes) {
82 wantType = hilFunc.ArgTypes[i]
83 } else {
84 wantType = hilFunc.VariadicType
85 }
86 switch {
87 case hilV.Type == ast.TypeInt && wantType == ast.TypeFloat:
88 hilV.Type = wantType
89 hilV.Value = float64(hilV.Value.(int))
90 case hilV.Type == ast.TypeFloat && wantType == ast.TypeInt:
91 hilV.Type = wantType
92 hilV.Value = int(hilV.Value.(float64))
93 }
94
95 // HIL functions actually expect to have the outermost variable
96 // "peeled" but any nested values (in lists or maps) will
97 // still have their ast.Variable wrapping.
98 hilArgs[i] = hilV.Value
99 }
100
101 hilResult, err := hilFunc.Callback(hilArgs)
102 if err != nil {
103 return cty.DynamicVal, err
104 }
105
106 // Just as on the way in, we get back a partially-peeled ast.Variable
107 // which we need to re-wrap in order to convert it back into what
108 // we're calling a "config value".
109 rv := hcl2shim.HCL2ValueFromHILVariable(ast.Variable{
110 Type: hilFunc.ReturnType,
111 Value: hilResult,
112 })
113
114 return convert.Convert(rv, retType) // if result is unknown we'll force the correct type here
115 }
116 return function.New(spec)
117}
118
119func hcl2EvalWithUnknownVars(expr hcl2.Expression) (cty.Value, hcl2.Diagnostics) {
120 trs := expr.Variables()
121 vars := map[string]cty.Value{}
122 val := cty.DynamicVal
123
124 for _, tr := range trs {
125 name := tr.RootName()
126 vars[name] = val
127 }
128
129 ctx := &hcl2.EvalContext{
130 Variables: vars,
131 Functions: hcl2InterpolationFuncs(),
132 }
133 return expr.Value(ctx)
134}
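
hcl2EvalWithUnknownVars validates an expression without real values by binding every referenced root name to cty.DynamicVal. Since that helper is unexported, the same technique is shown standalone below; the expression text and the use of hclsyntax.ParseExpression are assumptions made only for the sketch.

    package main

    import (
        "fmt"

        hcl2 "github.com/hashicorp/hcl2/hcl"
        "github.com/hashicorp/hcl2/hcl/hclsyntax"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        expr, diags := hclsyntax.ParseExpression(
            []byte(`"${var.region}-${var.env}"`), "example.tf", hcl2.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            panic(diags)
        }

        // Bind every referenced root symbol ("var" here) to an unknown value.
        vars := map[string]cty.Value{}
        for _, tr := range expr.Variables() {
            vars[tr.RootName()] = cty.DynamicVal
        }

        val, diags := expr.Value(&hcl2.EvalContext{Variables: vars})
        if diags.HasErrors() {
            panic(diags)
        }
        fmt.Println(val.IsKnown()) // false: result is unknown, but the expression is well-formed
    }
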
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go
new file mode 100644
index 0000000..19651c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go
@@ -0,0 +1,85 @@
1package hcl2shim
2
3import (
4 "fmt"
5
6 hcl2 "github.com/hashicorp/hcl2/hcl"
7)
8
9// SingleAttrBody is a weird implementation of hcl2.Body that acts as if
10// it has a single attribute whose value is the given expression.
11//
12// This is used to shim Resource.RawCount and Output.RawConfig to behave
13// more like they do in the old HCL loader.
14type SingleAttrBody struct {
15 Name string
16 Expr hcl2.Expression
17}
18
19var _ hcl2.Body = SingleAttrBody{}
20
21func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) {
22 content, all, diags := b.content(schema)
23 if !all {
24 // This should never happen because this body implementation should only
25 // be used by code that is aware that it's using a single-attr body.
26 diags = append(diags, &hcl2.Diagnostic{
27 Severity: hcl2.DiagError,
28 Summary: "Invalid attribute",
29 Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name),
30 Subject: b.Expr.Range().Ptr(),
31 })
32 }
33 return content, diags
34}
35
36func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) {
37 content, all, diags := b.content(schema)
38 var remain hcl2.Body
39 if all {
40 // If the request matched the one attribute we represent, then the
41 // remaining body is empty.
42 remain = hcl2.EmptyBody()
43 } else {
44 remain = b
45 }
46 return content, remain, diags
47}
48
49func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) {
50 ret := &hcl2.BodyContent{}
51 all := false
52 var diags hcl2.Diagnostics
53
54 for _, attrS := range schema.Attributes {
55 if attrS.Name == b.Name {
56 attrs, _ := b.JustAttributes()
57 ret.Attributes = attrs
58 all = true
59 } else if attrS.Required {
60 diags = append(diags, &hcl2.Diagnostic{
61 Severity: hcl2.DiagError,
62 Summary: "Missing attribute",
63 Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name),
64 Subject: b.Expr.Range().Ptr(),
65 })
66 }
67 }
68
69 return ret, all, diags
70}
71
72func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) {
73 return hcl2.Attributes{
74 b.Name: {
75 Expr: b.Expr,
76 Name: b.Name,
77 NameRange: b.Expr.Range(),
78 Range: b.Expr.Range(),
79 },
80 }, nil
81}
82
83func (b SingleAttrBody) MissingItemRange() hcl2.Range {
84 return b.Expr.Range()
85}
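
SingleAttrBody is the bridge that lets a single HCL2 expression, such as a resource's count, be handled by code that expects a whole configuration body. A rough usage sketch follows; parsing the expression with hclsyntax is an assumption made just for the example.

    package main

    import (
        "fmt"

        hcl2 "github.com/hashicorp/hcl2/hcl"
        "github.com/hashicorp/hcl2/hcl/hclsyntax"
        "github.com/hashicorp/terraform/config/hcl2shim"
    )

    func main() {
        expr, diags := hclsyntax.ParseExpression([]byte(`3`), "count.hcl", hcl2.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            panic(diags)
        }

        // Present the lone expression as a body with a single "count" attribute.
        body := hcl2shim.SingleAttrBody{Name: "count", Expr: expr}

        attrs, _ := body.JustAttributes()
        val, _ := attrs["count"].Expr.Value(nil)
        fmt.Printf("%#v\n", val) // cty.NumberIntVal(3)
    }
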
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
new file mode 100644
index 0000000..0b697a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
@@ -0,0 +1,246 @@
1package hcl2shim
2
3import (
4 "fmt"
5 "math/big"
6
7 "github.com/hashicorp/hil/ast"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// UnknownVariableValue is a sentinel value that can be used
12// to denote that the value of a variable is unknown at this time.
13// RawConfig uses this information to build up data about
14// unknown keys.
15const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
16
17// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic
18// types library that HCL2 uses) to a value type that matches what would've
19// been produced from the HCL-based interpolator for an equivalent structure.
20//
21// This function will transform a cty null value into a Go nil value, which
22// isn't a possible outcome of the HCL/HIL-based decoder and so callers may
23// need to detect and reject any null values.
24func ConfigValueFromHCL2(v cty.Value) interface{} {
25 if !v.IsKnown() {
26 return UnknownVariableValue
27 }
28 if v.IsNull() {
29 return nil
30 }
31
32 switch v.Type() {
33 case cty.Bool:
34 return v.True() // like HCL.BOOL
35 case cty.String:
36 return v.AsString() // like HCL token.STRING or token.HEREDOC
37 case cty.Number:
38 // We can't match HCL _exactly_ here because it distinguishes between
39 // int and float values, but we'll get as close as we can by using
40 // an int if the number is exactly representable, and a float if not.
41 // The conversion to float will force precision to that of a float64,
42 // which is potentially losing information from the specific number
43 // given, but no worse than what HCL would've done in its own conversion
44 // to float.
45
46 f := v.AsBigFloat()
47 if i, acc := f.Int64(); acc == big.Exact {
48 // if we're on a 32-bit system and the number is too big for 32-bit
49 // int then we'll fall through here and use a float64.
50 const MaxInt = int(^uint(0) >> 1)
51 const MinInt = -MaxInt - 1
52 if i <= int64(MaxInt) && i >= int64(MinInt) {
53 return int(i) // Like HCL token.NUMBER
54 }
55 }
56
57 f64, _ := f.Float64()
58 return f64 // like HCL token.FLOAT
59 }
60
61 if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() {
62 l := make([]interface{}, 0, v.LengthInt())
63 it := v.ElementIterator()
64 for it.Next() {
65 _, ev := it.Element()
66 l = append(l, ConfigValueFromHCL2(ev))
67 }
68 return l
69 }
70
71 if v.Type().IsMapType() || v.Type().IsObjectType() {
72 l := make(map[string]interface{})
73 it := v.ElementIterator()
74 for it.Next() {
75 ek, ev := it.Element()
76 l[ek.AsString()] = ConfigValueFromHCL2(ev)
77 }
78 return l
79 }
80
81 // If we fall out here then we have some weird type that we haven't
82 // accounted for. This should never happen unless the caller is using
83 // capsule types, and we don't currently have any such types defined.
84 panic(fmt.Errorf("can't convert %#v to config value", v))
85}
86
87// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes
88// a value as would be returned from the old interpolator and turns it into
89// a cty.Value so it can be used within, for example, an HCL2 EvalContext.
90func HCL2ValueFromConfigValue(v interface{}) cty.Value {
91 if v == nil {
92 return cty.NullVal(cty.DynamicPseudoType)
93 }
94 if v == UnknownVariableValue {
95 return cty.DynamicVal
96 }
97
98 switch tv := v.(type) {
99 case bool:
100 return cty.BoolVal(tv)
101 case string:
102 return cty.StringVal(tv)
103 case int:
104 return cty.NumberIntVal(int64(tv))
105 case float64:
106 return cty.NumberFloatVal(tv)
107 case []interface{}:
108 vals := make([]cty.Value, len(tv))
109 for i, ev := range tv {
110 vals[i] = HCL2ValueFromConfigValue(ev)
111 }
112 return cty.TupleVal(vals)
113 case map[string]interface{}:
114 vals := map[string]cty.Value{}
115 for k, ev := range tv {
116 vals[k] = HCL2ValueFromConfigValue(ev)
117 }
118 return cty.ObjectVal(vals)
119 default:
120 // HCL/HIL should never generate anything that isn't caught by
121 // the above, so if we get here something has gone very wrong.
122 panic(fmt.Errorf("can't convert %#v to cty.Value", v))
123 }
124}
125
126func HILVariableFromHCL2Value(v cty.Value) ast.Variable {
127 if v.IsNull() {
128 // Caller should guarantee/check this before calling
129 panic("Null values cannot be represented in HIL")
130 }
131 if !v.IsKnown() {
132 return ast.Variable{
133 Type: ast.TypeUnknown,
134 Value: UnknownVariableValue,
135 }
136 }
137
138 switch v.Type() {
139 case cty.Bool:
140 return ast.Variable{
141 Type: ast.TypeBool,
142 Value: v.True(),
143 }
144 case cty.Number:
145 v := ConfigValueFromHCL2(v)
146 switch tv := v.(type) {
147 case int:
148 return ast.Variable{
149 Type: ast.TypeInt,
150 Value: tv,
151 }
152 case float64:
153 return ast.Variable{
154 Type: ast.TypeFloat,
155 Value: tv,
156 }
157 default:
158 // should never happen
159 panic("invalid return value for configValueFromHCL2")
160 }
161 case cty.String:
162 return ast.Variable{
163 Type: ast.TypeString,
164 Value: v.AsString(),
165 }
166 }
167
168 if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() {
169 l := make([]ast.Variable, 0, v.LengthInt())
170 it := v.ElementIterator()
171 for it.Next() {
172 _, ev := it.Element()
173 l = append(l, HILVariableFromHCL2Value(ev))
174 }
175 // If we were given a tuple then this could actually produce an invalid
176 // list with non-homogenous types, which we expect to be caught inside
177 // HIL just like a user-supplied non-homogenous list would be.
178 return ast.Variable{
179 Type: ast.TypeList,
180 Value: l,
181 }
182 }
183
184 if v.Type().IsMapType() || v.Type().IsObjectType() {
185 l := make(map[string]ast.Variable)
186 it := v.ElementIterator()
187 for it.Next() {
188 ek, ev := it.Element()
189 l[ek.AsString()] = HILVariableFromHCL2Value(ev)
190 }
191 // If we were given an object then this could actually produce an invalid
192 // map with non-homogenous types, which we expect to be caught inside
193 // HIL just like a user-supplied non-homogenous map would be.
194 return ast.Variable{
195 Type: ast.TypeMap,
196 Value: l,
197 }
198 }
199
200 // If we fall out here then we have some weird type that we haven't
201 // accounted for. This should never happen unless the caller is using
202 // capsule types, and we don't currently have any such types defined.
203 panic(fmt.Errorf("can't convert %#v to HIL variable", v))
204}
205
206func HCL2ValueFromHILVariable(v ast.Variable) cty.Value {
207 switch v.Type {
208 case ast.TypeList:
209 vals := make([]cty.Value, len(v.Value.([]ast.Variable)))
210 for i, ev := range v.Value.([]ast.Variable) {
211 vals[i] = HCL2ValueFromHILVariable(ev)
212 }
213 return cty.TupleVal(vals)
214 case ast.TypeMap:
215 vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable)))
216 for k, ev := range v.Value.(map[string]ast.Variable) {
217 vals[k] = HCL2ValueFromHILVariable(ev)
218 }
219 return cty.ObjectVal(vals)
220 default:
221 return HCL2ValueFromConfigValue(v.Value)
222 }
223}
224
225func HCL2TypeForHILType(hilType ast.Type) cty.Type {
226 switch hilType {
227 case ast.TypeAny:
228 return cty.DynamicPseudoType
229 case ast.TypeUnknown:
230 return cty.DynamicPseudoType
231 case ast.TypeBool:
232 return cty.Bool
233 case ast.TypeInt:
234 return cty.Number
235 case ast.TypeFloat:
236 return cty.Number
237 case ast.TypeString:
238 return cty.String
239 case ast.TypeList:
240 return cty.List(cty.DynamicPseudoType)
241 case ast.TypeMap:
242 return cty.Map(cty.DynamicPseudoType)
243 default:
244 return cty.NilType // equivalent to ast.TypeInvalid
245 }
246}
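
These helpers are the two directions of the shim: cty values in, legacy interface{} config values out, and back again. A small round-trip sketch using only the exported functions above; the sample data is arbitrary.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        native := map[string]interface{}{
            "name":  "web",
            "count": 2,
            "tags":  []interface{}{"a", "b"},
        }

        // Legacy config value -> cty: maps become objects, slices become tuples.
        v := hcl2shim.HCL2ValueFromConfigValue(native)
        fmt.Printf("%#v\n", v)

        // cty -> legacy config value: numbers come back as int when exactly representable.
        back := hcl2shim.ConfigValueFromHCL2(v)
        fmt.Printf("%#v\n", back)

        // Unknown values surface as the UnknownVariableValue sentinel.
        fmt.Println(hcl2shim.ConfigValueFromHCL2(cty.UnknownVal(cty.String)) == hcl2shim.UnknownVariableValue)
    }
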
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
index 37ec11a..08cbc77 100644
--- a/vendor/github.com/hashicorp/terraform/config/import_tree.go
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -1,8 +1,12 @@
1package config 1package config
2 2
3import ( 3import (
4 "bufio"
4 "fmt" 5 "fmt"
5 "io" 6 "io"
7 "os"
8
9 "github.com/hashicorp/errwrap"
6) 10)
7 11
8// configurable is an interface that must be implemented by any configuration 12// configurable is an interface that must be implemented by any configuration
@@ -27,15 +31,52 @@ type importTree struct {
27// imports. 31// imports.
28type fileLoaderFunc func(path string) (configurable, []string, error) 32type fileLoaderFunc func(path string) (configurable, []string, error)
29 33
34// Set this to a non-empty value at link time to enable the HCL2 experiment.
35// This is not currently enabled for release builds.
36//
37// For example:
38// go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform
39var enableHCL2Experiment = ""
40
30// loadTree takes a single file and loads the entire importTree for that 41// loadTree takes a single file and loads the entire importTree for that
31// file. This function detects what kind of configuration file it is and 42 // file. This function detects what kind of configuration file it is and
32// executes the proper fileLoaderFunc. 43// executes the proper fileLoaderFunc.
33func loadTree(root string) (*importTree, error) { 44func loadTree(root string) (*importTree, error) {
34 var f fileLoaderFunc 45 var f fileLoaderFunc
35 switch ext(root) { 46
36 case ".tf", ".tf.json": 47 // HCL2 experiment is currently activated at build time via the linker.
37 f = loadFileHcl 48 // See the comment on this variable for more information.
38 default: 49 if enableHCL2Experiment == "" {
50 // Main-line behavior: always use the original HCL parser
51 switch ext(root) {
52 case ".tf", ".tf.json":
53 f = loadFileHcl
54 default:
55 }
56 } else {
57 // Experimental behavior: use the HCL2 parser if the opt-in comment
58 // is present.
59 switch ext(root) {
60 case ".tf":
61 // We need to sniff the file for the opt-in comment line to decide
62 // if the file is participating in the HCL2 experiment.
63 cf, err := os.Open(root)
64 if err != nil {
65 return nil, err
66 }
67 sc := bufio.NewScanner(cf)
68 for sc.Scan() {
69 if sc.Text() == "#terraform:hcl2" {
70 f = globalHCL2Loader.loadFile
71 }
72 }
73 if f == nil {
74 f = loadFileHcl
75 }
76 case ".tf.json":
77 f = loadFileHcl
78 default:
79 }
39 } 80 }
40 81
41 if f == nil { 82 if f == nil {
@@ -86,10 +127,7 @@ func (t *importTree) Close() error {
86func (t *importTree) ConfigTree() (*configTree, error) { 127func (t *importTree) ConfigTree() (*configTree, error) {
87 config, err := t.Raw.Config() 128 config, err := t.Raw.Config()
88 if err != nil { 129 if err != nil {
89 return nil, fmt.Errorf( 130 return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err)
90 "Error loading %s: %s",
91 t.Path,
92 err)
93 } 131 }
94 132
95 // Build our result 133 // Build our result
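The experiment gate above combines a link-time flag with a per-file opt-in: a ".tf" file is only handed to the HCL2 loader when it contains the literal line "#terraform:hcl2". A minimal sketch of that sniffing step, factored into a hypothetical helper (not part of the diff; it also closes the file, which loadTree above does inline without closing):

package main

import (
	"bufio"
	"fmt"
	"os"
)

// hasHCL2OptIn reports whether the file at path contains the literal
// opt-in line "#terraform:hcl2". Illustration only; loadTree performs
// the same scan inline.
func hasHCL2OptIn(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if sc.Text() == "#terraform:hcl2" {
			return true, nil
		}
	}
	return false, sc.Err()
}

func main() {
	ok, err := hasHCL2OptIn("main.tf")
	fmt.Println(ok, err)
}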
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
index bbb3555..599e5ec 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -5,6 +5,8 @@ import (
5 "strconv" 5 "strconv"
6 "strings" 6 "strings"
7 7
8 "github.com/hashicorp/terraform/tfdiags"
9
8 "github.com/hashicorp/hil/ast" 10 "github.com/hashicorp/hil/ast"
9) 11)
10 12
@@ -14,6 +16,21 @@ import (
14// variables can come from: user variables, resources, etc. 16// variables can come from: user variables, resources, etc.
15type InterpolatedVariable interface { 17type InterpolatedVariable interface {
16 FullKey() string 18 FullKey() string
19 SourceRange() tfdiags.SourceRange
20}
21
22// varRange can be embedded into an InterpolatedVariable implementation to
23// implement the SourceRange method.
24type varRange struct {
25 rng tfdiags.SourceRange
26}
27
28func (r varRange) SourceRange() tfdiags.SourceRange {
29 return r.rng
30}
31
32func makeVarRange(rng tfdiags.SourceRange) varRange {
33 return varRange{rng}
17} 34}
18 35
19// CountVariable is a variable for referencing information about 36// CountVariable is a variable for referencing information about
@@ -21,6 +38,7 @@ type InterpolatedVariable interface {
21type CountVariable struct { 38type CountVariable struct {
22 Type CountValueType 39 Type CountValueType
23 key string 40 key string
41 varRange
24} 42}
25 43
26// CountValueType is the type of the count variable that is referenced. 44// CountValueType is the type of the count variable that is referenced.
@@ -37,6 +55,7 @@ type ModuleVariable struct {
37 Name string 55 Name string
38 Field string 56 Field string
39 key string 57 key string
58 varRange
40} 59}
41 60
42// A PathVariable is a variable that references path information about the 61// A PathVariable is a variable that references path information about the
@@ -44,6 +63,7 @@ type ModuleVariable struct {
44type PathVariable struct { 63type PathVariable struct {
45 Type PathValueType 64 Type PathValueType
46 key string 65 key string
66 varRange
47} 67}
48 68
49type PathValueType byte 69type PathValueType byte
@@ -67,6 +87,7 @@ type ResourceVariable struct {
67 Index int // Index for multi-variable: aws_instance.foo.1.id == 1 87 Index int // Index for multi-variable: aws_instance.foo.1.id == 1
68 88
69 key string 89 key string
90 varRange
70} 91}
71 92
72// SelfVariable is a variable that is referencing the same resource 93// SelfVariable is a variable that is referencing the same resource
@@ -75,6 +96,7 @@ type SelfVariable struct {
75 Field string 96 Field string
76 97
77 key string 98 key string
99 varRange
78} 100}
79 101
80// SimpleVariable is an unprefixed variable, which can show up when users have 102// SimpleVariable is an unprefixed variable, which can show up when users have
@@ -82,6 +104,7 @@ type SelfVariable struct {
82// internally. The template_file resource is an example of this. 104// internally. The template_file resource is an example of this.
83type SimpleVariable struct { 105type SimpleVariable struct {
84 Key string 106 Key string
107 varRange
85} 108}
86 109
87// TerraformVariable is a "terraform."-prefixed variable used to access 110// TerraformVariable is a "terraform."-prefixed variable used to access
@@ -89,6 +112,7 @@ type SimpleVariable struct {
89type TerraformVariable struct { 112type TerraformVariable struct {
90 Field string 113 Field string
91 key string 114 key string
115 varRange
92} 116}
93 117
94// A UserVariable is a variable that is referencing a user variable 118// A UserVariable is a variable that is referencing a user variable
@@ -99,6 +123,14 @@ type UserVariable struct {
99 Elem string 123 Elem string
100 124
101 key string 125 key string
126 varRange
127}
128
129// A LocalVariable is a variable that references a local value defined within
130// the current module, via a "locals" block. This looks like "${local.foo}".
131type LocalVariable struct {
132 Name string
133 varRange
102} 134}
103 135
104func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { 136func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
@@ -112,6 +144,8 @@ func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
112 return NewTerraformVariable(v) 144 return NewTerraformVariable(v)
113 } else if strings.HasPrefix(v, "var.") { 145 } else if strings.HasPrefix(v, "var.") {
114 return NewUserVariable(v) 146 return NewUserVariable(v)
147 } else if strings.HasPrefix(v, "local.") {
148 return NewLocalVariable(v)
115 } else if strings.HasPrefix(v, "module.") { 149 } else if strings.HasPrefix(v, "module.") {
116 return NewModuleVariable(v) 150 return NewModuleVariable(v)
117 } else if !strings.ContainsRune(v, '.') { 151 } else if !strings.ContainsRune(v, '.') {
@@ -276,7 +310,7 @@ func (v *SelfVariable) GoString() string {
276} 310}
277 311
278func NewSimpleVariable(key string) (*SimpleVariable, error) { 312func NewSimpleVariable(key string) (*SimpleVariable, error) {
279 return &SimpleVariable{key}, nil 313 return &SimpleVariable{Key: key}, nil
280} 314}
281 315
282func (v *SimpleVariable) FullKey() string { 316func (v *SimpleVariable) FullKey() string {
@@ -331,6 +365,25 @@ func (v *UserVariable) GoString() string {
331 return fmt.Sprintf("*%#v", *v) 365 return fmt.Sprintf("*%#v", *v)
332} 366}
333 367
368func NewLocalVariable(key string) (*LocalVariable, error) {
369 name := key[len("local."):]
370 if idx := strings.Index(name, "."); idx > -1 {
371 return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name)
372 }
373
374 return &LocalVariable{
375 Name: name,
376 }, nil
377}
378
379func (v *LocalVariable) FullKey() string {
380 return fmt.Sprintf("local.%s", v.Name)
381}
382
383func (v *LocalVariable) GoString() string {
384 return fmt.Sprintf("*%#v", *v)
385}
386
334// DetectVariables takes an AST root and returns all the interpolated 387// DetectVariables takes an AST root and returns all the interpolated
335// variables that are detected in the AST tree. 388// variables that are detected in the AST tree.
336func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { 389func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
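The new "local." prefix is routed to NewLocalVariable, which strips the prefix and rejects dotted attribute access. A small sketch of how the parser behaves, using the exported entry point shown above (illustration only):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// A plain local reference parses into a *LocalVariable whose FullKey
	// round-trips to the original string.
	v, err := config.NewInterpolatedVariable("local.subnet_ids")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.FullKey()) // local.subnet_ids

	// Dot attribute access is rejected; square-bracket indexing must be
	// used instead.
	_, err = config.NewInterpolatedVariable("local.subnet_ids.0")
	fmt.Println(err != nil) // true
}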
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
index a298cf2..421edb0 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -1,17 +1,23 @@
1package config 1package config
2 2
3import ( 3import (
4 "bytes"
5 "compress/gzip"
4 "crypto/md5" 6 "crypto/md5"
7 "crypto/rsa"
5 "crypto/sha1" 8 "crypto/sha1"
6 "crypto/sha256" 9 "crypto/sha256"
7 "crypto/sha512" 10 "crypto/sha512"
11 "crypto/x509"
8 "encoding/base64" 12 "encoding/base64"
9 "encoding/hex" 13 "encoding/hex"
10 "encoding/json" 14 "encoding/json"
15 "encoding/pem"
11 "fmt" 16 "fmt"
12 "io/ioutil" 17 "io/ioutil"
13 "math" 18 "math"
14 "net" 19 "net"
20 "net/url"
15 "path/filepath" 21 "path/filepath"
16 "regexp" 22 "regexp"
17 "sort" 23 "sort"
@@ -55,59 +61,74 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
55// Funcs is the mapping of built-in functions for configuration. 61// Funcs is the mapping of built-in functions for configuration.
56func Funcs() map[string]ast.Function { 62func Funcs() map[string]ast.Function {
57 return map[string]ast.Function{ 63 return map[string]ast.Function{
58 "basename": interpolationFuncBasename(), 64 "abs": interpolationFuncAbs(),
59 "base64decode": interpolationFuncBase64Decode(), 65 "basename": interpolationFuncBasename(),
60 "base64encode": interpolationFuncBase64Encode(), 66 "base64decode": interpolationFuncBase64Decode(),
61 "base64sha256": interpolationFuncBase64Sha256(), 67 "base64encode": interpolationFuncBase64Encode(),
62 "base64sha512": interpolationFuncBase64Sha512(), 68 "base64gzip": interpolationFuncBase64Gzip(),
63 "bcrypt": interpolationFuncBcrypt(), 69 "base64sha256": interpolationFuncBase64Sha256(),
64 "ceil": interpolationFuncCeil(), 70 "base64sha512": interpolationFuncBase64Sha512(),
65 "chomp": interpolationFuncChomp(), 71 "bcrypt": interpolationFuncBcrypt(),
66 "cidrhost": interpolationFuncCidrHost(), 72 "ceil": interpolationFuncCeil(),
67 "cidrnetmask": interpolationFuncCidrNetmask(), 73 "chomp": interpolationFuncChomp(),
68 "cidrsubnet": interpolationFuncCidrSubnet(), 74 "cidrhost": interpolationFuncCidrHost(),
69 "coalesce": interpolationFuncCoalesce(), 75 "cidrnetmask": interpolationFuncCidrNetmask(),
70 "coalescelist": interpolationFuncCoalesceList(), 76 "cidrsubnet": interpolationFuncCidrSubnet(),
71 "compact": interpolationFuncCompact(), 77 "coalesce": interpolationFuncCoalesce(),
72 "concat": interpolationFuncConcat(), 78 "coalescelist": interpolationFuncCoalesceList(),
73 "contains": interpolationFuncContains(), 79 "compact": interpolationFuncCompact(),
74 "dirname": interpolationFuncDirname(), 80 "concat": interpolationFuncConcat(),
75 "distinct": interpolationFuncDistinct(), 81 "contains": interpolationFuncContains(),
76 "element": interpolationFuncElement(), 82 "dirname": interpolationFuncDirname(),
77 "file": interpolationFuncFile(), 83 "distinct": interpolationFuncDistinct(),
78 "matchkeys": interpolationFuncMatchKeys(), 84 "element": interpolationFuncElement(),
79 "floor": interpolationFuncFloor(), 85 "chunklist": interpolationFuncChunklist(),
80 "format": interpolationFuncFormat(), 86 "file": interpolationFuncFile(),
81 "formatlist": interpolationFuncFormatList(), 87 "filebase64sha256": interpolationFuncMakeFileHash(interpolationFuncBase64Sha256()),
82 "index": interpolationFuncIndex(), 88 "filebase64sha512": interpolationFuncMakeFileHash(interpolationFuncBase64Sha512()),
83 "join": interpolationFuncJoin(), 89 "filemd5": interpolationFuncMakeFileHash(interpolationFuncMd5()),
84 "jsonencode": interpolationFuncJSONEncode(), 90 "filesha1": interpolationFuncMakeFileHash(interpolationFuncSha1()),
85 "length": interpolationFuncLength(), 91 "filesha256": interpolationFuncMakeFileHash(interpolationFuncSha256()),
86 "list": interpolationFuncList(), 92 "filesha512": interpolationFuncMakeFileHash(interpolationFuncSha512()),
87 "log": interpolationFuncLog(), 93 "matchkeys": interpolationFuncMatchKeys(),
88 "lower": interpolationFuncLower(), 94 "flatten": interpolationFuncFlatten(),
89 "map": interpolationFuncMap(), 95 "floor": interpolationFuncFloor(),
90 "max": interpolationFuncMax(), 96 "format": interpolationFuncFormat(),
91 "md5": interpolationFuncMd5(), 97 "formatlist": interpolationFuncFormatList(),
92 "merge": interpolationFuncMerge(), 98 "indent": interpolationFuncIndent(),
93 "min": interpolationFuncMin(), 99 "index": interpolationFuncIndex(),
94 "pathexpand": interpolationFuncPathExpand(), 100 "join": interpolationFuncJoin(),
95 "pow": interpolationFuncPow(), 101 "jsonencode": interpolationFuncJSONEncode(),
96 "uuid": interpolationFuncUUID(), 102 "length": interpolationFuncLength(),
97 "replace": interpolationFuncReplace(), 103 "list": interpolationFuncList(),
98 "sha1": interpolationFuncSha1(), 104 "log": interpolationFuncLog(),
99 "sha256": interpolationFuncSha256(), 105 "lower": interpolationFuncLower(),
100 "sha512": interpolationFuncSha512(), 106 "map": interpolationFuncMap(),
101 "signum": interpolationFuncSignum(), 107 "max": interpolationFuncMax(),
102 "slice": interpolationFuncSlice(), 108 "md5": interpolationFuncMd5(),
103 "sort": interpolationFuncSort(), 109 "merge": interpolationFuncMerge(),
104 "split": interpolationFuncSplit(), 110 "min": interpolationFuncMin(),
105 "substr": interpolationFuncSubstr(), 111 "pathexpand": interpolationFuncPathExpand(),
106 "timestamp": interpolationFuncTimestamp(), 112 "pow": interpolationFuncPow(),
107 "title": interpolationFuncTitle(), 113 "uuid": interpolationFuncUUID(),
108 "trimspace": interpolationFuncTrimSpace(), 114 "replace": interpolationFuncReplace(),
109 "upper": interpolationFuncUpper(), 115 "rsadecrypt": interpolationFuncRsaDecrypt(),
110 "zipmap": interpolationFuncZipMap(), 116 "sha1": interpolationFuncSha1(),
117 "sha256": interpolationFuncSha256(),
118 "sha512": interpolationFuncSha512(),
119 "signum": interpolationFuncSignum(),
120 "slice": interpolationFuncSlice(),
121 "sort": interpolationFuncSort(),
122 "split": interpolationFuncSplit(),
123 "substr": interpolationFuncSubstr(),
124 "timestamp": interpolationFuncTimestamp(),
125 "timeadd": interpolationFuncTimeAdd(),
126 "title": interpolationFuncTitle(),
127 "transpose": interpolationFuncTranspose(),
128 "trimspace": interpolationFuncTrimSpace(),
129 "upper": interpolationFuncUpper(),
130 "urlencode": interpolationFuncURLEncode(),
131 "zipmap": interpolationFuncZipMap(),
111 } 132 }
112} 133}
113 134
@@ -669,6 +690,21 @@ func interpolationFuncFormatList() ast.Function {
669 } 690 }
670} 691}
671 692
693// interpolationFuncIndent indents a multi-line string with the
694// specified number of spaces
695func interpolationFuncIndent() ast.Function {
696 return ast.Function{
697 ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString},
698 ReturnType: ast.TypeString,
699 Callback: func(args []interface{}) (interface{}, error) {
700 spaces := args[0].(int)
701 data := args[1].(string)
702 pad := strings.Repeat(" ", spaces)
703 return strings.Replace(data, "\n", "\n"+pad, -1), nil
704 },
705 }
706}
707
672// interpolationFuncIndex implements the "index" function that allows one to 708// interpolationFuncIndex implements the "index" function that allows one to
673// find the index of a specific element in a list 709// find the index of a specific element in a list
674func interpolationFuncIndex() ast.Function { 710func interpolationFuncIndex() ast.Function {
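// Note on the "indent" function added above: padding is inserted only after
// newline characters, so the first line of the input is left unindented.
// A standalone sketch of the same transformation (illustration only, not
// part of the vendored code):
package main

import (
	"fmt"
	"strings"
)

func main() {
	spaces, data := 2, "a\nb\nc"
	pad := strings.Repeat(" ", spaces)
	fmt.Println(strings.Replace(data, "\n", "\n"+pad, -1))
	// Output:
	// a
	//   b
	//   c
}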
@@ -823,8 +859,7 @@ func interpolationFuncJoin() ast.Function {
823} 859}
824 860
825// interpolationFuncJSONEncode implements the "jsonencode" function that encodes 861// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
826// a string, list, or map as its JSON representation. For now, values in the 862// a string, list, or map as its JSON representation.
827// list or map may only be strings.
828func interpolationFuncJSONEncode() ast.Function { 863func interpolationFuncJSONEncode() ast.Function {
829 return ast.Function{ 864 return ast.Function{
830 ArgTypes: []ast.Type{ast.TypeAny}, 865 ArgTypes: []ast.Type{ast.TypeAny},
@@ -837,28 +872,36 @@ func interpolationFuncJSONEncode() ast.Function {
837 toEncode = typedArg 872 toEncode = typedArg
838 873
839 case []ast.Variable: 874 case []ast.Variable:
840 // We preallocate the list here. Note that it's important that in
841 // the length 0 case, we have an empty list rather than nil, as
842 // they encode differently.
843 // XXX It would be nice to support arbitrarily nested data here. Is
844 // there an inverse of hil.InterfaceToVariable?
845 strings := make([]string, len(typedArg)) 875 strings := make([]string, len(typedArg))
846 876
847 for i, v := range typedArg { 877 for i, v := range typedArg {
848 if v.Type != ast.TypeString { 878 if v.Type != ast.TypeString {
849 return "", fmt.Errorf("list elements must be strings") 879 variable, _ := hil.InterfaceToVariable(typedArg)
880 toEncode, _ = hil.VariableToInterface(variable)
881
882 jEnc, err := json.Marshal(toEncode)
883 if err != nil {
884 return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
885 }
886 return string(jEnc), nil
887
850 } 888 }
851 strings[i] = v.Value.(string) 889 strings[i] = v.Value.(string)
852 } 890 }
853 toEncode = strings 891 toEncode = strings
854 892
855 case map[string]ast.Variable: 893 case map[string]ast.Variable:
856 // XXX It would be nice to support arbitrarily nested data here. Is
857 // there an inverse of hil.InterfaceToVariable?
858 stringMap := make(map[string]string) 894 stringMap := make(map[string]string)
859 for k, v := range typedArg { 895 for k, v := range typedArg {
860 if v.Type != ast.TypeString { 896 if v.Type != ast.TypeString {
861 return "", fmt.Errorf("map values must be strings") 897 variable, _ := hil.InterfaceToVariable(typedArg)
898 toEncode, _ = hil.VariableToInterface(variable)
899
900 jEnc, err := json.Marshal(toEncode)
901 if err != nil {
902 return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
903 }
904 return string(jEnc), nil
862 } 905 }
863 stringMap[k] = v.Value.(string) 906 stringMap[k] = v.Value.(string)
864 } 907 }
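// Note on the jsonencode change above: when a list or map contains a
// non-string element, the whole argument is now round-tripped through
// hil.InterfaceToVariable / hil.VariableToInterface and encoded as nested
// JSON instead of returning an error. A standalone sketch of that round
// trip (illustration only):
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	variable, err := hil.InterfaceToVariable([]interface{}{"a", []interface{}{"b", "c"}})
	if err != nil {
		panic(err)
	}
	native, err := hil.VariableToInterface(variable)
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(native)
	fmt.Println(string(out)) // ["a",["b","c"]]
}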
@@ -1098,6 +1141,56 @@ func interpolationFuncElement() ast.Function {
1098 } 1141 }
1099} 1142}
1100 1143
1144// returns the `list` items chunked by `size`.
1145func interpolationFuncChunklist() ast.Function {
1146 return ast.Function{
1147 ArgTypes: []ast.Type{
1148 ast.TypeList, // inputList
1149 ast.TypeInt, // size
1150 },
1151 ReturnType: ast.TypeList,
1152 Callback: func(args []interface{}) (interface{}, error) {
1153 output := make([]ast.Variable, 0)
1154
1155 values, _ := args[0].([]ast.Variable)
1156 size, _ := args[1].(int)
1157
1158 // errors if size is negative
1159 if size < 0 {
1160 return nil, fmt.Errorf("The size argument must be positive")
1161 }
1162
1163 // if size is 0, returns a list made of the initial list
1164 if size == 0 {
1165 output = append(output, ast.Variable{
1166 Type: ast.TypeList,
1167 Value: values,
1168 })
1169 return output, nil
1170 }
1171
1172 variables := make([]ast.Variable, 0)
1173 chunk := ast.Variable{
1174 Type: ast.TypeList,
1175 Value: variables,
1176 }
1177 l := len(values)
1178 for i, v := range values {
1179 variables = append(variables, v)
1180
1181				// Flush the current chunk once it holds `size` items, or when the last value is reached
1182 if (i+1)%size == 0 || (i+1) == l {
1183 chunk.Value = variables
1184 output = append(output, chunk)
1185 variables = make([]ast.Variable, 0)
1186 }
1187 }
1188
1189 return output, nil
1190 },
1191 }
1192}
1193
1101// interpolationFuncKeys implements the "keys" function that yields a list of 1194// interpolationFuncKeys implements the "keys" function that yields a list of
1102// keys of map types within a Terraform configuration. 1195// keys of map types within a Terraform configuration.
1103func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function { 1196func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
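// Note on the "chunklist" function added above: size 0 wraps the whole list
// in a single chunk, a negative size is an error, and otherwise the list is
// split into consecutive chunks of at most `size` elements. A sketch of the
// same splitting on a plain string slice (illustration only; size must be
// > 0 here, since the 0 and negative cases are handled separately above):
package main

import "fmt"

func chunk(values []string, size int) [][]string {
	var out [][]string
	var cur []string
	for i, v := range values {
		cur = append(cur, v)
		if (i+1)%size == 0 || i+1 == len(values) {
			out = append(out, cur)
			cur = nil
		}
	}
	return out
}

func main() {
	fmt.Println(chunk([]string{"a", "b", "c", "d", "e"}, 2)) // [[a b] [c d] [e]]
}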
@@ -1197,6 +1290,32 @@ func interpolationFuncBase64Decode() ast.Function {
1197 } 1290 }
1198} 1291}
1199 1292
1293// interpolationFuncBase64Gzip implements the "base64gzip" function, which
1294// gzip-compresses a string and encodes the compressed data using base64
1295func interpolationFuncBase64Gzip() ast.Function {
1296 return ast.Function{
1297 ArgTypes: []ast.Type{ast.TypeString},
1298 ReturnType: ast.TypeString,
1299 Callback: func(args []interface{}) (interface{}, error) {
1300 s := args[0].(string)
1301
1302 var b bytes.Buffer
1303 gz := gzip.NewWriter(&b)
1304 if _, err := gz.Write([]byte(s)); err != nil {
1305 return "", fmt.Errorf("failed to write gzip raw data: '%s'", s)
1306 }
1307 if err := gz.Flush(); err != nil {
1308 return "", fmt.Errorf("failed to flush gzip writer: '%s'", s)
1309 }
1310 if err := gz.Close(); err != nil {
1311 return "", fmt.Errorf("failed to close gzip writer: '%s'", s)
1312 }
1313
1314 return base64.StdEncoding.EncodeToString(b.Bytes()), nil
1315 },
1316 }
1317}
1318
1200// interpolationFuncLower implements the "lower" function that does 1319// interpolationFuncLower implements the "lower" function that does
1201// string lower casing. 1320// string lower casing.
1202func interpolationFuncLower() ast.Function { 1321func interpolationFuncLower() ast.Function {
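// Note on the "base64gzip" function added above: the input string is gzip
// compressed and the compressed bytes are then base64 encoded, which suits
// resource arguments that expect a base64 payload. The same pipeline in
// isolation (illustration only; Close also flushes, so a separate Flush is
// not strictly needed here):
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
)

func main() {
	var b bytes.Buffer
	gz := gzip.NewWriter(&b)
	if _, err := gz.Write([]byte("#!/bin/bash\necho hello\n")); err != nil {
		panic(err)
	}
	if err := gz.Close(); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(b.Bytes()))
}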
@@ -1396,6 +1515,29 @@ func interpolationFuncTimestamp() ast.Function {
1396 } 1515 }
1397} 1516}
1398 1517
1518func interpolationFuncTimeAdd() ast.Function {
1519 return ast.Function{
1520 ArgTypes: []ast.Type{
1521 ast.TypeString, // input timestamp string in RFC3339 format
1522 ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration
1523 },
1524 ReturnType: ast.TypeString,
1525 Callback: func(args []interface{}) (interface{}, error) {
1526
1527 ts, err := time.Parse(time.RFC3339, args[0].(string))
1528 if err != nil {
1529 return nil, err
1530 }
1531 duration, err := time.ParseDuration(args[1].(string))
1532 if err != nil {
1533 return nil, err
1534 }
1535
1536 return ts.Add(duration).Format(time.RFC3339), nil
1537 },
1538 }
1539}
1540
1399// interpolationFuncTitle implements the "title" function that returns a copy of the 1541// interpolationFuncTitle implements the "title" function that returns a copy of the
1400// string in which first characters of all the words are capitalized. 1542// string in which first characters of all the words are capitalized.
1401func interpolationFuncTitle() ast.Function { 1543func interpolationFuncTitle() ast.Function {
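// Note on the "timeadd" function added above: the first argument must be an
// RFC 3339 timestamp and the second a duration accepted by
// time.ParseDuration; the result is the shifted timestamp in RFC 3339 form.
// The same arithmetic in isolation (illustration only):
package main

import (
	"fmt"
	"time"
)

func main() {
	ts, err := time.Parse(time.RFC3339, "2017-11-22T00:00:00Z")
	if err != nil {
		panic(err)
	}
	d, err := time.ParseDuration("10m30s")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Add(d).Format(time.RFC3339)) // 2017-11-22T00:10:30Z
}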
@@ -1441,7 +1583,7 @@ func interpolationFuncSubstr() ast.Function {
1441 return nil, fmt.Errorf("length should be a non-negative integer") 1583 return nil, fmt.Errorf("length should be a non-negative integer")
1442 } 1584 }
1443 1585
1444 if offset > len(str) { 1586 if offset > len(str) || offset < 0 {
1445 return nil, fmt.Errorf("offset cannot be larger than the length of the string") 1587 return nil, fmt.Errorf("offset cannot be larger than the length of the string")
1446 } 1588 }
1447 1589
@@ -1453,3 +1595,160 @@ func interpolationFuncSubstr() ast.Function {
1453 }, 1595 },
1454 } 1596 }
1455} 1597}
1598
1599// flattener recursively flattens nested ast.TypeList values into finalList.
1600func flattener(finalList []ast.Variable, flattenList []ast.Variable) []ast.Variable {
1601 for _, val := range flattenList {
1602 if val.Type == ast.TypeList {
1603 finalList = flattener(finalList, val.Value.([]ast.Variable))
1604 } else {
1605 finalList = append(finalList, val)
1606 }
1607 }
1608 return finalList
1609}
1610
1611// interpolationFuncFlatten implements the "flatten" function, which flattens a nested list into a single flat list.
1612func interpolationFuncFlatten() ast.Function {
1613 return ast.Function{
1614 ArgTypes: []ast.Type{ast.TypeList},
1615 ReturnType: ast.TypeList,
1616 Variadic: false,
1617 Callback: func(args []interface{}) (interface{}, error) {
1618 inputList := args[0].([]ast.Variable)
1619
1620 var outputList []ast.Variable
1621 return flattener(outputList, inputList), nil
1622 },
1623 }
1624}
1625
1626func interpolationFuncURLEncode() ast.Function {
1627 return ast.Function{
1628 ArgTypes: []ast.Type{ast.TypeString},
1629 ReturnType: ast.TypeString,
1630 Callback: func(args []interface{}) (interface{}, error) {
1631 s := args[0].(string)
1632 return url.QueryEscape(s), nil
1633 },
1634 }
1635}
1636
1637// interpolationFuncTranspose implements the "transpose" function
1638// that converts a map (string,list) to a map (string,list) where
1639// the unique values of the original lists become the keys of the
1640// new map and the keys of the original map become values for the
1641// corresponding new keys.
1642func interpolationFuncTranspose() ast.Function {
1643 return ast.Function{
1644 ArgTypes: []ast.Type{ast.TypeMap},
1645 ReturnType: ast.TypeMap,
1646 Callback: func(args []interface{}) (interface{}, error) {
1647
1648 inputMap := args[0].(map[string]ast.Variable)
1649 outputMap := make(map[string]ast.Variable)
1650 tmpMap := make(map[string][]string)
1651
1652 for inKey, inVal := range inputMap {
1653 if inVal.Type != ast.TypeList {
1654 return nil, fmt.Errorf("transpose requires a map of lists of strings")
1655 }
1656 values := inVal.Value.([]ast.Variable)
1657 for _, listVal := range values {
1658 if listVal.Type != ast.TypeString {
1659 return nil, fmt.Errorf("transpose requires the given map values to be lists of strings")
1660 }
1661 outKey := listVal.Value.(string)
1662 if _, ok := tmpMap[outKey]; !ok {
1663 tmpMap[outKey] = make([]string, 0)
1664 }
1665 outVal := tmpMap[outKey]
1666 outVal = append(outVal, inKey)
1667 sort.Strings(outVal)
1668 tmpMap[outKey] = outVal
1669 }
1670 }
1671
1672 for outKey, outVal := range tmpMap {
1673 values := make([]ast.Variable, 0)
1674 for _, v := range outVal {
1675 values = append(values, ast.Variable{Type: ast.TypeString, Value: v})
1676 }
1677 outputMap[outKey] = ast.Variable{Type: ast.TypeList, Value: values}
1678 }
1679 return outputMap, nil
1680 },
1681 }
1682}
1683
1684// interpolationFuncAbs returns the absolute value of a given float.
1685func interpolationFuncAbs() ast.Function {
1686 return ast.Function{
1687 ArgTypes: []ast.Type{ast.TypeFloat},
1688 ReturnType: ast.TypeFloat,
1689 Callback: func(args []interface{}) (interface{}, error) {
1690 return math.Abs(args[0].(float64)), nil
1691 },
1692 }
1693}
1694
1695// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does
1696// RSA decryption.
1697func interpolationFuncRsaDecrypt() ast.Function {
1698 return ast.Function{
1699 ArgTypes: []ast.Type{ast.TypeString, ast.TypeString},
1700 ReturnType: ast.TypeString,
1701 Callback: func(args []interface{}) (interface{}, error) {
1702 s := args[0].(string)
1703 key := args[1].(string)
1704
1705 b, err := base64.StdEncoding.DecodeString(s)
1706 if err != nil {
1707 return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", s)
1708 }
1709
1710 block, _ := pem.Decode([]byte(key))
1711 if block == nil {
1712 return "", fmt.Errorf("Failed to read key %q: no key found", key)
1713 }
1714 if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
1715 return "", fmt.Errorf(
1716 "Failed to read key %q: password protected keys are\n"+
1717 "not supported. Please decrypt the key prior to use.", key)
1718 }
1719
1720 x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
1721 if err != nil {
1722 return "", err
1723 }
1724
1725 out, err := rsa.DecryptPKCS1v15(nil, x509Key, b)
1726 if err != nil {
1727 return "", err
1728 }
1729
1730 return string(out), nil
1731 },
1732 }
1733}
1734
1735// interpolationFuncMakeFileHash constructs a function that hashes the contents
1736// of a file by combining the implementations of the file(...) function and
1737// a given other function that is assumed to take a single string argument and
1738// return a hash value.
1739func interpolationFuncMakeFileHash(hashFunc ast.Function) ast.Function {
1740 fileFunc := interpolationFuncFile()
1741
1742 return ast.Function{
1743 ArgTypes: []ast.Type{ast.TypeString},
1744 ReturnType: ast.TypeString,
1745 Callback: func(args []interface{}) (interface{}, error) {
1746 filename := args[0].(string)
1747 contents, err := fileFunc.Callback([]interface{}{filename})
1748 if err != nil {
1749 return nil, err
1750 }
1751 return hashFunc.Callback([]interface{}{contents})
1752 },
1753 }
1754}
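interpolationFuncMakeFileHash above wires two existing functions together: the file(...) implementation reads the file, and the wrapped hash function digests the returned contents. A minimal sketch of the same composition pattern over hil's ast.Function type, using two toy callbacks rather than the real file and hash functions (illustration only):

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hil/ast"
)

// compose builds a one-argument function whose callback feeds the result of
// inner into outer, mirroring interpolationFuncMakeFileHash.
func compose(outer, inner ast.Function) ast.Function {
	return ast.Function{
		ArgTypes:   []ast.Type{ast.TypeString},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			v, err := inner.Callback(args)
			if err != nil {
				return nil, err
			}
			return outer.Callback([]interface{}{v})
		},
	}
}

func main() {
	upper := ast.Function{
		ArgTypes:   []ast.Type{ast.TypeString},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			return strings.ToUpper(args[0].(string)), nil
		},
	}
	trim := ast.Function{
		ArgTypes:   []ast.Type{ast.TypeString},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			return strings.TrimSpace(args[0].(string)), nil
		},
	}
	out, _ := compose(upper, trim).Callback([]interface{}{"  hello  "})
	fmt.Println(out) // HELLO
}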
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
index ead3d10..66a677d 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
@@ -271,9 +271,7 @@ func (w *interpolationWalker) splitSlice() {
271 result = append(result, val.Value) 271 result = append(result, val.Value)
272 } 272 }
273 case []interface{}: 273 case []interface{}:
274 for _, element := range val { 274 result = append(result, val...)
275 result = append(result, element)
276 }
277 default: 275 default:
278 result = append(result, v) 276 result = append(result, v)
279 } 277 }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
index 5dd7d46..6e34781 100644
--- a/vendor/github.com/hashicorp/terraform/config/loader.go
+++ b/vendor/github.com/hashicorp/terraform/config/loader.go
@@ -80,7 +80,7 @@ func LoadDir(root string) (*Config, error) {
80 if err != nil { 80 if err != nil {
81 return nil, err 81 return nil, err
82 } 82 }
83 if len(files) == 0 { 83 if len(files) == 0 && len(overrides) == 0 {
84 return nil, &ErrNoConfigsFound{Dir: root} 84 return nil, &ErrNoConfigsFound{Dir: root}
85 } 85 }
86 86
@@ -112,6 +112,9 @@ func LoadDir(root string) (*Config, error) {
112 result = c 112 result = c
113 } 113 }
114 } 114 }
115 if len(files) == 0 {
116 result = &Config{}
117 }
115 118
116 // Load all the overrides, and merge them into the config 119 // Load all the overrides, and merge them into the config
117 for _, f := range overrides { 120 for _, f := range overrides {
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
index e85e493..68cffe2 100644
--- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
@@ -17,10 +17,20 @@ type hclConfigurable struct {
17 Root *ast.File 17 Root *ast.File
18} 18}
19 19
20var ReservedDataSourceFields = []string{
21 "connection",
22 "count",
23 "depends_on",
24 "lifecycle",
25 "provider",
26 "provisioner",
27}
28
20var ReservedResourceFields = []string{ 29var ReservedResourceFields = []string{
21 "connection", 30 "connection",
22 "count", 31 "count",
23 "depends_on", 32 "depends_on",
33 "id",
24 "lifecycle", 34 "lifecycle",
25 "provider", 35 "provider",
26 "provisioner", 36 "provisioner",
@@ -35,6 +45,7 @@ func (t *hclConfigurable) Config() (*Config, error) {
35 validKeys := map[string]struct{}{ 45 validKeys := map[string]struct{}{
36 "atlas": struct{}{}, 46 "atlas": struct{}{},
37 "data": struct{}{}, 47 "data": struct{}{},
48 "locals": struct{}{},
38 "module": struct{}{}, 49 "module": struct{}{},
39 "output": struct{}{}, 50 "output": struct{}{},
40 "provider": struct{}{}, 51 "provider": struct{}{},
@@ -70,6 +81,15 @@ func (t *hclConfigurable) Config() (*Config, error) {
70 } 81 }
71 } 82 }
72 83
84 // Build local values
85 if locals := list.Filter("locals"); len(locals.Items) > 0 {
86 var err error
87 config.Locals, err = loadLocalsHcl(locals)
88 if err != nil {
89 return nil, err
90 }
91 }
92
73 // Get Atlas configuration 93 // Get Atlas configuration
74 if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { 94 if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
75 var err error 95 var err error
@@ -373,9 +393,6 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
373 err) 393 err)
374 } 394 }
375 395
376 // Remove the fields we handle specially
377 delete(config, "source")
378
379 rawConfig, err := NewRawConfig(config) 396 rawConfig, err := NewRawConfig(config)
380 if err != nil { 397 if err != nil {
381 return nil, fmt.Errorf( 398 return nil, fmt.Errorf(
@@ -384,7 +401,11 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
384 err) 401 err)
385 } 402 }
386 403
387 // If we have a count, then figure it out 404 // Remove the fields we handle specially
405 delete(config, "source")
406 delete(config, "version")
407 delete(config, "providers")
408
388 var source string 409 var source string
389 if o := listVal.Filter("source"); len(o.Items) > 0 { 410 if o := listVal.Filter("source"); len(o.Items) > 0 {
390 err = hcl.DecodeObject(&source, o.Items[0].Val) 411 err = hcl.DecodeObject(&source, o.Items[0].Val)
@@ -396,9 +417,33 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
396 } 417 }
397 } 418 }
398 419
420 var version string
421 if o := listVal.Filter("version"); len(o.Items) > 0 {
422 err = hcl.DecodeObject(&version, o.Items[0].Val)
423 if err != nil {
424 return nil, fmt.Errorf(
425 "Error parsing version for %s: %s",
426 k,
427 err)
428 }
429 }
430
431 var providers map[string]string
432 if o := listVal.Filter("providers"); len(o.Items) > 0 {
433 err = hcl.DecodeObject(&providers, o.Items[0].Val)
434 if err != nil {
435 return nil, fmt.Errorf(
436 "Error parsing providers for %s: %s",
437 k,
438 err)
439 }
440 }
441
399 result = append(result, &Module{ 442 result = append(result, &Module{
400 Name: k, 443 Name: k,
401 Source: source, 444 Source: source,
445 Version: version,
446 Providers: providers,
402 RawConfig: rawConfig, 447 RawConfig: rawConfig,
403 }) 448 })
404 } 449 }
@@ -406,6 +451,59 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
406 return result, nil 451 return result, nil
407} 452}
408 453
454// loadLocalsHcl recurses into the given HCL object and turns it into
455// a list of locals.
456func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) {
457
458 result := make([]*Local, 0, len(list.Items))
459
460 for _, block := range list.Items {
461 if len(block.Keys) > 0 {
462 return nil, fmt.Errorf(
463 "locals block at %s should not have label %q",
464 block.Pos(), block.Keys[0].Token.Value(),
465 )
466 }
467
468 blockObj, ok := block.Val.(*ast.ObjectType)
469 if !ok {
470 return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos())
471 }
472
473 // blockObj now contains directly our local decls
474 for _, item := range blockObj.List.Items {
475 if len(item.Keys) != 1 {
476 return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos())
477 }
478
479 // By the time we get here there can only be one item left, but
480 // we'll decode into a map anyway because it's a convenient way
481 // to extract both the key and the value robustly.
482 kv := map[string]interface{}{}
483 hcl.DecodeObject(&kv, item)
484 for k, v := range kv {
485 rawConfig, err := NewRawConfig(map[string]interface{}{
486 "value": v,
487 })
488
489 if err != nil {
490 return nil, fmt.Errorf(
491 "error parsing local value %q at %s: %s",
492 k, item.Val.Pos(), err,
493 )
494 }
495
496 result = append(result, &Local{
497 Name: k,
498 RawConfig: rawConfig,
499 })
500 }
501 }
502 }
503
504 return result, nil
505}
506
409// LoadOutputsHcl recurses into the given HCL object and turns 507// LoadOutputsHcl recurses into the given HCL object and turns
410// it into a mapping of outputs. 508// it into a mapping of outputs.
411func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { 509func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
@@ -434,6 +532,7 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
434 532
435 // Delete special keys 533 // Delete special keys
436 delete(config, "depends_on") 534 delete(config, "depends_on")
535 delete(config, "description")
437 536
438 rawConfig, err := NewRawConfig(config) 537 rawConfig, err := NewRawConfig(config)
439 if err != nil { 538 if err != nil {
@@ -455,10 +554,23 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
455 } 554 }
456 } 555 }
457 556
557 // If we have a description field, then filter that
558 var description string
559 if o := listVal.Filter("description"); len(o.Items) > 0 {
560 err := hcl.DecodeObject(&description, o.Items[0].Val)
561 if err != nil {
562 return nil, fmt.Errorf(
563 "Error reading description for output %q: %s",
564 n,
565 err)
566 }
567 }
568
458 result = append(result, &Output{ 569 result = append(result, &Output{
459 Name: n, 570 Name: n,
460 RawConfig: rawConfig, 571 RawConfig: rawConfig,
461 DependsOn: dependsOn, 572 DependsOn: dependsOn,
573 Description: description,
462 }) 574 })
463 } 575 }
464 576
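For reference, each attribute inside a locals block ends up as its own *Local whose RawConfig wraps the expression under a single "value" key, which matches the shape the HCL2 loader below produces via hcl2shim.SingleAttrBody. A rough sketch of the resulting structure, assuming the exported Local fields used in the diff and that NewRawConfig succeeds (illustration only):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// locals { instance_count = 2 } decodes into one Local named
	// "instance_count" whose raw config is {"value": 2}.
	raw, err := config.NewRawConfig(map[string]interface{}{"value": 2})
	if err != nil {
		panic(err)
	}
	locals := []*config.Local{{Name: "instance_count", RawConfig: raw}}
	fmt.Println(locals[0].Name)
}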
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go
new file mode 100644
index 0000000..4f9f129
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go
@@ -0,0 +1,473 @@
1package config
2
3import (
4 "fmt"
5 "sort"
6 "strings"
7
8 gohcl2 "github.com/hashicorp/hcl2/gohcl"
9 hcl2 "github.com/hashicorp/hcl2/hcl"
10 hcl2parse "github.com/hashicorp/hcl2/hclparse"
11 "github.com/hashicorp/terraform/config/hcl2shim"
12 "github.com/zclconf/go-cty/cty"
13)
14
15// hcl2Configurable is an implementation of configurable that knows
16// how to turn a HCL Body into a *Config object.
17type hcl2Configurable struct {
18 SourceFilename string
19 Body hcl2.Body
20}
21
22// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc.
23type hcl2Loader struct {
24 Parser *hcl2parse.Parser
25}
26
27// For the moment we'll just have a global loader since we don't have anywhere
28// better to stash this.
29// TODO: refactor the loader API so that it uses some sort of object we can
30// stash the parser inside.
31var globalHCL2Loader = newHCL2Loader()
32
33// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser.
34//
35// HCL parsers retain information about files that are loaded to aid in
36// producing diagnostic messages, so all files within a single configuration
37// should be loaded with the same parser to ensure the availability of
38// full diagnostic information.
39func newHCL2Loader() hcl2Loader {
40 return hcl2Loader{
41 Parser: hcl2parse.NewParser(),
42 }
43}
44
45// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it
46// into a hcl2Configurable.
47func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) {
48 var f *hcl2.File
49 var diags hcl2.Diagnostics
50 if strings.HasSuffix(filename, ".json") {
51 f, diags = l.Parser.ParseJSONFile(filename)
52 } else {
53 f, diags = l.Parser.ParseHCLFile(filename)
54 }
55 if diags.HasErrors() {
56 // Return diagnostics as an error; callers may type-assert this to
57 // recover the original diagnostics, if it doesn't end up wrapped
58 // in another error.
59 return nil, nil, diags
60 }
61
62 return &hcl2Configurable{
63 SourceFilename: filename,
64 Body: f.Body,
65 }, nil, nil
66}
67
68func (t *hcl2Configurable) Config() (*Config, error) {
69 config := &Config{}
70
71 // these structs are used only for the initial shallow decoding; we'll
72 // expand this into the main, public-facing config structs afterwards.
73 type atlas struct {
74 Name string `hcl:"name"`
75 Include *[]string `hcl:"include"`
76 Exclude *[]string `hcl:"exclude"`
77 }
78 type provider struct {
79 Name string `hcl:"name,label"`
80 Alias *string `hcl:"alias,attr"`
81 Version *string `hcl:"version,attr"`
82 Config hcl2.Body `hcl:",remain"`
83 }
84 type module struct {
85 Name string `hcl:"name,label"`
86 Source string `hcl:"source,attr"`
87 Version *string `hcl:"version,attr"`
88 Providers *map[string]string `hcl:"providers,attr"`
89 Config hcl2.Body `hcl:",remain"`
90 }
91 type resourceLifecycle struct {
92 CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"`
93 PreventDestroy *bool `hcl:"prevent_destroy,attr"`
94 IgnoreChanges *[]string `hcl:"ignore_changes,attr"`
95 }
96 type connection struct {
97 Config hcl2.Body `hcl:",remain"`
98 }
99 type provisioner struct {
100 Type string `hcl:"type,label"`
101
102 When *string `hcl:"when,attr"`
103 OnFailure *string `hcl:"on_failure,attr"`
104
105 Connection *connection `hcl:"connection,block"`
106 Config hcl2.Body `hcl:",remain"`
107 }
108 type managedResource struct {
109 Type string `hcl:"type,label"`
110 Name string `hcl:"name,label"`
111
112 CountExpr hcl2.Expression `hcl:"count,attr"`
113 Provider *string `hcl:"provider,attr"`
114 DependsOn *[]string `hcl:"depends_on,attr"`
115
116 Lifecycle *resourceLifecycle `hcl:"lifecycle,block"`
117 Provisioners []provisioner `hcl:"provisioner,block"`
118 Connection *connection `hcl:"connection,block"`
119
120 Config hcl2.Body `hcl:",remain"`
121 }
122 type dataResource struct {
123 Type string `hcl:"type,label"`
124 Name string `hcl:"name,label"`
125
126 CountExpr hcl2.Expression `hcl:"count,attr"`
127 Provider *string `hcl:"provider,attr"`
128 DependsOn *[]string `hcl:"depends_on,attr"`
129
130 Config hcl2.Body `hcl:",remain"`
131 }
132 type variable struct {
133 Name string `hcl:"name,label"`
134
135 DeclaredType *string `hcl:"type,attr"`
136 Default *cty.Value `hcl:"default,attr"`
137 Description *string `hcl:"description,attr"`
138 Sensitive *bool `hcl:"sensitive,attr"`
139 }
140 type output struct {
141 Name string `hcl:"name,label"`
142
143 ValueExpr hcl2.Expression `hcl:"value,attr"`
144 DependsOn *[]string `hcl:"depends_on,attr"`
145 Description *string `hcl:"description,attr"`
146 Sensitive *bool `hcl:"sensitive,attr"`
147 }
148 type locals struct {
149 Definitions hcl2.Attributes `hcl:",remain"`
150 }
151 type backend struct {
152 Type string `hcl:"type,label"`
153 Config hcl2.Body `hcl:",remain"`
154 }
155 type terraform struct {
156 RequiredVersion *string `hcl:"required_version,attr"`
157 Backend *backend `hcl:"backend,block"`
158 }
159 type topLevel struct {
160 Atlas *atlas `hcl:"atlas,block"`
161 Datas []dataResource `hcl:"data,block"`
162 Modules []module `hcl:"module,block"`
163 Outputs []output `hcl:"output,block"`
164 Providers []provider `hcl:"provider,block"`
165 Resources []managedResource `hcl:"resource,block"`
166 Terraform *terraform `hcl:"terraform,block"`
167 Variables []variable `hcl:"variable,block"`
168 Locals []*locals `hcl:"locals,block"`
169 }
170
171 var raw topLevel
172 diags := gohcl2.DecodeBody(t.Body, nil, &raw)
173 if diags.HasErrors() {
174 // Do some minimal decoding to see if we can at least get the
175 // required Terraform version, which might help explain why we
176 // couldn't parse the rest.
177 if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil {
178 config.Terraform = &Terraform{
179 RequiredVersion: *raw.Terraform.RequiredVersion,
180 }
181 }
182
183 // We return the diags as an implementation of error, which the
184		// caller can then type-assert if desired to recover the individual
185 // diagnostics.
186 // FIXME: The current API gives us no way to return warnings in the
187		// absence of any errors.
188 return config, diags
189 }
190
191 if raw.Terraform != nil {
192 var reqdVersion string
193 var backend *Backend
194
195 if raw.Terraform.RequiredVersion != nil {
196 reqdVersion = *raw.Terraform.RequiredVersion
197 }
198 if raw.Terraform.Backend != nil {
199 backend = new(Backend)
200 backend.Type = raw.Terraform.Backend.Type
201
202 // We don't permit interpolations or nested blocks inside the
203 // backend config, so we can decode the config early here and
204 // get direct access to the values, which is important for the
205 // config hashing to work as expected.
206 var config map[string]string
207 configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config)
208 diags = append(diags, configDiags...)
209
210 raw := make(map[string]interface{}, len(config))
211 for k, v := range config {
212 raw[k] = v
213 }
214
215 var err error
216 backend.RawConfig, err = NewRawConfig(raw)
217 if err != nil {
218 diags = append(diags, &hcl2.Diagnostic{
219 Severity: hcl2.DiagError,
220 Summary: "Invalid backend configuration",
221 Detail: fmt.Sprintf("Error in backend configuration: %s", err),
222 })
223 }
224 }
225
226 config.Terraform = &Terraform{
227 RequiredVersion: reqdVersion,
228 Backend: backend,
229 }
230 }
231
232 if raw.Atlas != nil {
233 var include, exclude []string
234 if raw.Atlas.Include != nil {
235 include = *raw.Atlas.Include
236 }
237 if raw.Atlas.Exclude != nil {
238 exclude = *raw.Atlas.Exclude
239 }
240 config.Atlas = &AtlasConfig{
241 Name: raw.Atlas.Name,
242 Include: include,
243 Exclude: exclude,
244 }
245 }
246
247 for _, rawM := range raw.Modules {
248 m := &Module{
249 Name: rawM.Name,
250 Source: rawM.Source,
251 RawConfig: NewRawConfigHCL2(rawM.Config),
252 }
253
254 if rawM.Version != nil {
255 m.Version = *rawM.Version
256 }
257
258 if rawM.Providers != nil {
259 m.Providers = *rawM.Providers
260 }
261
262 config.Modules = append(config.Modules, m)
263 }
264
265 for _, rawV := range raw.Variables {
266 v := &Variable{
267 Name: rawV.Name,
268 }
269 if rawV.DeclaredType != nil {
270 v.DeclaredType = *rawV.DeclaredType
271 }
272 if rawV.Default != nil {
273 v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default)
274 }
275 if rawV.Description != nil {
276 v.Description = *rawV.Description
277 }
278
279 config.Variables = append(config.Variables, v)
280 }
281
282 for _, rawO := range raw.Outputs {
283 o := &Output{
284 Name: rawO.Name,
285 }
286
287 if rawO.Description != nil {
288 o.Description = *rawO.Description
289 }
290 if rawO.DependsOn != nil {
291 o.DependsOn = *rawO.DependsOn
292 }
293 if rawO.Sensitive != nil {
294 o.Sensitive = *rawO.Sensitive
295 }
296
297 // The result is expected to be a map like map[string]interface{}{"value": something},
298 // so we'll fake that with our hcl2shim.SingleAttrBody shim.
299 o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{
300 Name: "value",
301 Expr: rawO.ValueExpr,
302 })
303
304 config.Outputs = append(config.Outputs, o)
305 }
306
307 for _, rawR := range raw.Resources {
308 r := &Resource{
309 Mode: ManagedResourceMode,
310 Type: rawR.Type,
311 Name: rawR.Name,
312 }
313 if rawR.Lifecycle != nil {
314 var l ResourceLifecycle
315 if rawR.Lifecycle.CreateBeforeDestroy != nil {
316 l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy
317 }
318 if rawR.Lifecycle.PreventDestroy != nil {
319 l.PreventDestroy = *rawR.Lifecycle.PreventDestroy
320 }
321 if rawR.Lifecycle.IgnoreChanges != nil {
322 l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges
323 }
324 r.Lifecycle = l
325 }
326 if rawR.Provider != nil {
327 r.Provider = *rawR.Provider
328 }
329 if rawR.DependsOn != nil {
330 r.DependsOn = *rawR.DependsOn
331 }
332
333 var defaultConnInfo *RawConfig
334 if rawR.Connection != nil {
335 defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config)
336 }
337
338 for _, rawP := range rawR.Provisioners {
339 p := &Provisioner{
340 Type: rawP.Type,
341 }
342
343 switch {
344 case rawP.When == nil:
345 p.When = ProvisionerWhenCreate
346 case *rawP.When == "create":
347 p.When = ProvisionerWhenCreate
348 case *rawP.When == "destroy":
349 p.When = ProvisionerWhenDestroy
350 default:
351 p.When = ProvisionerWhenInvalid
352 }
353
354 switch {
355 case rawP.OnFailure == nil:
356 p.OnFailure = ProvisionerOnFailureFail
357 case *rawP.When == "fail":
358 p.OnFailure = ProvisionerOnFailureFail
359 case *rawP.When == "continue":
360 p.OnFailure = ProvisionerOnFailureContinue
361 default:
362 p.OnFailure = ProvisionerOnFailureInvalid
363 }
364
365 if rawP.Connection != nil {
366 p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config)
367 } else {
368 p.ConnInfo = defaultConnInfo
369 }
370
371 p.RawConfig = NewRawConfigHCL2(rawP.Config)
372
373 r.Provisioners = append(r.Provisioners, p)
374 }
375
376 // The old loader records the count expression as a weird RawConfig with
377 // a single-element map inside. Since the rest of the world is assuming
378 // that, we'll mimic it here.
379 {
380 countBody := hcl2shim.SingleAttrBody{
381 Name: "count",
382 Expr: rawR.CountExpr,
383 }
384
385 r.RawCount = NewRawConfigHCL2(countBody)
386 r.RawCount.Key = "count"
387 }
388
389 r.RawConfig = NewRawConfigHCL2(rawR.Config)
390
391 config.Resources = append(config.Resources, r)
392
393 }
394
395 for _, rawR := range raw.Datas {
396 r := &Resource{
397 Mode: DataResourceMode,
398 Type: rawR.Type,
399 Name: rawR.Name,
400 }
401
402 if rawR.Provider != nil {
403 r.Provider = *rawR.Provider
404 }
405 if rawR.DependsOn != nil {
406 r.DependsOn = *rawR.DependsOn
407 }
408
409 // The old loader records the count expression as a weird RawConfig with
410 // a single-element map inside. Since the rest of the world is assuming
411 // that, we'll mimic it here.
412 {
413 countBody := hcl2shim.SingleAttrBody{
414 Name: "count",
415 Expr: rawR.CountExpr,
416 }
417
418 r.RawCount = NewRawConfigHCL2(countBody)
419 r.RawCount.Key = "count"
420 }
421
422 r.RawConfig = NewRawConfigHCL2(rawR.Config)
423
424 config.Resources = append(config.Resources, r)
425 }
426
427 for _, rawP := range raw.Providers {
428 p := &ProviderConfig{
429 Name: rawP.Name,
430 }
431
432 if rawP.Alias != nil {
433 p.Alias = *rawP.Alias
434 }
435 if rawP.Version != nil {
436 p.Version = *rawP.Version
437 }
438
439		// Unlike outputs above, the full provider body is wrapped directly
440		// as the provider's RawConfig.
441 p.RawConfig = NewRawConfigHCL2(rawP.Config)
442
443 config.ProviderConfigs = append(config.ProviderConfigs, p)
444 }
445
446 for _, rawL := range raw.Locals {
447 names := make([]string, 0, len(rawL.Definitions))
448 for n := range rawL.Definitions {
449 names = append(names, n)
450 }
451 sort.Strings(names)
452 for _, n := range names {
453 attr := rawL.Definitions[n]
454 l := &Local{
455 Name: n,
456 RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{
457 Name: "value",
458 Expr: attr.Expr,
459 }),
460 }
461 config.Locals = append(config.Locals, l)
462 }
463 }
464
465 // FIXME: The current API gives us no way to return warnings in the
466	// absence of any errors.
467 var err error
468 if diags.HasErrors() {
469 err = diags
470 }
471
472 return config, err
473}
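The HCL2 loader above leans entirely on gohcl2 struct tags for its shallow decode: "label" captures block labels, "attr" captures attributes, and ",remain" defers the rest of the body for later evaluation. A minimal self-contained example of the same pattern, parsing a made-up module block (illustration only, not part of the vendored code):

package main

import (
	"fmt"
	"log"

	gohcl2 "github.com/hashicorp/hcl2/gohcl"
	hcl2 "github.com/hashicorp/hcl2/hcl"
	hcl2parse "github.com/hashicorp/hcl2/hclparse"
)

func main() {
	src := `
module "network" {
  source  = "./network"
  version = "1.2.0"
  cidr    = "10.0.0.0/16"
}
`
	parser := hcl2parse.NewParser()
	f, diags := parser.ParseHCL([]byte(src), "example.tf")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	var root struct {
		Modules []struct {
			Name    string    `hcl:"name,label"`
			Source  string    `hcl:"source,attr"`
			Version *string   `hcl:"version,attr"`
			Config  hcl2.Body `hcl:",remain"` // everything else stays undecoded
		} `hcl:"module,block"`
	}
	if diags := gohcl2.DecodeBody(f.Body, nil, &root); diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(root.Modules[0].Name, root.Modules[0].Source) // network ./network
}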
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
index db214be..55fc864 100644
--- a/vendor/github.com/hashicorp/terraform/config/merge.go
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -137,6 +137,17 @@ func Merge(c1, c2 *Config) (*Config, error) {
137 } 137 }
138 } 138 }
139 139
140 // Local Values
141 // These are simpler than the other config elements because they are just
142 // flat values and so no deep merging is required.
143 if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 {
144 // Explicit length check above because we want c.Locals to remain
145 // nil if the result would be empty.
146 c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals))
147 c.Locals = append(c.Locals, c1.Locals...)
148 c.Locals = append(c.Locals, c2.Locals...)
149 }
150
140 return c, nil 151 return c, nil
141} 152}
142 153
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
index 96b4a63..5073d0d 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/get.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -3,6 +3,7 @@ package module
3import ( 3import (
4 "io/ioutil" 4 "io/ioutil"
5 "os" 5 "os"
6 "path/filepath"
6 7
7 "github.com/hashicorp/go-getter" 8 "github.com/hashicorp/go-getter"
8) 9)
@@ -37,13 +38,10 @@ func GetCopy(dst, src string) error {
37 if err != nil { 38 if err != nil {
38 return err 39 return err
39 } 40 }
40 // FIXME: This isn't completely safe. Creating and removing our temp path
41 // exposes where to race to inject files.
42 if err := os.RemoveAll(tmpDir); err != nil {
43 return err
44 }
45 defer os.RemoveAll(tmpDir) 41 defer os.RemoveAll(tmpDir)
46 42
43 tmpDir = filepath.Join(tmpDir, "module")
44
47 // Get to that temporary dir 45 // Get to that temporary dir
48 if err := getter.Get(tmpDir, src); err != nil { 46 if err := getter.Get(tmpDir, src); err != nil {
49 return err 47 return err
@@ -57,15 +55,3 @@ func GetCopy(dst, src string) error {
57 // Copy to the final location 55 // Copy to the final location
58 return copyDir(dst, tmpDir) 56 return copyDir(dst, tmpDir)
59} 57}
60
61func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
62 // Get the module with the level specified if we were told to.
63 if mode > GetModeNone {
64 if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
65 return "", false, err
66 }
67 }
68
69 // Get the directory where the module is.
70 return s.Dir(key)
71}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
index 8603ee2..da520ab 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/inode.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -1,4 +1,4 @@
1// +build linux darwin openbsd netbsd solaris 1// +build linux darwin openbsd netbsd solaris dragonfly
2 2
3package module 3package module
4 4
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
index f8649f6..7dc8fcc 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/module.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -2,6 +2,8 @@ package module
2 2
3// Module represents the metadata for a single module. 3// Module represents the metadata for a single module.
4type Module struct { 4type Module struct {
5 Name string 5 Name string
6 Source string 6 Source string
7 Version string
8 Providers map[string]string
7} 9}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go
new file mode 100644
index 0000000..58e3a10
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go
@@ -0,0 +1,365 @@
1package module
2
3import (
4 "encoding/json"
5 "fmt"
6 "io/ioutil"
7 "log"
8 "os"
9 "path/filepath"
10 "strings"
11
12 getter "github.com/hashicorp/go-getter"
13 "github.com/hashicorp/terraform/registry"
14 "github.com/hashicorp/terraform/registry/regsrc"
15 "github.com/hashicorp/terraform/svchost/disco"
16 "github.com/mitchellh/cli"
17)
18
19const manifestName = "modules.json"
20
21// moduleManifest is the serialization structure used to record the stored
22// module's metadata.
23type moduleManifest struct {
24 Modules []moduleRecord
25}
26
27// moduleRecord represents the stored module's metadata.
28// This is compared for equality using '==', so all fields need to remain
29// comparable.
30type moduleRecord struct {
31 // Source is the module source string from the config, minus any
32 // subdirectory.
33 Source string
34
35 // Key is the locally unique identifier for this module.
36 Key string
37
38 // Version is the exact version string for the stored module.
39 Version string
40
41 // Dir is the directory name returned by the FileStorage. This is what
42 // allows us to correlate a particular module version with the location on
43 // disk.
44 Dir string
45
46 // Root is the root directory containing the module. If the module is
47 // unpacked from an archive, and not located in the root directory, this is
48 // used to direct the loader to the correct subdirectory. This is
49 // independent from any subdirectory in the original source string, which
50 // may traverse further into the module tree.
51 Root string
52
53 // url is the location of the module source
54 url string
55
56 // Registry is true if this module is sourced from a registry
57 registry bool
58}
59
60// Storage implements methods to manage the storage of modules.
61// This is used by Tree.Load to query registries, authenticate requests, and
62// store modules locally.
63type Storage struct {
64 // StorageDir is the full path to the directory where all modules will be
65 // stored.
66 StorageDir string
67
68 // Ui is an optional cli.Ui for user output
69 Ui cli.Ui
70
71 // Mode is the GetMode that will be used for various operations.
72 Mode GetMode
73
74 registry *registry.Client
75}
76
77// NewStorage returns a new initialized Storage object.
78func NewStorage(dir string, services *disco.Disco) *Storage {
79 regClient := registry.NewClient(services, nil)
80
81 return &Storage{
82 StorageDir: dir,
83 registry: regClient,
84 }
85}
86
87// loadManifest returns the moduleManifest file from the parent directory.
88func (s Storage) loadManifest() (moduleManifest, error) {
89 manifest := moduleManifest{}
90
91 manifestPath := filepath.Join(s.StorageDir, manifestName)
92 data, err := ioutil.ReadFile(manifestPath)
93 if err != nil && !os.IsNotExist(err) {
94 return manifest, err
95 }
96
97 if len(data) == 0 {
98 return manifest, nil
99 }
100
101 if err := json.Unmarshal(data, &manifest); err != nil {
102 return manifest, err
103 }
104
105 for i, rec := range manifest.Modules {
106 // If the path was recorded before we changed to always using a
107 // slash as separator, we delete the record from the manifest so
108 // it can be discovered again and will be recorded using a slash.
109 if strings.Contains(rec.Dir, "\\") {
110 manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1]
111 manifest.Modules = manifest.Modules[:len(manifest.Modules)-1]
112 continue
113 }
114
115 // Make sure we use the correct path separator.
116 rec.Dir = filepath.FromSlash(rec.Dir)
117 }
118
119 return manifest, nil
120}
121
122// Store the location of the module, along with the version used and the module
123// root directory. The storage method loads the entire file and rewrites it
124// each time. This is only done a few times during init, so efficiency is
125// not a concern.
126func (s Storage) recordModule(rec moduleRecord) error {
127 manifest, err := s.loadManifest()
128 if err != nil {
129 // if there was a problem with the file, we will attempt to write a new
130 // one. Any non-data related error should surface there.
131 log.Printf("[WARN] error reading module manifest: %s", err)
132 }
133
134 // do nothing if we already have the exact module
135 for i, stored := range manifest.Modules {
136 if rec == stored {
137 return nil
138 }
139
140 // they are not equal, but if the storage path is the same we need to
141 // remove this record so it can be replaced.
142 if rec.Dir == stored.Dir {
143 manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1]
144 manifest.Modules = manifest.Modules[:len(manifest.Modules)-1]
145 break
146 }
147 }
148
149 // Make sure we always use a slash separator.
150 rec.Dir = filepath.ToSlash(rec.Dir)
151
152 manifest.Modules = append(manifest.Modules, rec)
153
154 js, err := json.Marshal(manifest)
155 if err != nil {
156 panic(err)
157 }
158
159 manifestPath := filepath.Join(s.StorageDir, manifestName)
160 return ioutil.WriteFile(manifestPath, js, 0644)
161}
162
163// load the manifest from dir, and return all module versions matching the
164// provided source. Records with no version info will be skipped, as they need
165// to be uniquely identified by other means.
166func (s Storage) moduleVersions(source string) ([]moduleRecord, error) {
167 manifest, err := s.loadManifest()
168 if err != nil {
169 return manifest.Modules, err
170 }
171
172 var matching []moduleRecord
173
174 for _, m := range manifest.Modules {
175 if m.Source == source && m.Version != "" {
176 log.Printf("[DEBUG] found local version %q for module %s", m.Version, m.Source)
177 matching = append(matching, m)
178 }
179 }
180
181 return matching, nil
182}
183
184func (s Storage) moduleDir(key string) (string, error) {
185 manifest, err := s.loadManifest()
186 if err != nil {
187 return "", err
188 }
189
190 for _, m := range manifest.Modules {
191 if m.Key == key {
192 return m.Dir, nil
193 }
194 }
195
196 return "", nil
197}
198
199// return only the root directory of the module stored in dir.
200func (s Storage) getModuleRoot(dir string) (string, error) {
201 manifest, err := s.loadManifest()
202 if err != nil {
203 return "", err
204 }
205
206 for _, mod := range manifest.Modules {
207 if mod.Dir == dir {
208 return mod.Root, nil
209 }
210 }
211 return "", nil
212}
213
214// record only the Root directory for the module stored at dir.
215func (s Storage) recordModuleRoot(dir, root string) error {
216 rec := moduleRecord{
217 Dir: dir,
218 Root: root,
219 }
220
221 return s.recordModule(rec)
222}
223
224func (s Storage) output(msg string) {
225 if s.Ui == nil || s.Mode == GetModeNone {
226 return
227 }
228 s.Ui.Output(msg)
229}
230
231func (s Storage) getStorage(key string, src string) (string, bool, error) {
232 storage := &getter.FolderStorage{
233 StorageDir: s.StorageDir,
234 }
235
236 log.Printf("[DEBUG] fetching module from %s", src)
237
238 // Get the module with the level specified if we were told to.
239 if s.Mode > GetModeNone {
240 log.Printf("[DEBUG] fetching %q with key %q", src, key)
241 if err := storage.Get(key, src, s.Mode == GetModeUpdate); err != nil {
242 return "", false, err
243 }
244 }
245
246 // Get the directory where the module is.
247 dir, found, err := storage.Dir(key)
248 log.Printf("[DEBUG] found %q in %q: %t", src, dir, found)
249 return dir, found, err
250}
251
252// find a stored module that's not from a registry
253func (s Storage) findModule(key string) (string, error) {
254 if s.Mode == GetModeUpdate {
255 return "", nil
256 }
257
258 return s.moduleDir(key)
259}
260
261// GetModule fetches a module source into the specified directory. This is used
262// as a convenience function by the CLI to initialize a configuration.
263func (s Storage) GetModule(dst, src string) error {
264 // reset this in case the caller was going to re-use it
265 mode := s.Mode
266 s.Mode = GetModeUpdate
267 defer func() {
268 s.Mode = mode
269 }()
270
271 rec, err := s.findRegistryModule(src, anyVersion)
272 if err != nil {
273 return err
274 }
275
276 pwd, err := os.Getwd()
277 if err != nil {
278 return err
279 }
280
281 source := rec.url
282 if source == "" {
283 source, err = getter.Detect(src, pwd, getter.Detectors)
284 if err != nil {
285 return fmt.Errorf("module %s: %s", src, err)
286 }
287 }
288
289 if source == "" {
290 return fmt.Errorf("module %q not found", src)
291 }
292
293 return GetCopy(dst, source)
294}
295
296// find a registry module
297func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, error) {
298 rec := moduleRecord{
299 Source: mSource,
300 }
301 // detect if we have a registry source
302 mod, err := regsrc.ParseModuleSource(mSource)
303 switch err {
304 case nil:
305 //ok
306 case regsrc.ErrInvalidModuleSource:
307 return rec, nil
308 default:
309 return rec, err
310 }
311 rec.registry = true
312
313 log.Printf("[TRACE] %q is a registry module", mod.Display())
314
315 versions, err := s.moduleVersions(mod.String())
316 if err != nil {
317 log.Printf("[ERROR] error looking up versions for %q: %s", mod.Display(), err)
318 return rec, err
319 }
320
321 match, err := newestRecord(versions, constraint)
322 if err != nil {
323 log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Display(), constraint, err)
324 }
325 log.Printf("[DEBUG] matched %q version %s for %s", mod, match.Version, constraint)
326
327 rec.Dir = match.Dir
328 rec.Version = match.Version
329 found := rec.Dir != ""
330
331 // we need to look up available versions
332 // Only on Get if it's not found, and unconditionally on Update
333 if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) {
334 resp, err := s.registry.Versions(mod)
335 if err != nil {
336 return rec, err
337 }
338
339 if len(resp.Modules) == 0 {
340 return rec, fmt.Errorf("module %q not found in registry", mod.Display())
341 }
342
343 match, err := newestVersion(resp.Modules[0].Versions, constraint)
344 if err != nil {
345 return rec, err
346 }
347
348 if match == nil {
349 return rec, fmt.Errorf("no versions for %q found matching %q", mod.Display(), constraint)
350 }
351
352 rec.Version = match.Version
353
354 rec.url, err = s.registry.Location(mod, rec.Version)
355 if err != nil {
356 return rec, err
357 }
358
359 // we've already validated this by now
360 host, _ := mod.SvcHost()
361 s.output(fmt.Sprintf(" Found version %s of %s on %s", rec.Version, mod.Module(), host.ForDisplay()))
362
363 }
364 return rec, nil
365}
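For orientation, the manifest this file maintains is a small JSON document named modules.json inside the storage directory, shaped like the moduleManifest and moduleRecord types above (only the exported fields are serialized). The standalone Go sketch below decodes a hypothetical manifest; the key, directory and version values are made-up examples, not output of this change.

    // Illustrative only: mirrors the moduleManifest/moduleRecord shape above.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type moduleRecord struct {
        Source, Key, Version, Dir, Root string
    }

    type moduleManifest struct {
        Modules []moduleRecord
    }

    func main() {
        // Hypothetical modules.json content.
        data := []byte(`{"Modules":[{"Source":"hashicorp/consul/aws",
            "Key":"1.consul;hashicorp/consul/aws.0.7.3",
            "Version":"0.7.3","Dir":".terraform/modules/0c8ec1","Root":""}]}`)

        var m moduleManifest
        if err := json.Unmarshal(data, &m); err != nil {
            panic(err)
        }
        fmt.Println(m.Modules[0].Source, m.Modules[0].Version) // hashicorp/consul/aws 0.7.3
    }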
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
index fc9e733..6f1ff05 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/testing.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -4,8 +4,6 @@ import (
4 "io/ioutil" 4 "io/ioutil"
5 "os" 5 "os"
6 "testing" 6 "testing"
7
8 "github.com/hashicorp/go-getter"
9) 7)
10 8
11// TestTree loads a module at the given path and returns the tree as well 9// TestTree loads a module at the given path and returns the tree as well
@@ -26,8 +24,8 @@ func TestTree(t *testing.T, path string) (*Tree, func()) {
26 } 24 }
27 25
28 // Get the child modules 26 // Get the child modules
29 s := &getter.FolderStorage{StorageDir: dir} 27 s := &Storage{StorageDir: dir, Mode: GetModeGet}
30 if err := mod.Load(s, GetModeGet); err != nil { 28 if err := mod.Load(s); err != nil {
31 t.Fatalf("err: %s", err) 29 t.Fatalf("err: %s", err)
32 return nil, nil 30 return nil, nil
33 } 31 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
index 4b0b153..f56d69b 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/tree.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -4,11 +4,14 @@ import (
4 "bufio" 4 "bufio"
5 "bytes" 5 "bytes"
6 "fmt" 6 "fmt"
7 "log"
7 "path/filepath" 8 "path/filepath"
8 "strings" 9 "strings"
9 "sync" 10 "sync"
10 11
11 "github.com/hashicorp/go-getter" 12 "github.com/hashicorp/terraform/tfdiags"
13
14 getter "github.com/hashicorp/go-getter"
12 "github.com/hashicorp/terraform/config" 15 "github.com/hashicorp/terraform/config"
13) 16)
14 17
@@ -26,6 +29,17 @@ type Tree struct {
26 children map[string]*Tree 29 children map[string]*Tree
27 path []string 30 path []string
28 lock sync.RWMutex 31 lock sync.RWMutex
32
33 // version is the final version of the config loaded for the Tree's module
34 version string
35 // source is the "source" string used to load this module. It's possible
36 // for a module source to change, but the path remains the same, preventing
37 // it from being reloaded.
38 source string
39 // parent allows us to walk back up the tree and determine if there are any
40 // versioned ancestor modules which may affect the stored location of
41 // submodules
42 parent *Tree
29} 43}
30 44
31// NewTree returns a new Tree for the given config structure. 45// NewTree returns a new Tree for the given config structure.
@@ -40,7 +54,7 @@ func NewEmptyTree() *Tree {
40 // We do this dummy load so that the tree is marked as "loaded". It 54 // We do this dummy load so that the tree is marked as "loaded". It
41 // should never fail because this is just about a no-op. If it does fail 55 // should never fail because this is just about a no-op. If it does fail
42 // we panic so we can know its a bug. 56 // we panic so we can know its a bug.
43 if err := t.Load(nil, GetModeGet); err != nil { 57 if err := t.Load(&Storage{Mode: GetModeGet}); err != nil {
44 panic(err) 58 panic(err)
45 } 59 }
46 60
@@ -126,8 +140,10 @@ func (t *Tree) Modules() []*Module {
126 result := make([]*Module, len(t.config.Modules)) 140 result := make([]*Module, len(t.config.Modules))
127 for i, m := range t.config.Modules { 141 for i, m := range t.config.Modules {
128 result[i] = &Module{ 142 result[i] = &Module{
129 Name: m.Name, 143 Name: m.Name,
130 Source: m.Source, 144 Version: m.Version,
145 Source: m.Source,
146 Providers: m.Providers,
131 } 147 }
132 } 148 }
133 149
@@ -155,81 +171,178 @@ func (t *Tree) Name() string {
155// module trees inherently require the configuration to be in a reasonably 171// module trees inherently require the configuration to be in a reasonably
156// sane state: no circular dependencies, proper module sources, etc. A full 172// sane state: no circular dependencies, proper module sources, etc. A full
157// suite of validations can be done by running Validate (after loading). 173// suite of validations can be done by running Validate (after loading).
158func (t *Tree) Load(s getter.Storage, mode GetMode) error { 174func (t *Tree) Load(s *Storage) error {
159 t.lock.Lock() 175 t.lock.Lock()
160 defer t.lock.Unlock() 176 defer t.lock.Unlock()
161 177
162 // Reset the children if we have any 178 children, err := t.getChildren(s)
163 t.children = nil 179 if err != nil {
180 return err
181 }
182
183 // Go through all the children and load them.
184 for _, c := range children {
185 if err := c.Load(s); err != nil {
186 return err
187 }
188 }
189
190 // Set our tree up
191 t.children = children
164 192
165 modules := t.Modules() 193 return nil
194}
195
196func (t *Tree) getChildren(s *Storage) (map[string]*Tree, error) {
166 children := make(map[string]*Tree) 197 children := make(map[string]*Tree)
167 198
168 // Go through all the modules and get the directory for them. 199 // Go through all the modules and get the directory for them.
169 for _, m := range modules { 200 for _, m := range t.Modules() {
170 if _, ok := children[m.Name]; ok { 201 if _, ok := children[m.Name]; ok {
171 return fmt.Errorf( 202 return nil, fmt.Errorf(
172 "module %s: duplicated. module names must be unique", m.Name) 203 "module %s: duplicated. module names must be unique", m.Name)
173 } 204 }
174 205
175 // Determine the path to this child 206 // Determine the path to this child
176 path := make([]string, len(t.path), len(t.path)+1) 207 modPath := make([]string, len(t.path), len(t.path)+1)
177 copy(path, t.path) 208 copy(modPath, t.path)
178 path = append(path, m.Name) 209 modPath = append(modPath, m.Name)
179 210
180 // Split out the subdir if we have one 211 log.Printf("[TRACE] module source: %q", m.Source)
181 source, subDir := getter.SourceDirSubdir(m.Source)
182 212
183 source, err := getter.Detect(source, t.config.Dir, getter.Detectors) 213 // add the module path to help indicate where modules with relative
214 // paths are being loaded from
215 s.output(fmt.Sprintf("- module.%s", strings.Join(modPath, ".")))
216
217 // Lookup the local location of the module.
218 // dir is the local directory where the module is stored
219 mod, err := s.findRegistryModule(m.Source, m.Version)
184 if err != nil { 220 if err != nil {
185 return fmt.Errorf("module %s: %s", m.Name, err) 221 return nil, err
186 } 222 }
187 223
224 // The key is the string that will be used to uniquely identify the Source in
225 // the local storage. The prefix digit can be incremented to
226 // invalidate the local module storage.
227 key := "1." + t.versionedPathKey(m)
228 if mod.Version != "" {
229 key += "." + mod.Version
230 }
231
232 // Check for the exact key if it's not a registry module
233 if !mod.registry {
234 mod.Dir, err = s.findModule(key)
235 if err != nil {
236 return nil, err
237 }
238 }
239
240 if mod.Dir != "" && s.Mode != GetModeUpdate {
241 // We found it locally, but in order to load the Tree we need to
242 // find out if there was another subDir stored from detection.
243 subDir, err := s.getModuleRoot(mod.Dir)
244 if err != nil {
245 // If there's a problem with the subdir record, we'll let the
246 // recordSubdir method fix it up. Any other filesystem errors
247 // will turn up again below.
248 log.Println("[WARN] error reading subdir record:", err)
249 }
250
251 fullDir := filepath.Join(mod.Dir, subDir)
252
253 child, err := NewTreeModule(m.Name, fullDir)
254 if err != nil {
255 return nil, fmt.Errorf("module %s: %s", m.Name, err)
256 }
257 child.path = modPath
258 child.parent = t
259 child.version = mod.Version
260 child.source = m.Source
261 children[m.Name] = child
262 continue
263 }
264
265 // Split out the subdir if we have one.
266 // Terraform keeps the entire requested tree, so that modules can
267 // reference sibling modules from the same archive or repo.
268 rawSource, subDir := getter.SourceDirSubdir(m.Source)
269
270 // we haven't found a source, so fall back to the go-getter detectors
271 source := mod.url
272 if source == "" {
273 source, err = getter.Detect(rawSource, t.config.Dir, getter.Detectors)
274 if err != nil {
275 return nil, fmt.Errorf("module %s: %s", m.Name, err)
276 }
277 }
278
279 log.Printf("[TRACE] detected module source %q", source)
280
188 // Check if the detector introduced something new. 281 // Check if the detector introduced something new.
189 source, subDir2 := getter.SourceDirSubdir(source) 282 // For example, the registry always adds a subdir of `//*`,
190 if subDir2 != "" { 283 // indicating that we need to strip off the first component from the
191 subDir = filepath.Join(subDir2, subDir) 284 // tar archive, though we may not yet know what it is called.
285 source, detectedSubDir := getter.SourceDirSubdir(source)
286 if detectedSubDir != "" {
287 subDir = filepath.Join(detectedSubDir, subDir)
288 }
289
290 output := ""
291 switch s.Mode {
292 case GetModeUpdate:
293 output = fmt.Sprintf(" Updating source %q", m.Source)
294 default:
295 output = fmt.Sprintf(" Getting source %q", m.Source)
192 } 296 }
297 s.output(output)
193 298
194 // Get the directory where this module is so we can load it 299 dir, ok, err := s.getStorage(key, source)
195 key := strings.Join(path, ".")
196 key = fmt.Sprintf("root.%s-%s", key, m.Source)
197 dir, ok, err := getStorage(s, key, source, mode)
198 if err != nil { 300 if err != nil {
199 return err 301 return nil, err
200 } 302 }
201 if !ok { 303 if !ok {
202 return fmt.Errorf( 304 return nil, fmt.Errorf("module %s: not found, may need to run 'terraform init'", m.Name)
203 "module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
204 } 305 }
205 306
206 // If we have a subdirectory, then merge that in 307 log.Printf("[TRACE] %q stored in %q", source, dir)
308
309 // expand and record the subDir for later
310 fullDir := dir
207 if subDir != "" { 311 if subDir != "" {
208 dir = filepath.Join(dir, subDir) 312 fullDir, err = getter.SubdirGlob(dir, subDir)
209 } 313 if err != nil {
314 return nil, err
315 }
210 316
211 // Load the configurations.Dir(source) 317 // +1 to account for the pathsep
212 children[m.Name], err = NewTreeModule(m.Name, dir) 318 if len(dir)+1 > len(fullDir) {
213 if err != nil { 319 return nil, fmt.Errorf("invalid module storage path %q", fullDir)
214 return fmt.Errorf( 320 }
215 "module %s: %s", m.Name, err) 321 subDir = fullDir[len(dir)+1:]
216 } 322 }
217 323
218 // Set the path of this child 324 // add new info to the module record
219 children[m.Name].path = path 325 mod.Key = key
220 } 326 mod.Dir = dir
327 mod.Root = subDir
221 328
222 // Go through all the children and load them. 329 // record the module in our manifest
223 for _, c := range children { 330 if err := s.recordModule(mod); err != nil {
224 if err := c.Load(s, mode); err != nil { 331 return nil, err
225 return err
226 } 332 }
227 }
228 333
229 // Set our tree up 334 child, err := NewTreeModule(m.Name, fullDir)
230 t.children = children 335 if err != nil {
336 return nil, fmt.Errorf("module %s: %s", m.Name, err)
337 }
338 child.path = modPath
339 child.parent = t
340 child.version = mod.Version
341 child.source = m.Source
342 children[m.Name] = child
343 }
231 344
232 return nil 345 return children, nil
233} 346}
234 347
235// Path is the full path to this tree. 348// Path is the full path to this tree.
@@ -272,32 +385,35 @@ func (t *Tree) String() string {
272// as verifying things such as parameters/outputs between the various modules. 385// as verifying things such as parameters/outputs between the various modules.
273// 386//
274// Load must be called prior to calling Validate or an error will be returned. 387// Load must be called prior to calling Validate or an error will be returned.
275func (t *Tree) Validate() error { 388func (t *Tree) Validate() tfdiags.Diagnostics {
389 var diags tfdiags.Diagnostics
390
276 if !t.Loaded() { 391 if !t.Loaded() {
277 return fmt.Errorf("tree must be loaded before calling Validate") 392 diags = diags.Append(fmt.Errorf(
393 "tree must be loaded before calling Validate",
394 ))
395 return diags
278 } 396 }
279 397
280 // If something goes wrong, here is our error template
281 newErr := &treeError{Name: []string{t.Name()}}
282
283 // Terraform core does not handle root module children named "root". 398 // Terraform core does not handle root module children named "root".
284 // We plan to fix this in the future but this bug was brought up in 399 // We plan to fix this in the future but this bug was brought up in
285 // the middle of a release and we don't want to introduce wide-sweeping 400 // the middle of a release and we don't want to introduce wide-sweeping
286 // changes at that time. 401 // changes at that time.
287 if len(t.path) == 1 && t.name == "root" { 402 if len(t.path) == 1 && t.name == "root" {
288 return fmt.Errorf("root module cannot contain module named 'root'") 403 diags = diags.Append(fmt.Errorf(
404 "root module cannot contain module named 'root'",
405 ))
406 return diags
289 } 407 }
290 408
291 // Validate our configuration first. 409 // Validate our configuration first.
292 if err := t.config.Validate(); err != nil { 410 diags = diags.Append(t.config.Validate())
293 newErr.Add(err)
294 }
295 411
296 // If we're the root, we do extra validation. This validation usually 412 // If we're the root, we do extra validation. This validation usually
297 // requires the entire tree (since children don't have parent pointers). 413 // requires the entire tree (since children don't have parent pointers).
298 if len(t.path) == 0 { 414 if len(t.path) == 0 {
299 if err := t.validateProviderAlias(); err != nil { 415 if err := t.validateProviderAlias(); err != nil {
300 newErr.Add(err) 416 diags = diags.Append(err)
301 } 417 }
302 } 418 }
303 419
@@ -306,20 +422,11 @@ func (t *Tree) Validate() error {
306 422
307 // Validate all our children 423 // Validate all our children
308 for _, c := range children { 424 for _, c := range children {
309 err := c.Validate() 425 childDiags := c.Validate()
310 if err == nil { 426 diags = diags.Append(childDiags)
427 if diags.HasErrors() {
311 continue 428 continue
312 } 429 }
313
314 verr, ok := err.(*treeError)
315 if !ok {
316 // Unknown error, just return...
317 return err
318 }
319
320 // Append ourselves to the error and then return
321 verr.Name = append(verr.Name, t.Name())
322 newErr.AddChild(verr)
323 } 430 }
324 431
325 // Go over all the modules and verify that any parameters are valid 432 // Go over all the modules and verify that any parameters are valid
@@ -345,9 +452,10 @@ func (t *Tree) Validate() error {
345 // Compare to the keys in our raw config for the module 452 // Compare to the keys in our raw config for the module
346 for k, _ := range m.RawConfig.Raw { 453 for k, _ := range m.RawConfig.Raw {
347 if _, ok := varMap[k]; !ok { 454 if _, ok := varMap[k]; !ok {
348 newErr.Add(fmt.Errorf( 455 diags = diags.Append(fmt.Errorf(
349 "module %s: %s is not a valid parameter", 456 "module %q: %q is not a valid argument",
350 m.Name, k)) 457 m.Name, k,
458 ))
351 } 459 }
352 460
353 // Remove the required 461 // Remove the required
@@ -356,9 +464,10 @@ func (t *Tree) Validate() error {
356 464
357 // If we have any required left over, they aren't set. 465 // If we have any required left over, they aren't set.
358 for k, _ := range requiredMap { 466 for k, _ := range requiredMap {
359 newErr.Add(fmt.Errorf( 467 diags = diags.Append(fmt.Errorf(
360 "module %s: required variable %q not set", 468 "module %q: missing required argument %q",
361 m.Name, k)) 469 m.Name, k,
470 ))
362 } 471 }
363 } 472 }
364 473
@@ -373,9 +482,10 @@ func (t *Tree) Validate() error {
373 482
374 tree, ok := children[mv.Name] 483 tree, ok := children[mv.Name]
375 if !ok { 484 if !ok {
376 newErr.Add(fmt.Errorf( 485 diags = diags.Append(fmt.Errorf(
377 "%s: undefined module referenced %s", 486 "%s: reference to undefined module %q",
378 source, mv.Name)) 487 source, mv.Name,
488 ))
379 continue 489 continue
380 } 490 }
381 491
@@ -387,14 +497,56 @@ func (t *Tree) Validate() error {
387 } 497 }
388 } 498 }
389 if !found { 499 if !found {
390 newErr.Add(fmt.Errorf( 500 diags = diags.Append(fmt.Errorf(
391 "%s: %s is not a valid output for module %s", 501 "%s: %q is not a valid output for module %q",
392 source, mv.Field, mv.Name)) 502 source, mv.Field, mv.Name,
503 ))
393 } 504 }
394 } 505 }
395 } 506 }
396 507
397 return newErr.ErrOrNil() 508 return diags
509}
510
511// versionedPathKey returns a path string with every level's full name, version
512// and source encoded. This provides a unique key for our module storage,
513// since submodules need to know which versions of their ancestor modules they
514// are loaded from.
515// For example, if module A has a subdirectory B and module A's source or
516// version is updated, B's storage key must reflect this change in order for
517// the correct version of B's source to be loaded.
518func (t *Tree) versionedPathKey(m *Module) string {
519 path := make([]string, len(t.path)+1)
520 path[len(path)-1] = m.Name + ";" + m.Source
521 // We're going to load these in order for easier reading and debugging, but
522 // in practice they only need to be unique and consistent.
523
524 p := t
525 i := len(path) - 2
526 for ; i >= 0; i-- {
527 if p == nil {
528 break
529 }
530 // we may have been loaded under a blank Tree, so always check for a name
531 // too.
532 if p.name == "" {
533 break
534 }
535 seg := p.name
536 if p.version != "" {
537 seg += "#" + p.version
538 }
539
540 if p.source != "" {
541 seg += ";" + p.source
542 }
543
544 path[i] = seg
545 p = p.parent
546 }
547
548 key := strings.Join(path, "|")
549 return key
398} 550}
399 551
400// treeError is an error use by Tree.Validate to accumulates all 552// treeError is an error use by Tree.Validate to accumulates all
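To make the storage-key scheme above concrete, here is a small sketch of the layout produced by versionedPathKey together with the "1." prefix added in getChildren. The module names, sources and version are invented for illustration; for a registry module the resolved version is additionally appended as a "." suffix to the final key.

    // Hypothetical key for a local module "vpc" declared inside a
    // versioned registry module "consul": each level contributes
    // "name;source" (ancestors also get "#version"), joined with "|".
    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        levels := []string{
            "consul#0.7.3;hashicorp/consul/aws", // versioned ancestor
            "vpc;./modules/vpc",                 // module being loaded
        }
        key := "1." + strings.Join(levels, "|")
        fmt.Println(key)
        // Output: 1.consul#0.7.3;hashicorp/consul/aws|vpc;./modules/vpc
    }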
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
index 090d4f7..f203556 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -67,7 +67,7 @@ func (t *Tree) validateProviderAlias() error {
67 67
68 // We didn't find the alias, error! 68 // We didn't find the alias, error!
69 err = multierror.Append(err, fmt.Errorf( 69 err = multierror.Append(err, fmt.Errorf(
70 "module %s: provider alias must be defined by the module or a parent: %s", 70 "module %s: provider alias must be defined by the module: %s",
71 strings.Join(pv.Path, "."), k)) 71 strings.Join(pv.Path, "."), k))
72 } 72 }
73 } 73 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/versions.go b/vendor/github.com/hashicorp/terraform/config/module/versions.go
new file mode 100644
index 0000000..8348d4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/versions.go
@@ -0,0 +1,95 @@
1package module
2
3import (
4 "errors"
5 "fmt"
6 "sort"
7
8 version "github.com/hashicorp/go-version"
9 "github.com/hashicorp/terraform/registry/response"
10)
11
12const anyVersion = ">=0.0.0"
13
14// return the newest version that satisfies the provided constraint
15func newest(versions []string, constraint string) (string, error) {
16 if constraint == "" {
17 constraint = anyVersion
18 }
19 cs, err := version.NewConstraint(constraint)
20 if err != nil {
21 return "", err
22 }
23
24 switch len(versions) {
25 case 0:
26 return "", errors.New("no versions found")
27 case 1:
28 v, err := version.NewVersion(versions[0])
29 if err != nil {
30 return "", err
31 }
32
33 if !cs.Check(v) {
34 return "", fmt.Errorf("no version found matching constraint %q", constraint)
35 }
36 return versions[0], nil
37 }
38
39 sort.Slice(versions, func(i, j int) bool {
40 // versions should have already been validated
41 // sort invalid version strings to the end
42 iv, err := version.NewVersion(versions[i])
43 if err != nil {
44 return true
45 }
46 jv, err := version.NewVersion(versions[j])
47 if err != nil {
48 return true
49 }
50 return iv.GreaterThan(jv)
51 })
52
53 // versions are now in order, so just find the first which satisfies the
54 // constraint
55 for i := range versions {
56 v, err := version.NewVersion(versions[i])
57 if err != nil {
58 continue
59 }
60 if cs.Check(v) {
61 return versions[i], nil
62 }
63 }
64
65 return "", nil
66}
67
68// return the newest *response.ModuleVersion that matches the given constraint
69// TODO: reconcile these two types and newest* functions
70func newestVersion(moduleVersions []*response.ModuleVersion, constraint string) (*response.ModuleVersion, error) {
71 var versions []string
72 modules := make(map[string]*response.ModuleVersion)
73
74 for _, m := range moduleVersions {
75 versions = append(versions, m.Version)
76 modules[m.Version] = m
77 }
78
79 match, err := newest(versions, constraint)
80 return modules[match], err
81}
82
83// return the newest moduleRecord that matches the given constraint
84func newestRecord(moduleVersions []moduleRecord, constraint string) (moduleRecord, error) {
85 var versions []string
86 modules := make(map[string]moduleRecord)
87
88 for _, m := range moduleVersions {
89 versions = append(versions, m.Version)
90 modules[m.Version] = m
91 }
92
93 match, err := newest(versions, constraint)
94 return modules[match], err
95}
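The helpers above delegate version parsing and constraint checks to hashicorp/go-version. As a self-contained illustration with made-up version strings, the same newest-first pattern looks like this:

    package main

    import (
        "fmt"
        "sort"

        version "github.com/hashicorp/go-version"
    )

    func main() {
        versions := []string{"0.4.0", "1.2.0", "1.1.3"}

        cs, err := version.NewConstraint(">= 1.0.0, < 1.2.0")
        if err != nil {
            panic(err)
        }

        // Sort newest-first, then take the first entry satisfying the
        // constraint, mirroring newest() above.
        sort.Slice(versions, func(i, j int) bool {
            iv, _ := version.NewVersion(versions[i])
            jv, _ := version.NewVersion(versions[j])
            return iv.GreaterThan(jv)
        })

        for _, s := range versions {
            if v, err := version.NewVersion(s); err == nil && cs.Check(v) {
                fmt.Println("selected:", s) // selected: 1.1.3
                return
            }
        }
    }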
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
index f8498d8..1854a8b 100644
--- a/vendor/github.com/hashicorp/terraform/config/raw_config.go
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -3,8 +3,14 @@ package config
3import ( 3import (
4 "bytes" 4 "bytes"
5 "encoding/gob" 5 "encoding/gob"
6 "errors"
7 "strconv"
6 "sync" 8 "sync"
7 9
10 "github.com/zclconf/go-cty/cty"
11 "github.com/zclconf/go-cty/cty/convert"
12
13 hcl2 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hil" 14 "github.com/hashicorp/hil"
9 "github.com/hashicorp/hil/ast" 15 "github.com/hashicorp/hil/ast"
10 "github.com/mitchellh/copystructure" 16 "github.com/mitchellh/copystructure"
@@ -27,8 +33,24 @@ const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
27// RawConfig supports a query-like interface to request 33// RawConfig supports a query-like interface to request
28// information from deep within the structure. 34// information from deep within the structure.
29type RawConfig struct { 35type RawConfig struct {
30 Key string 36 Key string
31 Raw map[string]interface{} 37
38 // Only _one_ of Raw and Body may be populated at a time.
39 //
40 // In the normal case, Raw is populated and Body is nil.
41 //
42 // When the experimental HCL2 parsing mode is enabled, "Body"
43 // is populated and RawConfig serves only to transport the hcl2.Body
44 // through the rest of Terraform core so we can ultimately decode it
45 // once its schema is known.
46 //
47 // Once we transition to HCL2 as the primary representation, RawConfig
48 // should be removed altogether and the hcl2.Body should be passed
49 // around directly.
50
51 Raw map[string]interface{}
52 Body hcl2.Body
53
32 Interpolations []ast.Node 54 Interpolations []ast.Node
33 Variables map[string]InterpolatedVariable 55 Variables map[string]InterpolatedVariable
34 56
@@ -48,6 +70,26 @@ func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
48 return result, nil 70 return result, nil
49} 71}
50 72
73// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule
74// to transport a hcl2.Body. In this mode, the publicly-readable struct
75// fields are not populated since all operations should instead be diverted
76// to the HCL2 body.
77//
78// For a RawConfig object constructed with this function, the only valid use
79// is to later retrieve the Body value and call its own methods. Callers
80// may choose to set and then later handle the Key field, in a manner
81// consistent with how it is handled by the Value method, but the Value
82// method itself must not be used.
83//
84// This is an experimental codepath to be used only by the HCL2 config loader.
85// Non-experimental parsing should _always_ use NewRawConfig to produce a
86// fully-functional RawConfig object.
87func NewRawConfigHCL2(body hcl2.Body) *RawConfig {
88 return &RawConfig{
89 Body: body,
90 }
91}
92
51// RawMap returns a copy of the RawConfig.Raw map. 93// RawMap returns a copy of the RawConfig.Raw map.
52func (r *RawConfig) RawMap() map[string]interface{} { 94func (r *RawConfig) RawMap() map[string]interface{} {
53 r.lock.Lock() 95 r.lock.Lock()
@@ -69,6 +111,10 @@ func (r *RawConfig) Copy() *RawConfig {
69 r.lock.Lock() 111 r.lock.Lock()
70 defer r.lock.Unlock() 112 defer r.lock.Unlock()
71 113
114 if r.Body != nil {
115 return NewRawConfigHCL2(r.Body)
116 }
117
72 newRaw := make(map[string]interface{}) 118 newRaw := make(map[string]interface{})
73 for k, v := range r.Raw { 119 for k, v := range r.Raw {
74 newRaw[k] = v 120 newRaw[k] = v
@@ -223,6 +269,13 @@ func (r *RawConfig) init() error {
223} 269}
224 270
225func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error { 271func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
272 if r.Body != nil {
273 // For RawConfigs created for the HCL2 experiment, callers must
274 // use the HCL2 Body API directly rather than interpolating via
275 // the RawConfig.
276 return errors.New("this feature is not yet supported under the HCL2 experiment")
277 }
278
226 config, err := copystructure.Copy(r.Raw) 279 config, err := copystructure.Copy(r.Raw)
227 if err != nil { 280 if err != nil {
228 return err 281 return err
@@ -268,6 +321,74 @@ func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
268 return result 321 return result
269} 322}
270 323
324// couldBeInteger is a helper that determines if the represented value could
325// result in an integer.
326//
327// This function only works for RawConfigs that have "Key" set, meaning that
328// a single result can be produced. Calling this function will overwrite
329// the Config and Value results to be a test value.
330//
331// This function is conservative. If there is some doubt about whether the
332// result could be an integer -- for example, if it depends on a variable
333// whose type we don't know yet -- it will still return true.
334func (r *RawConfig) couldBeInteger() bool {
335 if r.Key == "" {
336 // un-keyed RawConfigs can never produce numbers
337 return false
338 }
339 if r.Body == nil {
340 // Normal path: using the interpolator in this package
341 // Interpolate with a fixed number to verify that it's a number.
342 r.interpolate(func(root ast.Node) (interface{}, error) {
343 // Execute the node but transform the AST so that it returns
344 // a fixed value of "5" for all interpolations.
345 result, err := hil.Eval(
346 hil.FixedValueTransform(
347 root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
348 nil)
349 if err != nil {
350 return "", err
351 }
352
353 return result.Value, nil
354 })
355 _, err := strconv.ParseInt(r.Value().(string), 0, 0)
356 return err == nil
357 } else {
358 // HCL2 experiment path: using the HCL2 API via shims
359 //
360 // This path catches fewer situations because we have to assume all
361 // variables are entirely unknown in HCL2, rather than the assumption
362 // above that all variables can be numbers because names like "var.foo"
363 // are considered a single variable rather than an attribute access.
364 // This is fine in practice, because we get a definitive answer
365 // during the graph walk when we have real values to work with.
366 attrs, diags := r.Body.JustAttributes()
367 if diags.HasErrors() {
368 // This body is not just a single attribute with a value, so
369 // this can't be a number.
370 return false
371 }
372 attr, hasAttr := attrs[r.Key]
373 if !hasAttr {
374 return false
375 }
376 result, diags := hcl2EvalWithUnknownVars(attr.Expr)
377 if diags.HasErrors() {
378 // We'll conservatively assume that this error is a result of
379 // us not being ready to fully-populate the scope, and catch
380 // any further problems during the main graph walk.
381 return true
382 }
383
384 // If the result is convertible to a number then we'll allow it.
385 // We do this because an unknown string is optimistically convertible
386 // to number (might be "5") but a _known_ string "hello" is not.
387 _, err := convert.Convert(result, cty.Number)
388 return err == nil
389 }
390}
391
271// UnknownKeys returns the keys of the configuration that are unknown 392// UnknownKeys returns the keys of the configuration that are unknown
272// because they had interpolated variables that must be computed. 393// because they had interpolated variables that must be computed.
273func (r *RawConfig) UnknownKeys() []string { 394func (r *RawConfig) UnknownKeys() []string {
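The HCL2 branch of couldBeInteger above hinges on go-cty's conversion rules: an unknown string converts to number optimistically, while a known non-numeric string does not. A minimal sketch of that behaviour, assuming only the go-cty API shown in the imports above:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/convert"
    )

    func main() {
        // An unknown string might still turn out to be a number.
        _, err := convert.Convert(cty.UnknownVal(cty.String), cty.Number)
        fmt.Println("unknown string:", err == nil) // true

        // A known numeric string converts.
        _, err = convert.Convert(cty.StringVal("5"), cty.Number)
        fmt.Println(`"5":`, err == nil) // true

        // A known non-numeric string does not.
        _, err = convert.Convert(cty.StringVal("hello"), cty.Number)
        fmt.Println(`"hello":`, err == nil) // false
    }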
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
index ea68b4f..8a55e06 100644
--- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -2,7 +2,7 @@
2 2
3package config 3package config
4 4
5import "fmt" 5import "strconv"
6 6
7const _ResourceMode_name = "ManagedResourceModeDataResourceMode" 7const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
8 8
@@ -10,7 +10,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35}
10 10
11func (i ResourceMode) String() string { 11func (i ResourceMode) String() string {
12 if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { 12 if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
13 return fmt.Sprintf("ResourceMode(%d)", i) 13 return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
14 } 14 }
15 return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] 15 return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
16} 16}
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
index f7bfadd..831fc77 100644
--- a/vendor/github.com/hashicorp/terraform/config/testing.go
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -6,6 +6,8 @@ import (
6 6
7// TestRawConfig is used to create a RawConfig for testing. 7// TestRawConfig is used to create a RawConfig for testing.
8func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig { 8func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
9 t.Helper()
10
9 cfg, err := NewRawConfig(c) 11 cfg, err := NewRawConfig(c)
10 if err != nil { 12 if err != nil {
11 t.Fatalf("err: %s", err) 13 t.Fatalf("err: %s", err)
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
index f8776bc..b7eb10c 100644
--- a/vendor/github.com/hashicorp/terraform/dag/dag.go
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -106,7 +106,7 @@ func (g *AcyclicGraph) TransitiveReduction() {
106 uTargets := g.DownEdges(u) 106 uTargets := g.DownEdges(u)
107 vs := AsVertexList(g.DownEdges(u)) 107 vs := AsVertexList(g.DownEdges(u))
108 108
109 g.DepthFirstWalk(vs, func(v Vertex, d int) error { 109 g.depthFirstWalk(vs, false, func(v Vertex, d int) error {
110 shared := uTargets.Intersection(g.DownEdges(v)) 110 shared := uTargets.Intersection(g.DownEdges(v))
111 for _, vPrime := range AsVertexList(shared) { 111 for _, vPrime := range AsVertexList(shared) {
112 g.RemoveEdge(BasicEdge(u, vPrime)) 112 g.RemoveEdge(BasicEdge(u, vPrime))
@@ -187,9 +187,18 @@ type vertexAtDepth struct {
187} 187}
188 188
189// depthFirstWalk does a depth-first walk of the graph starting from 189// depthFirstWalk does a depth-first walk of the graph starting from
190// the vertices in start. This is not exported now but it would make sense 190// the vertices in start.
191// to export this publicly at some point.
192func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { 191func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
192 return g.depthFirstWalk(start, true, f)
193}
194
195// This internal method provides the option of not sorting the vertices during
196// the walk, which we use for the transitive reduction.
197// Some configurations can lead to fully-connected subgraphs, which makes our
198// transitive reduction algorithm O(n^3). This is still passable for the size
199// of our graphs, but the additional n^2 sort operations would make this
200// impractical to compute in a reasonable amount of time.
201func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error {
193 defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") 202 defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
194 203
195 seen := make(map[Vertex]struct{}) 204 seen := make(map[Vertex]struct{})
@@ -219,7 +228,11 @@ func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
219 228
220 // Visit targets of this in a consistent order. 229 // Visit targets of this in a consistent order.
221 targets := AsVertexList(g.DownEdges(current.Vertex)) 230 targets := AsVertexList(g.DownEdges(current.Vertex))
222 sort.Sort(byVertexName(targets)) 231
232 if sorted {
233 sort.Sort(byVertexName(targets))
234 }
235
223 for _, t := range targets { 236 for _, t := range targets {
224 frontier = append(frontier, &vertexAtDepth{ 237 frontier = append(frontier, &vertexAtDepth{
225 Vertex: t, 238 Vertex: t,
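As a rough sense of scale for the comment above: in a fully-connected subgraph of n vertices the reduction already performs on the order of n^3 intersection checks (about 10^9 steps at n = 1,000), and sorting each visited vertex's roughly n targets on each of the roughly n^2 DFS visits would add on the order of n^3 log n comparisons on top of that, which is why TransitiveReduction uses the unsorted walk.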
diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go
index 16d5dd6..c567d27 100644
--- a/vendor/github.com/hashicorp/terraform/dag/marshal.go
+++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go
@@ -273,6 +273,9 @@ func (e *encoder) Encode(i interface{}) {
273} 273}
274 274
275func (e *encoder) Add(v Vertex) { 275func (e *encoder) Add(v Vertex) {
276 if e == nil {
277 return
278 }
276 e.Encode(marshalTransform{ 279 e.Encode(marshalTransform{
277 Type: typeTransform, 280 Type: typeTransform,
278 AddVertex: newMarshalVertex(v), 281 AddVertex: newMarshalVertex(v),
@@ -281,6 +284,9 @@ func (e *encoder) Add(v Vertex) {
281 284
282// Remove records the removal of Vertex v. 285// Remove records the removal of Vertex v.
283func (e *encoder) Remove(v Vertex) { 286func (e *encoder) Remove(v Vertex) {
287 if e == nil {
288 return
289 }
284 e.Encode(marshalTransform{ 290 e.Encode(marshalTransform{
285 Type: typeTransform, 291 Type: typeTransform,
286 RemoveVertex: newMarshalVertex(v), 292 RemoveVertex: newMarshalVertex(v),
@@ -288,6 +294,9 @@ func (e *encoder) Remove(v Vertex) {
288} 294}
289 295
290func (e *encoder) Connect(edge Edge) { 296func (e *encoder) Connect(edge Edge) {
297 if e == nil {
298 return
299 }
291 e.Encode(marshalTransform{ 300 e.Encode(marshalTransform{
292 Type: typeTransform, 301 Type: typeTransform,
293 AddEdge: newMarshalEdge(edge), 302 AddEdge: newMarshalEdge(edge),
@@ -295,6 +304,9 @@ func (e *encoder) Connect(edge Edge) {
295} 304}
296 305
297func (e *encoder) RemoveEdge(edge Edge) { 306func (e *encoder) RemoveEdge(edge Edge) {
307 if e == nil {
308 return
309 }
298 e.Encode(marshalTransform{ 310 e.Encode(marshalTransform{
299 Type: typeTransform, 311 Type: typeTransform,
300 RemoveEdge: newMarshalEdge(edge), 312 RemoveEdge: newMarshalEdge(edge),
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
index 23c87ad..f03b100 100644
--- a/vendor/github.com/hashicorp/terraform/dag/walk.go
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -166,7 +166,7 @@ func (w *Walker) Update(g *AcyclicGraph) {
166 w.wait.Add(1) 166 w.wait.Add(1)
167 167
168 // Add to our own set so we know about it already 168 // Add to our own set so we know about it already
169 log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v)) 169 log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v))
170 w.vertices.Add(raw) 170 w.vertices.Add(raw)
171 171
172 // Initialize the vertex info 172 // Initialize the vertex info
@@ -198,7 +198,7 @@ func (w *Walker) Update(g *AcyclicGraph) {
198 // Delete it out of the map 198 // Delete it out of the map
199 delete(w.vertexMap, v) 199 delete(w.vertexMap, v)
200 200
201 log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v)) 201 log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v))
202 w.vertices.Delete(raw) 202 w.vertices.Delete(raw)
203 } 203 }
204 204
@@ -229,7 +229,7 @@ func (w *Walker) Update(g *AcyclicGraph) {
229 changedDeps.Add(waiter) 229 changedDeps.Add(waiter)
230 230
231 log.Printf( 231 log.Printf(
232 "[DEBUG] dag/walk: added edge: %q waiting on %q", 232 "[TRACE] dag/walk: added edge: %q waiting on %q",
233 VertexName(waiter), VertexName(dep)) 233 VertexName(waiter), VertexName(dep))
234 w.edges.Add(raw) 234 w.edges.Add(raw)
235 } 235 }
@@ -253,7 +253,7 @@ func (w *Walker) Update(g *AcyclicGraph) {
253 changedDeps.Add(waiter) 253 changedDeps.Add(waiter)
254 254
255 log.Printf( 255 log.Printf(
256 "[DEBUG] dag/walk: removed edge: %q waiting on %q", 256 "[TRACE] dag/walk: removed edge: %q waiting on %q",
257 VertexName(waiter), VertexName(dep)) 257 VertexName(waiter), VertexName(dep))
258 w.edges.Delete(raw) 258 w.edges.Delete(raw)
259 } 259 }
@@ -296,7 +296,7 @@ func (w *Walker) Update(g *AcyclicGraph) {
296 info.depsCancelCh = cancelCh 296 info.depsCancelCh = cancelCh
297 297
298 log.Printf( 298 log.Printf(
299 "[DEBUG] dag/walk: dependencies changed for %q, sending new deps", 299 "[TRACE] dag/walk: dependencies changed for %q, sending new deps",
300 VertexName(v)) 300 VertexName(v))
301 301
302 // Start the waiter 302 // Start the waiter
@@ -383,10 +383,10 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
383 // Run our callback or note that our upstream failed 383 // Run our callback or note that our upstream failed
384 var err error 384 var err error
385 if depsSuccess { 385 if depsSuccess {
386 log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v)) 386 log.Printf("[TRACE] dag/walk: walking %q", VertexName(v))
387 err = w.Callback(v) 387 err = w.Callback(v)
388 } else { 388 } else {
389 log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v)) 389 log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v))
390 err = errWalkUpstream 390 err = errWalkUpstream
391 } 391 }
392 392
@@ -423,7 +423,7 @@ func (w *Walker) waitDeps(
423 return 423 return
424 424
425 case <-time.After(time.Second * 5): 425 case <-time.After(time.Second * 5):
426 log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q", 426 log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q",
427 VertexName(v), VertexName(dep)) 427 VertexName(v), VertexName(dep))
428 } 428 }
429 } 429 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
deleted file mode 100644
index 18b8837..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
+++ /dev/null
@@ -1,154 +0,0 @@
1// experiment package contains helper functions for tracking experimental
2// features throughout Terraform.
3//
4// This package should be used for creating, enabling, querying, and deleting
5// experimental features. By unifying all of that onto a single interface,
6// we can have the Go compiler help us by enforcing every place we touch
7// an experimental feature.
8//
9// To create a new experiment:
10//
11// 1. Add the experiment to the global vars list below, prefixed with X_
12//
13// 2. Add the experiment variable to the All list in the init() function
14//
15// 3. Use it!
16//
17// To remove an experiment:
18//
19// 1. Delete the experiment global var.
20//
21// 2. Try to compile and fix all the places where the var was referenced.
22//
23// To use an experiment:
24//
25// 1. Use Flag() if you want the experiment to be available from the CLI.
26//
27// 2. Use Enabled() to check whether it is enabled.
28//
29// As a general user:
30//
31// 1. The `-Xexperiment-name` flag
32// 2. The `TF_X_<experiment-name>` env var.
33// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
34// without human verifications.
35//
36package experiment
37
38import (
39 "flag"
40 "fmt"
41 "os"
42 "strconv"
43 "strings"
44 "sync"
45)
46
47// The experiments that are available are listed below. Any package in
48// Terraform defining an experiment should define the experiments below.
49// By keeping them all within the experiment package we force a single point
50// of definition and use. This allows the compiler to enforce references
51// so it becomes easy to remove the features.
52var (
53 // Shadow graph. This is already on by default. Disabling it will be
54 // allowed for awhile in order for it to not block operations.
55 X_shadow = newBasicID("shadow", "SHADOW", false)
56)
57
58// Global variables this package uses because we are a package
59// with global state.
60var (
61 // All is the list of all experiments. Do not modify this.
62 All []ID
63
64 // enabled keeps track of what flags have been enabled
65 enabled map[string]bool
66 enabledLock sync.Mutex
67
68 // Hidden "experiment" that forces all others to be on without verification
69 x_force = newBasicID("force", "FORCE", false)
70)
71
72func init() {
73 // The list of all experiments, update this when an experiment is added.
74 All = []ID{
75 X_shadow,
76 x_force,
77 }
78
79 // Load
80 reload()
81}
82
83// reload is used by tests to reload the global state. This is called by
84// init publicly.
85func reload() {
86 // Initialize
87 enabledLock.Lock()
88 enabled = make(map[string]bool)
89 enabledLock.Unlock()
90
91 // Set defaults and check env vars
92 for _, id := range All {
93 // Get the default value
94 def := id.Default()
95
96 // If we set it in the env var, default it to true
97 key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
98 if v := os.Getenv(key); v != "" {
99 def = v != "0"
100 }
101
102 // Set the default
103 SetEnabled(id, def)
104 }
105}
106
107// Enabled returns whether an experiment has been enabled or not.
108func Enabled(id ID) bool {
109 enabledLock.Lock()
110 defer enabledLock.Unlock()
111 return enabled[id.Flag()]
112}
113
114// SetEnabled sets an experiment to enabled/disabled. Please check with
115// the experiment docs for when calling this actually affects the experiment.
116func SetEnabled(id ID, v bool) {
117 enabledLock.Lock()
118 defer enabledLock.Unlock()
119 enabled[id.Flag()] = v
120}
121
122// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
123// advises users of this package to not verify with the user that they want
124// experimental behavior and to just continue with it.
125func Force() bool {
126 return Enabled(x_force)
127}
128
129// Flag configures the given FlagSet with the flags to configure
130// all active experiments.
131func Flag(fs *flag.FlagSet) {
132 for _, id := range All {
133 desc := id.Flag()
134 key := fmt.Sprintf("X%s", id.Flag())
135 fs.Var(&idValue{X: id}, key, desc)
136 }
137}
138
139// idValue implements flag.Value for setting the enabled/disabled state
140// of an experiment from the CLI.
141type idValue struct {
142 X ID
143}
144
145func (v *idValue) IsBoolFlag() bool { return true }
146func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) }
147func (v *idValue) Set(raw string) error {
148 b, err := strconv.ParseBool(raw)
149 if err == nil {
150 SetEnabled(v.X, b)
151 }
152
153 return err
154}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
deleted file mode 100644
index 8e2f707..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
+++ /dev/null
@@ -1,34 +0,0 @@
1package experiment
2
3// ID represents an experimental feature.
4//
5// The global vars defined on this package should be used as ID values.
6// This interface is purposely not implement-able outside of this package
7// so that we can rely on the Go compiler to enforce all experiment references.
8type ID interface {
9 Env() string
10 Flag() string
11 Default() bool
12
13 unexported() // So the ID can't be implemented externally.
14}
15
16// basicID implements ID.
17type basicID struct {
18 EnvValue string
19 FlagValue string
20 DefaultValue bool
21}
22
23func newBasicID(flag, env string, def bool) ID {
24 return &basicID{
25 EnvValue: env,
26 FlagValue: flag,
27 DefaultValue: def,
28 }
29}
30
31func (id *basicID) Env() string { return id.EnvValue }
32func (id *basicID) Flag() string { return id.FlagValue }
33func (id *basicID) Default() bool { return id.DefaultValue }
34func (id *basicID) unexported() {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
index 64d8263..6ccc523 100644
--- a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -1,6 +1,8 @@
1package hashcode 1package hashcode
2 2
3import ( 3import (
4 "bytes"
5 "fmt"
4 "hash/crc32" 6 "hash/crc32"
5) 7)
6 8
@@ -20,3 +22,14 @@ func String(s string) int {
20 // v == MinInt 22 // v == MinInt
21 return 0 23 return 0
22} 24}
25
26// Strings hashes a list of strings to a unique hashcode.
27func Strings(strings []string) string {
28 var buf bytes.Buffer
29
30 for _, s := range strings {
31 buf.WriteString(fmt.Sprintf("%s-", s))
32 }
33
34 return fmt.Sprintf("%d", String(buf.String()))
35}
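A brief usage sketch of the new Strings helper above; the exact numeric result depends on the CRC32 of the joined input, so no specific value is shown.

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/hashcode"
    )

    func main() {
        // Internally joins the inputs as "us-west-2a-us-west-2b-" and hashes
        // that with hashcode.String, returning the number as a string.
        h := hashcode.Strings([]string{"us-west-2a", "us-west-2b"})
        fmt.Println(h)
    }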
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
index 433cd77..6bd92f7 100644
--- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -18,7 +18,7 @@ const (
18 EnvLogFile = "TF_LOG_PATH" // Set to a file 18 EnvLogFile = "TF_LOG_PATH" // Set to a file
19) 19)
20 20
21var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} 21var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
22 22
23// LogOutput determines where we should send logs (if anywhere) and the log level. 23// LogOutput determines where we should send logs (if anywhere) and the log level.
24func LogOutput() (logOutput io.Writer, err error) { 24func LogOutput() (logOutput io.Writer, err error) {
@@ -40,7 +40,7 @@ func LogOutput() (logOutput io.Writer, err error) {
40 40
41 // This was the default since the beginning 41 // This was the default since the beginning
42 logOutput = &logutils.LevelFilter{ 42 logOutput = &logutils.LevelFilter{
43 Levels: validLevels, 43 Levels: ValidLevels,
44 MinLevel: logutils.LogLevel(logLevel), 44 MinLevel: logutils.LogLevel(logLevel),
45 Writer: logOutput, 45 Writer: logOutput,
46 } 46 }
@@ -77,7 +77,7 @@ func LogLevel() string {
77 logLevel = strings.ToUpper(envLevel) 77 logLevel = strings.ToUpper(envLevel)
78 } else { 78 } else {
79 log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", 79 log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
80 envLevel, validLevels) 80 envLevel, ValidLevels)
81 } 81 }
82 82
83 return logLevel 83 return logLevel
@@ -90,7 +90,7 @@ func IsDebugOrHigher() bool {
90} 90}
91 91
92func isValidLogLevel(level string) bool { 92func isValidLogLevel(level string) bool {
93 for _, l := range validLevels { 93 for _, l := range ValidLevels {
94 if strings.ToUpper(level) == string(l) { 94 if strings.ToUpper(level) == string(l) {
95 return true 95 return true
96 } 96 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
index 4477924..bddabe6 100644
--- a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
@@ -1,9 +1,12 @@
1package logging 1package logging
2 2
3import ( 3import (
4 "bytes"
5 "encoding/json"
4 "log" 6 "log"
5 "net/http" 7 "net/http"
6 "net/http/httputil" 8 "net/http/httputil"
9 "strings"
7) 10)
8 11
9type transport struct { 12type transport struct {
@@ -15,7 +18,7 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
15 if IsDebugOrHigher() { 18 if IsDebugOrHigher() {
16 reqData, err := httputil.DumpRequestOut(req, true) 19 reqData, err := httputil.DumpRequestOut(req, true)
17 if err == nil { 20 if err == nil {
18 log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData)) 21 log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData))
19 } else { 22 } else {
20 log.Printf("[ERROR] %s API Request error: %#v", t.name, err) 23 log.Printf("[ERROR] %s API Request error: %#v", t.name, err)
21 } 24 }
@@ -29,7 +32,7 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
29 if IsDebugOrHigher() { 32 if IsDebugOrHigher() {
30 respData, err := httputil.DumpResponse(resp, true) 33 respData, err := httputil.DumpResponse(resp, true)
31 if err == nil { 34 if err == nil {
32 log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData)) 35 log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData))
33 } else { 36 } else {
34 log.Printf("[ERROR] %s API Response error: %#v", t.name, err) 37 log.Printf("[ERROR] %s API Response error: %#v", t.name, err)
35 } 38 }
@@ -42,6 +45,20 @@ func NewTransport(name string, t http.RoundTripper) *transport {
42 return &transport{name, t} 45 return &transport{name, t}
43} 46}
44 47
48// prettyPrintJsonLines iterates through a []byte line-by-line,
49// transforming any lines that are complete json into pretty-printed json.
50func prettyPrintJsonLines(b []byte) string {
51 parts := strings.Split(string(b), "\n")
52 for i, p := range parts {
53 if b := []byte(p); json.Valid(b) {
54 var out bytes.Buffer
55 json.Indent(&out, b, "", " ")
56 parts[i] = out.String()
57 }
58 }
59 return strings.Join(parts, "\n")
60}
61
45const logReqMsg = `%s API Request Details: 62const logReqMsg = `%s API Request Details:
46---[ REQUEST ]--------------------------------------- 63---[ REQUEST ]---------------------------------------
47%s 64%s
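
With this change, any HTTP client wrapped in logging.NewTransport now gets complete JSON request and response bodies re-indented before they reach the log. A sketch of the typical provider usage; the transport name and URL are placeholders:

    package main

    import (
        "net/http"

        "github.com/hashicorp/terraform/helper/logging"
    )

    func main() {
        client := &http.Client{
            // When TF_LOG is DEBUG or higher, request/response dumps have their
            // complete JSON lines pretty-printed by prettyPrintJsonLines.
            Transport: logging.NewTransport("ExampleAPI", http.DefaultTransport),
        }
        resp, err := client.Get("https://api.example.com/v1/things") // placeholder URL
        if err == nil {
            resp.Body.Close()
        }
    }
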
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
index 1cde67c..4494955 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/id.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -18,6 +18,11 @@ func UniqueId() string {
18 return PrefixedUniqueId(UniqueIdPrefix) 18 return PrefixedUniqueId(UniqueIdPrefix)
19} 19}
20 20
21// UniqueIDSuffixLength is the string length of the suffix generated by
22// PrefixedUniqueId. This can be used by length validation functions to
23// ensure prefixes are the correct length for the target field.
24const UniqueIDSuffixLength = 26
25
21// Helper for a resource to generate a unique identifier w/ given prefix 26// Helper for a resource to generate a unique identifier w/ given prefix
22// 27//
23// After the prefix, the ID consists of an incrementing 26 digit value (to match 28// After the prefix, the ID consists of an incrementing 26 digit value (to match
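
UniqueIDSuffixLength exists so prefix validation can account for the 26 characters that PrefixedUniqueId appends. A sketch under the assumption of a hypothetical 64-character upstream limit:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/helper/resource"
    )

    // validateNamePrefix is hypothetical: it checks that a user-supplied prefix
    // still fits once the 26-character unique suffix is appended.
    func validateNamePrefix(prefix string) error {
        const maxLen = 64 // example API limit, not taken from this change
        if len(prefix)+resource.UniqueIDSuffixLength > maxLen {
            return fmt.Errorf("prefix %q is too long; at most %d characters are allowed",
                prefix, maxLen-resource.UniqueIDSuffixLength)
        }
        return nil
    }

    func main() {
        fmt.Println(validateNamePrefix("web-"))        // <nil>
        fmt.Println(resource.PrefixedUniqueId("web-")) // "web-" plus a 26-character suffix
    }
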
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
index 37c586a..c34e21b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -46,7 +46,7 @@ type StateChangeConf struct {
46// If the Timeout is exceeded before reaching the Target state, return an 46// If the Timeout is exceeded before reaching the Target state, return an
47// error. 47// error.
48// 48//
49// Otherwise, result the result of the first call to the Refresh function to 49// Otherwise, the result is the result of the first call to the Refresh function to
50// reach the target state. 50// reach the target state.
51func (conf *StateChangeConf) WaitForState() (interface{}, error) { 51func (conf *StateChangeConf) WaitForState() (interface{}, error) {
52 log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) 52 log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
index d7de1a0..b97673f 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -11,11 +11,13 @@ import (
11 "reflect" 11 "reflect"
12 "regexp" 12 "regexp"
13 "strings" 13 "strings"
14 "syscall"
14 "testing" 15 "testing"
15 16
16 "github.com/davecgh/go-spew/spew" 17 "github.com/davecgh/go-spew/spew"
17 "github.com/hashicorp/go-getter" 18 "github.com/hashicorp/errwrap"
18 "github.com/hashicorp/go-multierror" 19 "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/logutils"
19 "github.com/hashicorp/terraform/config/module" 21 "github.com/hashicorp/terraform/config/module"
20 "github.com/hashicorp/terraform/helper/logging" 22 "github.com/hashicorp/terraform/helper/logging"
21 "github.com/hashicorp/terraform/terraform" 23 "github.com/hashicorp/terraform/terraform"
@@ -186,6 +188,10 @@ type TestCheckFunc func(*terraform.State) error
186// ImportStateCheckFunc is the check function for ImportState tests 188// ImportStateCheckFunc is the check function for ImportState tests
187type ImportStateCheckFunc func([]*terraform.InstanceState) error 189type ImportStateCheckFunc func([]*terraform.InstanceState) error
188 190
191// ImportStateIdFunc is an ID generation function to help with complex ID
192// generation for ImportState tests.
193type ImportStateIdFunc func(*terraform.State) (string, error)
194
189// TestCase is a single acceptance test case used to test the apply/destroy 195// TestCase is a single acceptance test case used to test the apply/destroy
190// lifecycle of a resource in a specific configuration. 196// lifecycle of a resource in a specific configuration.
191// 197//
@@ -260,6 +266,15 @@ type TestStep struct {
260 // below. 266 // below.
261 PreConfig func() 267 PreConfig func()
262 268
269 // Taint is a list of resource addresses to taint prior to the execution of
270 // the step. Be sure to only include this at a step where the referenced
271 // address will be present in state, as it will fail the test if the resource
272 // is missing.
273 //
274 // This option is ignored on ImportState tests, and currently only works for
275 // resources in the root module path.
276 Taint []string
277
263 //--------------------------------------------------------------- 278 //---------------------------------------------------------------
264 // Test modes. One of the following groups of settings must be 279 // Test modes. One of the following groups of settings must be
265 // set to determine what the test step will do. Ideally we would've 280 // set to determine what the test step will do. Ideally we would've
@@ -304,10 +319,19 @@ type TestStep struct {
304 // no-op plans 319 // no-op plans
305 PlanOnly bool 320 PlanOnly bool
306 321
322 // PreventDiskCleanup can be set to true for testing terraform modules which
323 // require access to disk at runtime. Note that this will leave files in the
324 // temp folder
325 PreventDiskCleanup bool
326
307 // PreventPostDestroyRefresh can be set to true for cases where data sources 327 // PreventPostDestroyRefresh can be set to true for cases where data sources
308 // are tested alongside real resources 328 // are tested alongside real resources
309 PreventPostDestroyRefresh bool 329 PreventPostDestroyRefresh bool
310 330
331 // SkipFunc is called before applying config, but after PreConfig
332 // This is useful for defining test steps with platform-dependent checks
333 SkipFunc func() (bool, error)
334
311 //--------------------------------------------------------------- 335 //---------------------------------------------------------------
312 // ImportState testing 336 // ImportState testing
313 //--------------------------------------------------------------- 337 //---------------------------------------------------------------
@@ -329,6 +353,12 @@ type TestStep struct {
329 // the unset ImportStateId field. 353 // the unset ImportStateId field.
330 ImportStateIdPrefix string 354 ImportStateIdPrefix string
331 355
356 // ImportStateIdFunc is a function that can be used to dynamically generate
357 // the ID for the ImportState tests. It is sent the state, which can be
358 // checked to derive the attributes necessary and generate the string in the
359 // desired format.
360 ImportStateIdFunc ImportStateIdFunc
361
332 // ImportStateCheck checks the results of ImportState. It should be 362 // ImportStateCheck checks the results of ImportState. It should be
333 // used to verify that the resulting value of ImportState has the 363 // used to verify that the resulting value of ImportState has the
334 // proper resources, IDs, and attributes. 364 // proper resources, IDs, and attributes.
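
Taken together, the new TestStep fields allow a step to be skipped on unsupported platforms, a resource to be pre-tainted to force replacement, and an import ID to be computed from prior state. A sketch only: the resource type, attribute names, config fixture, testAccProviders, and the imports (fmt, runtime, testing, helper/resource, terraform) are assumed to exist in the provider's test code:

    func TestAccExampleThing_basic(t *testing.T) {
        resource.Test(t, resource.TestCase{
            Providers: testAccProviders, // assumed provider test setup
            Steps: []resource.TestStep{
                {
                    Config: testAccExampleThingConfig, // hypothetical HCL fixture
                    SkipFunc: func() (bool, error) {
                        // Skip this step on platforms where the feature is unavailable.
                        return runtime.GOOS == "windows", nil
                    },
                },
                {
                    // Re-apply the same config after tainting, forcing the
                    // resource to be destroyed and recreated.
                    Config: testAccExampleThingConfig,
                    Taint:  []string{"example_thing.foo"},
                },
                {
                    ResourceName: "example_thing.foo",
                    ImportState:  true,
                    // Derive a composite import ID from attributes in state.
                    ImportStateIdFunc: func(s *terraform.State) (string, error) {
                        rs, ok := s.RootModule().Resources["example_thing.foo"]
                        if !ok {
                            return "", fmt.Errorf("example_thing.foo not found in state")
                        }
                        return rs.Primary.Attributes["parent_id"] + "/" + rs.Primary.ID, nil
                    },
                    ImportStateVerify: true,
                },
            },
        })
    }
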
@@ -345,6 +375,60 @@ type TestStep struct {
345 ImportStateVerifyIgnore []string 375 ImportStateVerifyIgnore []string
346} 376}
347 377
378// Set to a file mask in sprintf format where %s is test name
379const EnvLogPathMask = "TF_LOG_PATH_MASK"
380
381func LogOutput(t TestT) (logOutput io.Writer, err error) {
382 logOutput = ioutil.Discard
383
384 logLevel := logging.LogLevel()
385 if logLevel == "" {
386 return
387 }
388
389 logOutput = os.Stderr
390
391 if logPath := os.Getenv(logging.EnvLogFile); logPath != "" {
392 var err error
393 logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
394 if err != nil {
395 return nil, err
396 }
397 }
398
399 if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" {
400 // Escape special characters which may appear if we have subtests
401 testName := strings.Replace(t.Name(), "/", "__", -1)
402
403 logPath := fmt.Sprintf(logPathMask, testName)
404 var err error
405 logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
406 if err != nil {
407 return nil, err
408 }
409 }
410
411 // This was the default since the beginning
412 logOutput = &logutils.LevelFilter{
413 Levels: logging.ValidLevels,
414 MinLevel: logutils.LogLevel(logLevel),
415 Writer: logOutput,
416 }
417
418 return
419}
420
421// ParallelTest performs an acceptance test on a resource, allowing concurrency
422// with other ParallelTest.
423//
424// Tests will fail if they do not properly handle conditions to allow multiple
425// tests to occur against the same resource or service (e.g. random naming).
426// All other requirements of the Test function also apply to this function.
427func ParallelTest(t TestT, c TestCase) {
428 t.Parallel()
429 Test(t, c)
430}
431
348// Test performs an acceptance test on a resource. 432// Test performs an acceptance test on a resource.
349// 433//
350// Tests are not run unless an environmental variable "TF_ACC" is 434// Tests are not run unless an environmental variable "TF_ACC" is
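
ParallelTest only marks the test as parallel before delegating to Test, which is why the TestT interface now also requires Name() and Parallel(); combined with TF_LOG_PATH_MASK, each parallel test can write its logs to its own file. A short sketch (test name, fixtures, and mask path are illustrative; imports as in the previous sketch):

    // Run with e.g. TF_ACC=1 TF_LOG=DEBUG TF_LOG_PATH_MASK="/tmp/tf-%s.log";
    // this test's logs then land in /tmp/tf-TestAccExampleThing_parallel.log
    // (slashes in subtest names are rewritten to "__").
    func TestAccExampleThing_parallel(t *testing.T) {
        resource.ParallelTest(t, resource.TestCase{
            Providers: testAccProviders,
            Steps: []resource.TestStep{
                {Config: testAccExampleThingConfig},
            },
        })
    }
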
@@ -366,7 +450,7 @@ func Test(t TestT, c TestCase) {
366 return 450 return
367 } 451 }
368 452
369 logWriter, err := logging.LogOutput() 453 logWriter, err := LogOutput(t)
370 if err != nil { 454 if err != nil {
371 t.Error(fmt.Errorf("error setting up logging: %s", err)) 455 t.Error(fmt.Errorf("error setting up logging: %s", err))
372 } 456 }
@@ -398,7 +482,18 @@ func Test(t TestT, c TestCase) {
398 errored := false 482 errored := false
399 for i, step := range c.Steps { 483 for i, step := range c.Steps {
400 var err error 484 var err error
401 log.Printf("[WARN] Test: Executing step %d", i) 485 log.Printf("[DEBUG] Test: Executing step %d", i)
486
487 if step.SkipFunc != nil {
488 skip, err := step.SkipFunc()
489 if err != nil {
490 t.Fatal(err)
491 }
492 if skip {
493 log.Printf("[WARN] Skipping step %d", i)
494 continue
495 }
496 }
402 497
403 if step.Config == "" && !step.ImportState { 498 if step.Config == "" && !step.ImportState {
404 err = fmt.Errorf( 499 err = fmt.Errorf(
@@ -418,6 +513,15 @@ func Test(t TestT, c TestCase) {
418 } 513 }
419 } 514 }
420 515
516 // If we expected an error, but did not get one, fail
517 if err == nil && step.ExpectError != nil {
518 errored = true
519 t.Error(fmt.Sprintf(
520 "Step %d, no error received, but expected a match to:\n\n%s\n\n",
521 i, step.ExpectError))
522 break
523 }
524
421 // If there was an error, exit 525 // If there was an error, exit
422 if err != nil { 526 if err != nil {
423 // Perhaps we expected an error? Check if it matches 527 // Perhaps we expected an error? Check if it matches
@@ -485,6 +589,7 @@ func Test(t TestT, c TestCase) {
485 Config: lastStep.Config, 589 Config: lastStep.Config,
486 Check: c.CheckDestroy, 590 Check: c.CheckDestroy,
487 Destroy: true, 591 Destroy: true,
592 PreventDiskCleanup: lastStep.PreventDiskCleanup,
488 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, 593 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
489 } 594 }
490 595
@@ -593,18 +698,12 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
593 if err != nil { 698 if err != nil {
594 return err 699 return err
595 } 700 }
596 if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { 701 if diags := ctx.Validate(); len(diags) > 0 {
597 if len(es) > 0 { 702 if diags.HasErrors() {
598 estrs := make([]string, len(es)) 703 return errwrap.Wrapf("config is invalid: {{err}}", diags.Err())
599 for i, e := range es {
600 estrs[i] = e.Error()
601 }
602 return fmt.Errorf(
603 "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
604 ws, estrs)
605 } 704 }
606 705
607 log.Printf("[WARN] Config warnings: %#v", ws) 706 log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error())
608 } 707 }
609 708
610 // Refresh! 709 // Refresh!
@@ -657,9 +756,7 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
657 return nil 756 return nil
658} 757}
659 758
660func testModule( 759func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) {
661 opts terraform.ContextOpts,
662 step TestStep) (*module.Tree, error) {
663 if step.PreConfig != nil { 760 if step.PreConfig != nil {
664 step.PreConfig() 761 step.PreConfig()
665 } 762 }
@@ -669,7 +766,12 @@ func testModule(
669 return nil, fmt.Errorf( 766 return nil, fmt.Errorf(
670 "Error creating temporary directory for config: %s", err) 767 "Error creating temporary directory for config: %s", err)
671 } 768 }
672 defer os.RemoveAll(cfgPath) 769
770 if step.PreventDiskCleanup {
771 log.Printf("[INFO] Skipping defer os.RemoveAll call")
772 } else {
773 defer os.RemoveAll(cfgPath)
774 }
673 775
674 // Write the configuration 776 // Write the configuration
675 cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) 777 cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
@@ -693,10 +795,11 @@ func testModule(
693 } 795 }
694 796
695 // Load the modules 797 // Load the modules
696 modStorage := &getter.FolderStorage{ 798 modStorage := &module.Storage{
697 StorageDir: filepath.Join(cfgPath, ".tfmodules"), 799 StorageDir: filepath.Join(cfgPath, ".tfmodules"),
800 Mode: module.GetModeGet,
698 } 801 }
699 err = mod.Load(modStorage, module.GetModeGet) 802 err = mod.Load(modStorage)
700 if err != nil { 803 if err != nil {
701 return nil, fmt.Errorf("Error downloading modules: %s", err) 804 return nil, fmt.Errorf("Error downloading modules: %s", err)
702 } 805 }
@@ -771,12 +874,29 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
771 return err 874 return err
772 } 875 }
773 876
774 if val, ok := is.Attributes[key]; ok && val != "" { 877 return testCheckResourceAttrSet(is, name, key)
775 return nil 878 }
879}
880
881// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with
882// support for non-root modules
883func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc {
884 return func(s *terraform.State) error {
885 is, err := modulePathPrimaryInstanceState(s, mp, name)
886 if err != nil {
887 return err
776 } 888 }
777 889
890 return testCheckResourceAttrSet(is, name, key)
891 }
892}
893
894func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error {
895 if val, ok := is.Attributes[key]; !ok || val == "" {
778 return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) 896 return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
779 } 897 }
898
899 return nil
780} 900}
781 901
782// TestCheckResourceAttr is a TestCheckFunc which validates 902// TestCheckResourceAttr is a TestCheckFunc which validates
@@ -788,21 +908,37 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
788 return err 908 return err
789 } 909 }
790 910
791 if v, ok := is.Attributes[key]; !ok || v != value { 911 return testCheckResourceAttr(is, name, key, value)
792 if !ok { 912 }
793 return fmt.Errorf("%s: Attribute '%s' not found", name, key) 913}
794 }
795 914
796 return fmt.Errorf( 915// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with
797 "%s: Attribute '%s' expected %#v, got %#v", 916// support for non-root modules
798 name, 917func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc {
799 key, 918 return func(s *terraform.State) error {
800 value, 919 is, err := modulePathPrimaryInstanceState(s, mp, name)
801 v) 920 if err != nil {
921 return err
802 } 922 }
803 923
804 return nil 924 return testCheckResourceAttr(is, name, key, value)
925 }
926}
927
928func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error {
929 if v, ok := is.Attributes[key]; !ok || v != value {
930 if !ok {
931 return fmt.Errorf("%s: Attribute '%s' not found", name, key)
932 }
933
934 return fmt.Errorf(
935 "%s: Attribute '%s' expected %#v, got %#v",
936 name,
937 key,
938 value,
939 v)
805 } 940 }
941 return nil
806} 942}
807 943
808// TestCheckNoResourceAttr is a TestCheckFunc which ensures that 944// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
@@ -814,14 +950,31 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
814 return err 950 return err
815 } 951 }
816 952
817 if _, ok := is.Attributes[key]; ok { 953 return testCheckNoResourceAttr(is, name, key)
818 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) 954 }
955}
956
957// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with
958// support for non-root modules
959func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc {
960 return func(s *terraform.State) error {
961 is, err := modulePathPrimaryInstanceState(s, mp, name)
962 if err != nil {
963 return err
819 } 964 }
820 965
821 return nil 966 return testCheckNoResourceAttr(is, name, key)
822 } 967 }
823} 968}
824 969
970func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error {
971 if _, ok := is.Attributes[key]; ok {
972 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
973 }
974
975 return nil
976}
977
825// TestMatchResourceAttr is a TestCheckFunc which checks that the value 978// TestMatchResourceAttr is a TestCheckFunc which checks that the value
826// in state for the given name/key combination matches the given regex. 979// in state for the given name/key combination matches the given regex.
827func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { 980func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
@@ -831,17 +984,34 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
831 return err 984 return err
832 } 985 }
833 986
834 if !r.MatchString(is.Attributes[key]) { 987 return testMatchResourceAttr(is, name, key, r)
835 return fmt.Errorf( 988 }
836 "%s: Attribute '%s' didn't match %q, got %#v", 989}
837 name, 990
838 key, 991// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with
839 r.String(), 992// support for non-root modules
840 is.Attributes[key]) 993func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc {
994 return func(s *terraform.State) error {
995 is, err := modulePathPrimaryInstanceState(s, mp, name)
996 if err != nil {
997 return err
841 } 998 }
842 999
843 return nil 1000 return testMatchResourceAttr(is, name, key, r)
1001 }
1002}
1003
1004func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error {
1005 if !r.MatchString(is.Attributes[key]) {
1006 return fmt.Errorf(
1007 "%s: Attribute '%s' didn't match %q, got %#v",
1008 name,
1009 key,
1010 r.String(),
1011 is.Attributes[key])
844 } 1012 }
1013
1014 return nil
845} 1015}
846 1016
847// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the 1017// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
@@ -853,6 +1023,14 @@ func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckF
853 } 1023 }
854} 1024}
855 1025
1026// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with
1027// support for non-root modules
1028func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc {
1029 return func(s *terraform.State) error {
1030 return TestCheckModuleResourceAttr(mp, name, key, *value)(s)
1031 }
1032}
1033
856// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values 1034// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
857// in state for a pair of name/key combinations are equal. 1035// in state for a pair of name/key combinations are equal.
858func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { 1036func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
@@ -861,33 +1039,57 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string
861 if err != nil { 1039 if err != nil {
862 return err 1040 return err
863 } 1041 }
864 vFirst, ok := isFirst.Attributes[keyFirst]
865 if !ok {
866 return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
867 }
868 1042
869 isSecond, err := primaryInstanceState(s, nameSecond) 1043 isSecond, err := primaryInstanceState(s, nameSecond)
870 if err != nil { 1044 if err != nil {
871 return err 1045 return err
872 } 1046 }
873 vSecond, ok := isSecond.Attributes[keySecond] 1047
874 if !ok { 1048 return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond)
875 return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) 1049 }
1050}
1051
1052// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with
1053// support for non-root modules
1054func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc {
1055 return func(s *terraform.State) error {
1056 isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst)
1057 if err != nil {
1058 return err
876 } 1059 }
877 1060
878 if vFirst != vSecond { 1061 isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond)
879 return fmt.Errorf( 1062 if err != nil {
880 "%s: Attribute '%s' expected %#v, got %#v", 1063 return err
881 nameFirst,
882 keyFirst,
883 vSecond,
884 vFirst)
885 } 1064 }
886 1065
887 return nil 1066 return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond)
888 } 1067 }
889} 1068}
890 1069
1070func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error {
1071 vFirst, ok := isFirst.Attributes[keyFirst]
1072 if !ok {
1073 return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
1074 }
1075
1076 vSecond, ok := isSecond.Attributes[keySecond]
1077 if !ok {
1078 return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
1079 }
1080
1081 if vFirst != vSecond {
1082 return fmt.Errorf(
1083 "%s: Attribute '%s' expected %#v, got %#v",
1084 nameFirst,
1085 keyFirst,
1086 vSecond,
1087 vFirst)
1088 }
1089
1090 return nil
1091}
1092
891// TestCheckOutput checks an output in the Terraform configuration 1093// TestCheckOutput checks an output in the Terraform configuration
892func TestCheckOutput(name, value string) TestCheckFunc { 1094func TestCheckOutput(name, value string) TestCheckFunc {
893 return func(s *terraform.State) error { 1095 return func(s *terraform.State) error {
@@ -936,23 +1138,43 @@ type TestT interface {
936 Error(args ...interface{}) 1138 Error(args ...interface{})
937 Fatal(args ...interface{}) 1139 Fatal(args ...interface{})
938 Skip(args ...interface{}) 1140 Skip(args ...interface{})
1141 Name() string
1142 Parallel()
939} 1143}
940 1144
941// This is set to true by unit tests to alter some behavior 1145// This is set to true by unit tests to alter some behavior
942var testTesting = false 1146var testTesting = false
943 1147
944// primaryInstanceState returns the primary instance state for the given resource name. 1148// modulePrimaryInstanceState returns the instance state for the given resource
945func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { 1149// name in a ModuleState
946 ms := s.RootModule() 1150func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) {
947 rs, ok := ms.Resources[name] 1151 rs, ok := ms.Resources[name]
948 if !ok { 1152 if !ok {
949 return nil, fmt.Errorf("Not found: %s", name) 1153 return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path)
950 } 1154 }
951 1155
952 is := rs.Primary 1156 is := rs.Primary
953 if is == nil { 1157 if is == nil {
954 return nil, fmt.Errorf("No primary instance: %s", name) 1158 return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path)
955 } 1159 }
956 1160
957 return is, nil 1161 return is, nil
958} 1162}
1163
1164// modulePathPrimaryInstanceState returns the primary instance state for the
1165// given resource name in a given module path.
1166func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) {
1167 ms := s.ModuleByPath(mp)
1168 if ms == nil {
1169 return nil, fmt.Errorf("No module found at: %s", mp)
1170 }
1171
1172 return modulePrimaryInstanceState(s, ms, name)
1173}
1174
1175// primaryInstanceState returns the primary instance state for the given
1176// resource name in the root module.
1177func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
1178 ms := s.RootModule()
1179 return modulePrimaryInstanceState(s, ms, name)
1180}
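
The new TestCheckModule* helpers mirror their root-module counterparts but take a module path as understood by terraform.State.ModuleByPath. A fragment that could serve as the Check of a TestStep; the module name, resource address, and values are invented, and the leading "root" element assumes the 0.11-era path convention:

    Check: resource.ComposeTestCheckFunc(
        // Root module, as before.
        resource.TestCheckResourceAttr("example_thing.foo", "name", "web"),
        // The same resource inside module "child".
        resource.TestCheckModuleResourceAttr(
            []string{"root", "child"}, "example_thing.foo", "name", "web"),
        resource.TestCheckModuleResourceAttrSet(
            []string{"root", "child"}, "example_thing.foo", "id"),
    ),
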
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
index 537a11c..033f126 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -1,10 +1,12 @@
1package resource 1package resource
2 2
3import ( 3import (
4 "errors"
4 "fmt" 5 "fmt"
5 "log" 6 "log"
6 "strings" 7 "strings"
7 8
9 "github.com/hashicorp/errwrap"
8 "github.com/hashicorp/terraform/terraform" 10 "github.com/hashicorp/terraform/terraform"
9) 11)
10 12
@@ -20,6 +22,14 @@ func testStep(
20 opts terraform.ContextOpts, 22 opts terraform.ContextOpts,
21 state *terraform.State, 23 state *terraform.State,
22 step TestStep) (*terraform.State, error) { 24 step TestStep) (*terraform.State, error) {
25 // Pre-taint any resources that have been defined in Taint, as long as this
26 // is not a destroy step.
27 if !step.Destroy {
28 if err := testStepTaint(state, step); err != nil {
29 return state, err
30 }
31 }
32
23 mod, err := testModule(opts, step) 33 mod, err := testModule(opts, step)
24 if err != nil { 34 if err != nil {
25 return state, err 35 return state, err
@@ -33,17 +43,12 @@ func testStep(
33 if err != nil { 43 if err != nil {
34 return state, fmt.Errorf("Error initializing context: %s", err) 44 return state, fmt.Errorf("Error initializing context: %s", err)
35 } 45 }
36 if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { 46 if diags := ctx.Validate(); len(diags) > 0 {
37 if len(es) > 0 { 47 if diags.HasErrors() {
38 estrs := make([]string, len(es)) 48 return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err())
39 for i, e := range es {
40 estrs[i] = e.Error()
41 }
42 return state, fmt.Errorf(
43 "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
44 ws, estrs)
45 } 49 }
46 log.Printf("[WARN] Config warnings: %#v", ws) 50
51 log.Printf("[WARN] Config warnings:\n%s", diags)
47 } 52 }
48 53
49 // Refresh! 54 // Refresh!
@@ -158,3 +163,19 @@ func testStep(
158 // Made it here? Good job test step! 163 // Made it here? Good job test step!
159 return state, nil 164 return state, nil
160} 165}
166
167func testStepTaint(state *terraform.State, step TestStep) error {
168 for _, p := range step.Taint {
169 m := state.RootModule()
170 if m == nil {
171 return errors.New("no state")
172 }
173 rs, ok := m.Resources[p]
174 if !ok {
175 return fmt.Errorf("resource %q not found in state", p)
176 }
177 log.Printf("[WARN] Test: Explicitly tainting resource %q", p)
178 rs.Taint()
179 }
180 return nil
181}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
index 28ad105..94fef3c 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -16,15 +16,24 @@ func testStepImportState(
16 state *terraform.State, 16 state *terraform.State,
17 step TestStep) (*terraform.State, error) { 17 step TestStep) (*terraform.State, error) {
18 // Determine the ID to import 18 // Determine the ID to import
19 importId := step.ImportStateId 19 var importId string
20 if importId == "" { 20 switch {
21 case step.ImportStateIdFunc != nil:
22 var err error
23 importId, err = step.ImportStateIdFunc(state)
24 if err != nil {
25 return state, err
26 }
27 case step.ImportStateId != "":
28 importId = step.ImportStateId
29 default:
21 resource, err := testResource(step, state) 30 resource, err := testResource(step, state)
22 if err != nil { 31 if err != nil {
23 return state, err 32 return state, err
24 } 33 }
25
26 importId = resource.Primary.ID 34 importId = resource.Primary.ID
27 } 35 }
36
28 importPrefix := step.ImportStateIdPrefix 37 importPrefix := step.ImportStateIdPrefix
29 if importPrefix != "" { 38 if importPrefix != "" {
30 importId = fmt.Sprintf("%s%s", importPrefix, importId) 39 importId = fmt.Sprintf("%s%s", importPrefix, importId)
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
index ca50e29..e56a515 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
@@ -74,7 +74,7 @@ func RetryableError(err error) *RetryError {
74 return &RetryError{Err: err, Retryable: true} 74 return &RetryError{Err: err, Retryable: true}
75} 75}
76 76
77// NonRetryableError is a helper to create a RetryError that's _not)_ retryable 77// NonRetryableError is a helper to create a RetryError that's _not_ retryable
78// from a given error. 78// from a given error.
79func NonRetryableError(err error) *RetryError { 79func NonRetryableError(err error) *RetryError {
80 if err == nil { 80 if err == nil {
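
RetryableError and NonRetryableError are how a resource.Retry callback distinguishes transient failures from permanent ones. A self-contained sketch with an artificial transient condition:

    package main

    import (
        "errors"
        "fmt"
        "time"

        "github.com/hashicorp/terraform/helper/resource"
    )

    func main() {
        attempts := 0
        err := resource.Retry(30*time.Second, func() *resource.RetryError {
            attempts++
            switch {
            case attempts < 3:
                // Transient: ask Retry to call us again until the timeout.
                return resource.RetryableError(errors.New("still provisioning"))
            case attempts == 3:
                return nil // success
            default:
                // Permanent: abort immediately, no further retries.
                return resource.NonRetryableError(errors.New("unexpected state"))
            }
        })
        fmt.Println(attempts, err) // 3 <nil>
    }
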
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
index a0729c0..57fbba7 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -65,7 +65,7 @@ func (b *Backend) Configure(c *terraform.ResourceConfig) error {
65 65
66 // Get a ResourceData for this configuration. To do this, we actually 66 // Get a ResourceData for this configuration. To do this, we actually
67 // generate an intermediary "diff" although that is never exposed. 67 // generate an intermediary "diff" although that is never exposed.
68 diff, err := sm.Diff(nil, c) 68 diff, err := sm.Diff(nil, c, nil, nil)
69 if err != nil { 69 if err != nil {
70 return err 70 return err
71 } 71 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
new file mode 100644
index 0000000..bf952f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
@@ -0,0 +1,155 @@
1package schema
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
10// The functions and methods in this file are concerned with the conversion
11// of this package's schema model into the slightly-lower-level schema model
12// used by Terraform core for configuration parsing.
13
14// CoreConfigSchema lowers the receiver to the schema model expected by
15// Terraform core.
16//
17// This lower-level model has fewer features than the schema in this package,
18// describing only the basic structure of configuration and state values we
19// expect. The full schemaMap from this package is still required for full
20// validation, handling of default values, etc.
21//
22// This method presumes a schema that passes InternalValidate, and so may
23// panic or produce an invalid result if given an invalid schemaMap.
24func (m schemaMap) CoreConfigSchema() *configschema.Block {
25 if len(m) == 0 {
26 // We return an actual (empty) object here, rather than a nil,
27 // because a nil result would mean that we don't have a schema at
28 // all, rather than that we have an empty one.
29 return &configschema.Block{}
30 }
31
32 ret := &configschema.Block{
33 Attributes: map[string]*configschema.Attribute{},
34 BlockTypes: map[string]*configschema.NestedBlock{},
35 }
36
37 for name, schema := range m {
38 if schema.Elem == nil {
39 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
40 continue
41 }
42 switch schema.Elem.(type) {
43 case *Schema:
44 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
45 case *Resource:
46 ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
47 default:
48 // Should never happen for a valid schema
49 panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem))
50 }
51 }
52
53 return ret
54}
55
56// coreConfigSchemaAttribute prepares a configschema.Attribute representation
57// of a schema. This is appropriate only for primitives or collections whose
58// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
59// whose elem is a whole resource.
60func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
61 return &configschema.Attribute{
62 Type: s.coreConfigSchemaType(),
63 Optional: s.Optional,
64 Required: s.Required,
65 Computed: s.Computed,
66 Sensitive: s.Sensitive,
67 }
68}
69
70// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of
71// a schema. This is appropriate only for collections whose Elem is an instance
72// of Resource, and will panic otherwise.
73func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
74 ret := &configschema.NestedBlock{}
75 if nested := s.Elem.(*Resource).CoreConfigSchema(); nested != nil {
76 ret.Block = *nested
77 }
78 switch s.Type {
79 case TypeList:
80 ret.Nesting = configschema.NestingList
81 case TypeSet:
82 ret.Nesting = configschema.NestingSet
83 case TypeMap:
84 ret.Nesting = configschema.NestingMap
85 default:
86 // Should never happen for a valid schema
87 panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type))
88 }
89
90 ret.MinItems = s.MinItems
91 ret.MaxItems = s.MaxItems
92
93 if s.Required && s.MinItems == 0 {
94 // configschema doesn't have a "required" representation for nested
95 // blocks, but we can fake it by requiring at least one item.
96 ret.MinItems = 1
97 }
98
99 return ret
100}
101
102// coreConfigSchemaType determines the core config schema type that corresponds
103// to a particular schema's type.
104func (s *Schema) coreConfigSchemaType() cty.Type {
105 switch s.Type {
106 case TypeString:
107 return cty.String
108 case TypeBool:
109 return cty.Bool
110 case TypeInt, TypeFloat:
111 // configschema doesn't distinguish int and float, so helper/schema
112 // will deal with this as an additional validation step after
113 // configuration has been parsed and decoded.
114 return cty.Number
115 case TypeList, TypeSet, TypeMap:
116 var elemType cty.Type
117 switch set := s.Elem.(type) {
118 case *Schema:
119 elemType = set.coreConfigSchemaType()
120 case *Resource:
121 // In practice we don't actually use this for normal schema
122 // construction because we construct a NestedBlock in that
123 // case instead. See schemaMap.CoreConfigSchema.
124 elemType = set.CoreConfigSchema().ImpliedType()
125 default:
126 if set != nil {
127 // Should never happen for a valid schema
128 panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem))
129 }
130 // Some pre-existing schemas assume string as default, so we need
131 // to be compatible with them.
132 elemType = cty.String
133 }
134 switch s.Type {
135 case TypeList:
136 return cty.List(elemType)
137 case TypeSet:
138 return cty.Set(elemType)
139 case TypeMap:
140 return cty.Map(elemType)
141 default:
142 // can never get here in practice, due to the case we're inside
143 panic("invalid collection type")
144 }
145 default:
146 // should never happen for a valid schema
147 panic(fmt.Errorf("invalid Schema.Type %s", s.Type))
148 }
149}
150
151// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema
152// on the resource's schema.
153func (r *Resource) CoreConfigSchema() *configschema.Block {
154 return schemaMap(r.Schema).CoreConfigSchema()
155}
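
A sketch of what the lowering produces for a small helper/schema resource; the field names are invented and the expected outputs follow the switch in coreConfigSchemaType above:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/config/configschema"
        "github.com/hashicorp/terraform/helper/schema"
    )

    func main() {
        r := &schema.Resource{
            Schema: map[string]*schema.Schema{
                "name": {Type: schema.TypeString, Required: true},
                "tags": {Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}},
                "rule": {
                    Type:     schema.TypeList,
                    Optional: true,
                    Elem: &schema.Resource{
                        Schema: map[string]*schema.Schema{
                            "port": {Type: schema.TypeInt, Required: true},
                        },
                    },
                },
            },
        }

        block := r.CoreConfigSchema()
        // "name" and "tags" lower to attributes, "rule" to a nested list block.
        fmt.Println(block.Attributes["name"].Type.FriendlyName())                 // string
        fmt.Println(block.Attributes["tags"].Type.FriendlyName())                 // map of string
        fmt.Println(block.BlockTypes["rule"].Nesting == configschema.NestingList) // true
        fmt.Println(block.BlockTypes["rule"].Block.Attributes["port"].Type.FriendlyName()) // number
    }
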
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
index 5a03d2d..8d93750 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
@@ -32,7 +32,7 @@ func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
32 32
33 // FIXME: Link to some further docs either on the website or in the 33 // FIXME: Link to some further docs either on the website or in the
34 // changelog, once such a thing exists. 34 // changelog, once such a thing exists.
35 dataSource.deprecationMessage = fmt.Sprintf( 35 dataSource.DeprecationMessage = fmt.Sprintf(
36 "using %s as a resource is deprecated; consider using the data source instead", 36 "using %s as a resource is deprecated; consider using the data source instead",
37 name, 37 name,
38 ) 38 )
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
index 1660a67..b80b223 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -126,6 +126,8 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
126 switch v := current.Elem.(type) { 126 switch v := current.Elem.(type) {
127 case ValueType: 127 case ValueType:
128 current = &Schema{Type: v} 128 current = &Schema{Type: v}
129 case *Schema:
130 current, _ = current.Elem.(*Schema)
129 default: 131 default:
130 // maps default to string values. This is all we can have 132 // maps default to string values. This is all we can have
131 // if this is nested in another list or map. 133 // if this is nested in another list or map.
@@ -249,11 +251,10 @@ func readObjectField(
249} 251}
250 252
251// convert map values to the proper primitive type based on schema.Elem 253// convert map values to the proper primitive type based on schema.Elem
252func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error { 254func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error {
253 255 elemType, err := getValueType(k, schema)
254 elemType := TypeString 256 if err != nil {
255 if et, ok := schema.Elem.(ValueType); ok { 257 return err
256 elemType = et
257 } 258 }
258 259
259 switch elemType { 260 switch elemType {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
index f958bbc..55a301d 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -206,7 +206,7 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult,
206 panic(fmt.Sprintf("unknown type: %#v", mraw)) 206 panic(fmt.Sprintf("unknown type: %#v", mraw))
207 } 207 }
208 208
209 err := mapValuesToPrimitive(result, schema) 209 err := mapValuesToPrimitive(k, result, schema)
210 if err != nil { 210 if err != nil {
211 return FieldReadResult{}, nil 211 return FieldReadResult{}, nil
212 } 212 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
index 16bbae2..d558a5b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -29,29 +29,59 @@ type DiffFieldReader struct {
29 Diff *terraform.InstanceDiff 29 Diff *terraform.InstanceDiff
30 Source FieldReader 30 Source FieldReader
31 Schema map[string]*Schema 31 Schema map[string]*Schema
32
33 // cache for memoizing ReadField calls.
34 cache map[string]cachedFieldReadResult
35}
36
37type cachedFieldReadResult struct {
38 val FieldReadResult
39 err error
32} 40}
33 41
34func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { 42func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
43 if r.cache == nil {
44 r.cache = make(map[string]cachedFieldReadResult)
45 }
46
47 // Create the cache key by joining around a value that isn't a valid part
48 // of an address. This assumes that the Source and Schema are not changed
49 // for the life of this DiffFieldReader.
50 cacheKey := strings.Join(address, "|")
51 if cached, ok := r.cache[cacheKey]; ok {
52 return cached.val, cached.err
53 }
54
35 schemaList := addrToSchema(address, r.Schema) 55 schemaList := addrToSchema(address, r.Schema)
36 if len(schemaList) == 0 { 56 if len(schemaList) == 0 {
57 r.cache[cacheKey] = cachedFieldReadResult{}
37 return FieldReadResult{}, nil 58 return FieldReadResult{}, nil
38 } 59 }
39 60
61 var res FieldReadResult
62 var err error
63
40 schema := schemaList[len(schemaList)-1] 64 schema := schemaList[len(schemaList)-1]
41 switch schema.Type { 65 switch schema.Type {
42 case TypeBool, TypeInt, TypeFloat, TypeString: 66 case TypeBool, TypeInt, TypeFloat, TypeString:
43 return r.readPrimitive(address, schema) 67 res, err = r.readPrimitive(address, schema)
44 case TypeList: 68 case TypeList:
45 return readListField(r, address, schema) 69 res, err = readListField(r, address, schema)
46 case TypeMap: 70 case TypeMap:
47 return r.readMap(address, schema) 71 res, err = r.readMap(address, schema)
48 case TypeSet: 72 case TypeSet:
49 return r.readSet(address, schema) 73 res, err = r.readSet(address, schema)
50 case typeObject: 74 case typeObject:
51 return readObjectField(r, address, schema.Elem.(map[string]*Schema)) 75 res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema))
52 default: 76 default:
53 panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) 77 panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
54 } 78 }
79
80 r.cache[cacheKey] = cachedFieldReadResult{
81 val: res,
82 err: err,
83 }
84 return res, err
55} 85}
56 86
57func (r *DiffFieldReader) readMap( 87func (r *DiffFieldReader) readMap(
@@ -92,7 +122,8 @@ func (r *DiffFieldReader) readMap(
92 result[k] = v.New 122 result[k] = v.New
93 } 123 }
94 124
95 err = mapValuesToPrimitive(result, schema) 125 key := address[len(address)-1]
126 err = mapValuesToPrimitive(key, result, schema)
96 if err != nil { 127 if err != nil {
97 return FieldReadResult{}, nil 128 return FieldReadResult{}, nil
98 } 129 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
index 9533981..054efe0 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -61,7 +61,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err
61 return true 61 return true
62 }) 62 })
63 63
64 err := mapValuesToPrimitive(result, schema) 64 err := mapValuesToPrimitive(k, result, schema)
65 if err != nil { 65 if err != nil {
66 return FieldReadResult{}, nil 66 return FieldReadResult{}, nil
67 } 67 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
index 689ed8d..814c7ba 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -39,6 +39,19 @@ func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
39 w.result[addr] = value 39 w.result[addr] = value
40} 40}
41 41
42// clearTree clears a field and any sub-fields of the given address out of the
43// map. This should be used to reset some kind of complex structures (namely
44// sets) before writing to make sure that any conflicting data is removed (for
45// example, if the set was previously written to the writer's layer).
46func (w *MapFieldWriter) clearTree(addr []string) {
47 prefix := strings.Join(addr, ".") + "."
48 for k := range w.result {
49 if strings.HasPrefix(k, prefix) {
50 delete(w.result, k)
51 }
52 }
53}
54
42func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { 55func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
43 w.lock.Lock() 56 w.lock.Lock()
44 defer w.lock.Unlock() 57 defer w.lock.Unlock()
@@ -115,6 +128,14 @@ func (w *MapFieldWriter) setList(
115 return fmt.Errorf("%s: %s", k, err) 128 return fmt.Errorf("%s: %s", k, err)
116 } 129 }
117 130
131 // Wipe the set from the current writer prior to writing if it exists.
132 // Multiple writes to the same layer is a lot safer for lists than sets due
133 // to the fact that indexes are always deterministic and the length will
134 // always be updated with the current length on the last write, but making
135 // sure we have a clean namespace removes any chance for edge cases to pop up
136 // and ensures that the last write to the set is the correct value.
137 w.clearTree(addr)
138
118 // Set the entire list. 139 // Set the entire list.
119 var err error 140 var err error
120 for i, elem := range vs { 141 for i, elem := range vs {
@@ -162,6 +183,10 @@ func (w *MapFieldWriter) setMap(
162 vs[mk.String()] = mv.Interface() 183 vs[mk.String()] = mv.Interface()
163 } 184 }
164 185
186 // Wipe this address tree. The contents of the map should always reflect the
187 // last write made to it.
188 w.clearTree(addr)
189
165 // Remove the pure key since we're setting the full map value 190 // Remove the pure key since we're setting the full map value
166 delete(w.result, k) 191 delete(w.result, k)
167 192
@@ -308,6 +333,13 @@ func (w *MapFieldWriter) setSet(
308 value = s 333 value = s
309 } 334 }
310 335
336 // Clear any keys that match the set address first. This is necessary because
337 // it's always possible and sometimes may be necessary to write to a certain
338 // writer layer more than once with different set data each time, which will
339 // lead to different keys being inserted, which can lead to determinism
340 // problems when the old data isn't wiped first.
341 w.clearTree(addr)
342
311 for code, elem := range value.(*Set).m { 343 for code, elem := range value.(*Set).m {
312 if err := w.set(append(addrCopy, code), elem); err != nil { 344 if err := w.set(append(addrCopy, code), elem); err != nil {
313 return err 345 return err
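
clearTree is what makes repeated writes of sets, maps, and lists to the same layer deterministic: everything under the address is deleted before the new value is written. A standalone sketch of the same prefix delete over a flatmap (keys are illustrative; the real method is unexported and operates on the writer's result map):

    package main

    import (
        "fmt"
        "strings"
    )

    // clearTree mirrors the MapFieldWriter helper: it removes every key
    // beneath addr from a flatmapped result.
    func clearTree(result map[string]string, addr ...string) {
        prefix := strings.Join(addr, ".") + "."
        for k := range result {
            if strings.HasPrefix(k, prefix) {
                delete(result, k)
            }
        }
    }

    func main() {
        result := map[string]string{
            "ingress.#":         "2",
            "ingress.1234.port": "80", // hash-keyed set elements from an earlier write
            "ingress.5678.port": "443",
            "name":              "web",
        }
        clearTree(result, "ingress")
        fmt.Println(result) // map[name:web] — the set subtree is gone before rewriting
    }
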
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
index 3a97629..38cd8c7 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -2,7 +2,7 @@
2 2
3package schema 3package schema
4 4
5import "fmt" 5import "strconv"
6 6
7const ( 7const (
8 _getSource_name_0 = "getSourceStategetSourceConfig" 8 _getSource_name_0 = "getSourceStategetSourceConfig"
@@ -13,8 +13,6 @@ const (
13 13
14var ( 14var (
15 _getSource_index_0 = [...]uint8{0, 14, 29} 15 _getSource_index_0 = [...]uint8{0, 14, 29}
16 _getSource_index_1 = [...]uint8{0, 13}
17 _getSource_index_2 = [...]uint8{0, 12}
18 _getSource_index_3 = [...]uint8{0, 18, 32} 16 _getSource_index_3 = [...]uint8{0, 18, 32}
19) 17)
20 18
@@ -31,6 +29,6 @@ func (i getSource) String() string {
31 i -= 15 29 i -= 15
32 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] 30 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
33 default: 31 default:
34 return fmt.Sprintf("getSource(%d)", i) 32 return "getSource(" + strconv.FormatInt(int64(i), 10) + ")"
35 } 33 }
36} 34}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
index fb28b41..6cd325d 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -9,6 +9,7 @@ import (
9 9
10 "github.com/hashicorp/go-multierror" 10 "github.com/hashicorp/go-multierror"
11 "github.com/hashicorp/terraform/config" 11 "github.com/hashicorp/terraform/config"
12 "github.com/hashicorp/terraform/config/configschema"
12 "github.com/hashicorp/terraform/terraform" 13 "github.com/hashicorp/terraform/terraform"
13) 14)
14 15
@@ -58,7 +59,7 @@ type Provider struct {
58 59
59 meta interface{} 60 meta interface{}
60 61
61 // a mutex is required because TestReset can directly repalce the stopCtx 62 // a mutex is required because TestReset can directly replace the stopCtx
62 stopMu sync.Mutex 63 stopMu sync.Mutex
63 stopCtx context.Context 64 stopCtx context.Context
64 stopCtxCancel context.CancelFunc 65 stopCtxCancel context.CancelFunc
@@ -185,6 +186,29 @@ func (p *Provider) TestReset() error {
185 return nil 186 return nil
186} 187}
187 188
189// GetSchema implementation of terraform.ResourceProvider interface
190func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) {
191 resourceTypes := map[string]*configschema.Block{}
192 dataSources := map[string]*configschema.Block{}
193
194 for _, name := range req.ResourceTypes {
195 if r, exists := p.ResourcesMap[name]; exists {
196 resourceTypes[name] = r.CoreConfigSchema()
197 }
198 }
199 for _, name := range req.DataSources {
200 if r, exists := p.DataSourcesMap[name]; exists {
201 dataSources[name] = r.CoreConfigSchema()
202 }
203 }
204
205 return &terraform.ProviderSchema{
206 Provider: schemaMap(p.Schema).CoreConfigSchema(),
207 ResourceTypes: resourceTypes,
208 DataSources: dataSources,
209 }, nil
210}
211
188// Input implementation of terraform.ResourceProvider interface. 212// Input implementation of terraform.ResourceProvider interface.
189func (p *Provider) Input( 213func (p *Provider) Input(
190 input terraform.UIInput, 214 input terraform.UIInput,
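
GetSchema lets core request the lowered configschema for selected resource types and data sources. A fragment calling it directly; p is assumed to be a configured *schema.Provider and the type names are placeholders:

    ps, err := p.GetSchema(&terraform.ProviderSchemaRequest{
        ResourceTypes: []string{"example_thing"},
        DataSources:   []string{"example_data"},
    })
    if err == nil {
        // ps.Provider, ps.ResourceTypes["example_thing"], and
        // ps.DataSources["example_data"] are *configschema.Block values.
        _ = ps
    }
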
@@ -227,7 +251,7 @@ func (p *Provider) Configure(c *terraform.ResourceConfig) error {
227 251
228 // Get a ResourceData for this configuration. To do this, we actually 252 // Get a ResourceData for this configuration. To do this, we actually
229 // generate an intermediary "diff" although that is never exposed. 253 // generate an intermediary "diff" although that is never exposed.
230 diff, err := sm.Diff(nil, c) 254 diff, err := sm.Diff(nil, c, nil, p.meta)
231 if err != nil { 255 if err != nil {
232 return err 256 return err
233 } 257 }
@@ -269,7 +293,7 @@ func (p *Provider) Diff(
269 return nil, fmt.Errorf("unknown resource type: %s", info.Type) 293 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
270 } 294 }
271 295
272 return r.Diff(s, c) 296 return r.Diff(s, c, p.meta)
273} 297}
274 298
275// Refresh implementation of terraform.ResourceProvider interface. 299// Refresh implementation of terraform.ResourceProvider interface.
@@ -305,6 +329,10 @@ func (p *Provider) Resources() []terraform.ResourceType {
305 result = append(result, terraform.ResourceType{ 329 result = append(result, terraform.ResourceType{
306 Name: k, 330 Name: k,
307 Importable: resource.Importer != nil, 331 Importable: resource.Importer != nil,
332
333 // Indicates that a provider is compiled against a new enough
334 // version of core to support the GetSchema method.
335 SchemaAvailable: true,
308 }) 336 })
309 } 337 }
310 338
@@ -382,7 +410,7 @@ func (p *Provider) ReadDataDiff(
382 return nil, fmt.Errorf("unknown data source: %s", info.Type) 410 return nil, fmt.Errorf("unknown data source: %s", info.Type)
383 } 411 }
384 412
385 return r.Diff(nil, c) 413 return r.Diff(nil, c, p.meta)
386} 414}
387 415
388// RefreshData implementation of terraform.ResourceProvider interface. 416// RefreshData implementation of terraform.ResourceProvider interface.
@@ -410,6 +438,10 @@ func (p *Provider) DataSources() []terraform.DataSource {
410 for _, k := range keys { 438 for _, k := range keys {
411 result = append(result, terraform.DataSource{ 439 result = append(result, terraform.DataSource{
412 Name: k, 440 Name: k,
441
442 // Indicates that a provider is compiled against a new enough
443 // version of core to support the GetSchema method.
444 SchemaAvailable: true,
413 }) 445 })
414 } 446 }
415 447
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
index 476192e..a8d42db 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -146,7 +146,7 @@ func (p *Provisioner) Apply(
146 } 146 }
147 147
148 sm := schemaMap(p.ConnSchema) 148 sm := schemaMap(p.ConnSchema)
149 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) 149 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil)
150 if err != nil { 150 if err != nil {
151 return err 151 return err
152 } 152 }
@@ -160,7 +160,7 @@ func (p *Provisioner) Apply(
160 // Build the configuration data. Doing this requires making a "diff" 160 // Build the configuration data. Doing this requires making a "diff"
161 // even though that's never used. We use that just to get the correct types. 161 // even though that's never used. We use that just to get the correct types.
162 configMap := schemaMap(p.Schema) 162 configMap := schemaMap(p.Schema)
163 diff, err := configMap.Diff(nil, c) 163 diff, err := configMap.Diff(nil, c, nil, nil)
164 if err != nil { 164 if err != nil {
165 return err 165 return err
166 } 166 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
index ddba109..d3be2d6 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -85,6 +85,37 @@ type Resource struct {
85 Delete DeleteFunc 85 Delete DeleteFunc
86 Exists ExistsFunc 86 Exists ExistsFunc
87 87
88 // CustomizeDiff is a custom function for working with the diff that
89 // Terraform has created for this resource - it can be used to customize the
90 // diff that has been created, diff values not controlled by configuration,
91 // or even veto the diff altogether and abort the plan. It is passed a
92 // *ResourceDiff, a structure similar to ResourceData but lacking most write
93 // functions like Set, while introducing new functions that work with the
94 // diff such as SetNew, SetNewComputed, and ForceNew.
95 //
96 // The phases Terraform runs this in, and the state available via functions
97 // like Get and GetChange, are as follows:
98 //
99 // * New resource: One run with no state
100 // * Existing resource: One run with state
101 // * Existing resource, forced new: One run with state (before ForceNew),
102 // then one run without state (as if new resource)
103 // * Tainted resource: No runs (custom diff logic is skipped)
104 // * Destroy: No runs (standard diff logic is skipped on destroy diffs)
105 //
106 // This function needs to be resilient to support all scenarios.
107 //
108 // If this function needs to access external API resources, remember to flag
109 // the RequiresRefresh attribute mentioned below to ensure that
110 // -refresh=false is blocked when running plan or apply, as this means that
111 // this resource requires refresh-like behaviour to work effectively.
112 //
113 // For the most part, only computed fields can be customized by this
114 // function.
115 //
116 // This function is only allowed on regular resources (not data sources).
117 CustomizeDiff CustomizeDiffFunc
118
88 // Importer is the ResourceImporter implementation for this resource. 119 // Importer is the ResourceImporter implementation for this resource.
89 // If this is nil, then this resource does not support importing. If 120 // If this is nil, then this resource does not support importing. If
90 // this is non-nil, then it supports importing and ResourceImporter 121 // this is non-nil, then it supports importing and ResourceImporter
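
A hedged sketch of a CustomizeDiff hook that follows the constraints described above: it only adjusts computed values and plan behaviour, and it must cope with being invoked both with and without prior state. The argument names ("size", "fingerprint", "zone") are invented:

    &schema.Resource{
        // Schema, Create, Read, Update, Delete omitted.
        CustomizeDiff: func(d *schema.ResourceDiff, meta interface{}) error {
            if d.HasChange("size") {
                // The (hypothetical) API also rotates the computed "fingerprint"
                // when size changes, so mark it unknown in the plan.
                if err := d.SetNewComputed("fingerprint"); err != nil {
                    return err
                }
            }
            if d.HasChange("zone") {
                // Changing "zone" cannot be done in place: force replacement.
                if err := d.ForceNew("zone"); err != nil {
                    return err
                }
            }
            return nil
        },
    }
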
@@ -93,9 +124,7 @@ type Resource struct {
93 Importer *ResourceImporter 124 Importer *ResourceImporter
94 125
95 // If non-empty, this string is emitted as a warning during Validate. 126 // If non-empty, this string is emitted as a warning during Validate.
96 // This is a private interface for now, for use by DataSourceResourceShim, 127 DeprecationMessage string
97 // and not for general use. (But maybe later...)
98 deprecationMessage string
99 128
100 // Timeouts allow users to specify specific time durations in which an 129 // Timeouts allow users to specify specific time durations in which an
101 // operation should time out, to allow them to extend an action to suit their 130 // operation should time out, to allow them to extend an action to suit their
@@ -126,6 +155,9 @@ type ExistsFunc func(*ResourceData, interface{}) (bool, error)
126type StateMigrateFunc func( 155type StateMigrateFunc func(
127 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) 156 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
128 157
158// See Resource documentation.
159type CustomizeDiffFunc func(*ResourceDiff, interface{}) error
160
129// Apply creates, updates, and/or deletes a resource. 161// Apply creates, updates, and/or deletes a resource.
130func (r *Resource) Apply( 162func (r *Resource) Apply(
131 s *terraform.InstanceState, 163 s *terraform.InstanceState,
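
For orientation, a minimal sketch of how a provider resource might wire up the CustomizeDiff hook and the newly exported DeprecationMessage field introduced above. The resource, its attributes, and the "no shrinking" rule are hypothetical and only illustrate the API surface; they are not taken from this diff.

package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// resourceExampleThing is a hypothetical resource using the new fields.
func resourceExampleThing() *schema.Resource {
	return &schema.Resource{
		Create: resourceExampleThingCreate,
		Read:   resourceExampleThingRead,
		Update: resourceExampleThingUpdate,
		Delete: resourceExampleThingDelete,

		// Emitted as a warning during Validate, now settable by providers.
		DeprecationMessage: "example_thing is deprecated, use example_widget instead",

		Schema: map[string]*schema.Schema{
			"size": {
				Type:     schema.TypeInt,
				Required: true,
			},
			// "version" is assigned by the (hypothetical) API whenever "size" changes.
			"version": {
				Type:     schema.TypeInt,
				Computed: true,
			},
		},

		// CustomizeDiff receives a *ResourceDiff and the provider meta value.
		CustomizeDiff: func(d *schema.ResourceDiff, meta interface{}) error {
			if d.HasChange("size") {
				old, new := d.GetChange("size")
				// Veto the plan entirely if the size would shrink.
				if new.(int) < old.(int) {
					return fmt.Errorf("size cannot be reduced from %d to %d", old, new)
				}
				// The API will pick a new version, so mark it as "known after apply".
				if err := d.SetNewComputed("version"); err != nil {
					return err
				}
			}
			return nil
		},
	}
}

func resourceExampleThingCreate(d *schema.ResourceData, meta interface{}) error { d.SetId("example"); return nil }
func resourceExampleThingRead(d *schema.ResourceData, meta interface{}) error   { return nil }
func resourceExampleThingUpdate(d *schema.ResourceData, meta interface{}) error { return nil }
func resourceExampleThingDelete(d *schema.ResourceData, meta interface{}) error { d.SetId(""); return nil }
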
@@ -202,11 +234,11 @@ func (r *Resource) Apply(
202 return r.recordCurrentSchemaVersion(data.State()), err 234 return r.recordCurrentSchemaVersion(data.State()), err
203} 235}
204 236
205// Diff returns a diff of this resource and is API compatible with the 237// Diff returns a diff of this resource.
206// ResourceProvider interface.
207func (r *Resource) Diff( 238func (r *Resource) Diff(
208 s *terraform.InstanceState, 239 s *terraform.InstanceState,
209 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { 240 c *terraform.ResourceConfig,
241 meta interface{}) (*terraform.InstanceDiff, error) {
210 242
211 t := &ResourceTimeout{} 243 t := &ResourceTimeout{}
212 err := t.ConfigDecode(r, c) 244 err := t.ConfigDecode(r, c)
@@ -215,7 +247,7 @@ func (r *Resource) Diff(
215 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) 247 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
216 } 248 }
217 249
218 instanceDiff, err := schemaMap(r.Schema).Diff(s, c) 250 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta)
219 if err != nil { 251 if err != nil {
220 return instanceDiff, err 252 return instanceDiff, err
221 } 253 }
@@ -235,8 +267,8 @@ func (r *Resource) Diff(
235func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { 267func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
236 warns, errs := schemaMap(r.Schema).Validate(c) 268 warns, errs := schemaMap(r.Schema).Validate(c)
237 269
238 if r.deprecationMessage != "" { 270 if r.DeprecationMessage != "" {
239 warns = append(warns, r.deprecationMessage) 271 warns = append(warns, r.DeprecationMessage)
240 } 272 }
241 273
242 return warns, errs 274 return warns, errs
@@ -248,7 +280,6 @@ func (r *Resource) ReadDataApply(
248 d *terraform.InstanceDiff, 280 d *terraform.InstanceDiff,
249 meta interface{}, 281 meta interface{},
250) (*terraform.InstanceState, error) { 282) (*terraform.InstanceState, error) {
251
252 // Data sources are always built completely from scratch 283 // Data sources are always built completely from scratch
253 // on each read, so the source state is always nil. 284 // on each read, so the source state is always nil.
254 data, err := schemaMap(r.Schema).Data(nil, d) 285 data, err := schemaMap(r.Schema).Data(nil, d)
@@ -346,6 +377,11 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
346 if r.Create != nil || r.Update != nil || r.Delete != nil { 377 if r.Create != nil || r.Update != nil || r.Delete != nil {
347 return fmt.Errorf("must not implement Create, Update or Delete") 378 return fmt.Errorf("must not implement Create, Update or Delete")
348 } 379 }
380
381 // CustomizeDiff cannot be defined for read-only resources
382 if r.CustomizeDiff != nil {
383 return fmt.Errorf("cannot implement CustomizeDiff")
384 }
349 } 385 }
350 386
351 tsm := topSchemaMap 387 tsm := topSchemaMap
@@ -393,19 +429,43 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
393 return err 429 return err
394 } 430 }
395 } 431 }
432
433 for k, f := range tsm {
434 if isReservedResourceFieldName(k, f) {
435 return fmt.Errorf("%s is a reserved field name", k)
436 }
437 }
396 } 438 }
397 439
398 // Resource-specific checks 440 // Data source
399 for k, _ := range tsm { 441 if r.isTopLevel() && !writable {
400 if isReservedResourceFieldName(k) { 442 tsm = schemaMap(r.Schema)
401 return fmt.Errorf("%s is a reserved field name for a resource", k) 443 for k, _ := range tsm {
444 if isReservedDataSourceFieldName(k) {
445 return fmt.Errorf("%s is a reserved field name", k)
446 }
402 } 447 }
403 } 448 }
404 449
405 return schemaMap(r.Schema).InternalValidate(tsm) 450 return schemaMap(r.Schema).InternalValidate(tsm)
406} 451}
407 452
408func isReservedResourceFieldName(name string) bool { 453func isReservedDataSourceFieldName(name string) bool {
454 for _, reservedName := range config.ReservedDataSourceFields {
455 if name == reservedName {
456 return true
457 }
458 }
459 return false
460}
461
462func isReservedResourceFieldName(name string, s *Schema) bool {
463 // Allow phasing out "id"
464 // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415
465 if name == "id" && (s.Deprecated != "" || s.Removed != "") {
466 return false
467 }
468
409 for _, reservedName := range config.ReservedResourceFields { 469 for _, reservedName := range config.ReservedResourceFields {
410 if name == reservedName { 470 if name == reservedName {
411 return true 471 return true
@@ -430,6 +490,12 @@ func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
430 panic(err) 490 panic(err)
431 } 491 }
432 492
493 // load the Resource timeouts
494 result.timeouts = r.Timeouts
495 if result.timeouts == nil {
496 result.timeouts = &ResourceTimeout{}
497 }
498
433 // Set the schema version to latest by default 499 // Set the schema version to latest by default
434 result.meta = map[string]interface{}{ 500 result.meta = map[string]interface{}{
435 "schema_version": strconv.Itoa(r.SchemaVersion), 501 "schema_version": strconv.Itoa(r.SchemaVersion),
@@ -450,7 +516,7 @@ func (r *Resource) TestResourceData() *ResourceData {
450// Returns true if the resource is "top level" i.e. not a sub-resource. 516// Returns true if the resource is "top level" i.e. not a sub-resource.
451func (r *Resource) isTopLevel() bool { 517func (r *Resource) isTopLevel() bool {
452 // TODO: This is a heuristic; replace with a definitive attribute? 518 // TODO: This is a heuristic; replace with a definitive attribute?
453 return r.Create != nil 519 return (r.Create != nil || r.Read != nil)
454} 520}
455 521
456// Determines if a given InstanceState needs to be migrated by checking the 522// Determines if a given InstanceState needs to be migrated by checking the
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
index b2bc8f6..6cc01ee 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -35,6 +35,8 @@ type ResourceData struct {
35 partialMap map[string]struct{} 35 partialMap map[string]struct{}
36 once sync.Once 36 once sync.Once
37 isNew bool 37 isNew bool
38
39 panicOnError bool
38} 40}
39 41
40// getResult is the internal structure that is generated when a Get 42// getResult is the internal structure that is generated when a Get
@@ -104,6 +106,22 @@ func (d *ResourceData) GetOk(key string) (interface{}, bool) {
104 return r.Value, exists 106 return r.Value, exists
105} 107}
106 108
109// GetOkExists returns the data for a given key and whether or not the key
110// has been set to a non-zero value. This is only useful for determining
111// if boolean attributes have been set, when they are Optional but do not
112// have a Default value.
113//
114// This is nearly the same function as GetOk, yet it does not check
115// for the zero value of the attribute's type. This allows for attributes
116// without a default to be fully checked for a literal assignment, regardless
117// of the zero-value for that type.
118// This should only be used if absolutely required/needed.
119func (d *ResourceData) GetOkExists(key string) (interface{}, bool) {
120 r := d.getRaw(key, getSourceSet)
121 exists := r.Exists && !r.Computed
122 return r.Value, exists
123}
124
107func (d *ResourceData) getRaw(key string, level getSource) getResult { 125func (d *ResourceData) getRaw(key string, level getSource) getResult {
108 var parts []string 126 var parts []string
109 if key != "" { 127 if key != "" {
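
As a short usage sketch of the GetOkExists helper added in the hunk above: for a hypothetical Optional boolean with no Default, GetOk cannot distinguish an explicit false from "not set" (false is the zero value), while GetOkExists reports whether the user actually assigned a value. The attribute name and helper function are invented for illustration.

package example

import "github.com/hashicorp/terraform/helper/schema"

// expandEnabled assumes a schema entry like:
//
//	"enabled": {Type: schema.TypeBool, Optional: true},
//
// and returns nil when the attribute was omitted, or a pointer to the value
// the user explicitly set (true or false).
func expandEnabled(d *schema.ResourceData) *bool {
	if v, ok := d.GetOkExists("enabled"); ok {
		b := v.(bool)
		return &b // explicitly set in configuration
	}
	return nil // omitted from configuration
}
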
@@ -168,7 +186,11 @@ func (d *ResourceData) Set(key string, value interface{}) error {
168 } 186 }
169 } 187 }
170 188
171 return d.setWriter.WriteField(strings.Split(key, "."), value) 189 err := d.setWriter.WriteField(strings.Split(key, "."), value)
190 if err != nil && d.panicOnError {
191 panic(err)
192 }
193 return err
172} 194}
173 195
174// SetPartial adds the key to the final state output while 196// SetPartial adds the key to the final state output while
@@ -293,6 +315,7 @@ func (d *ResourceData) State() *terraform.InstanceState {
293 315
294 mapW := &MapFieldWriter{Schema: d.schema} 316 mapW := &MapFieldWriter{Schema: d.schema}
295 if err := mapW.WriteField(nil, rawMap); err != nil { 317 if err := mapW.WriteField(nil, rawMap); err != nil {
318 log.Printf("[ERR] Error writing fields: %s", err)
296 return nil 319 return nil
297 } 320 }
298 321
@@ -344,6 +367,13 @@ func (d *ResourceData) State() *terraform.InstanceState {
344func (d *ResourceData) Timeout(key string) time.Duration { 367func (d *ResourceData) Timeout(key string) time.Duration {
345 key = strings.ToLower(key) 368 key = strings.ToLower(key)
346 369
370 // System default of 20 minutes
371 defaultTimeout := 20 * time.Minute
372
373 if d.timeouts == nil {
374 return defaultTimeout
375 }
376
347 var timeout *time.Duration 377 var timeout *time.Duration
348 switch key { 378 switch key {
349 case TimeoutCreate: 379 case TimeoutCreate:
@@ -364,8 +394,7 @@ func (d *ResourceData) Timeout(key string) time.Duration {
364 return *d.timeouts.Default 394 return *d.timeouts.Default
365 } 395 }
366 396
367 // Return system default of 20 minutes 397 return defaultTimeout
368 return 20 * time.Minute
369} 398}
370 399
371func (d *ResourceData) init() { 400func (d *ResourceData) init() {
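
As a usage sketch of the Timeout changes above: provider CRUD functions typically feed the per-operation timeout (or, with this change, the 20-minute fallback when no Timeouts block is configured) into a retry loop. The resource and its readiness check below are hypothetical.

package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

// resourceExampleWidgetCreate polls a hypothetical API until the object is
// ready, bounded by the resource's create timeout (or the 20-minute default).
func resourceExampleWidgetCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId("example")

	return resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
		ready, err := checkWidgetReady(d.Id()) // hypothetical API call
		if err != nil {
			return resource.NonRetryableError(err)
		}
		if !ready {
			return resource.RetryableError(fmt.Errorf("widget %s not ready yet", d.Id()))
		}
		return nil
	})
}

func checkWidgetReady(id string) (bool, error) { return true, nil }
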
@@ -423,7 +452,7 @@ func (d *ResourceData) init() {
423} 452}
424 453
425func (d *ResourceData) diffChange( 454func (d *ResourceData) diffChange(
426 k string) (interface{}, interface{}, bool, bool) { 455 k string) (interface{}, interface{}, bool, bool, bool) {
427 // Get the change between the state and the config. 456 // Get the change between the state and the config.
428 o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) 457 o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
429 if !o.Exists { 458 if !o.Exists {
@@ -434,7 +463,7 @@ func (d *ResourceData) diffChange(
434 } 463 }
435 464
436 // Return the old, new, and whether there is a change 465 // Return the old, new, and whether there is a change
437 return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed 466 return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false
438} 467}
439 468
440func (d *ResourceData) getChange( 469func (d *ResourceData) getChange(
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
new file mode 100644
index 0000000..7db3dec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
@@ -0,0 +1,559 @@
1package schema
2
3import (
4 "errors"
5 "fmt"
6 "reflect"
7 "strings"
8 "sync"
9
10 "github.com/hashicorp/terraform/terraform"
11)
12
13// newValueWriter is a minor re-implementation of MapFieldWriter to include
14// keys that should be marked as computed, to represent the new part of a
15// pseudo-diff.
16type newValueWriter struct {
17 *MapFieldWriter
18
19 // A list of keys that should be marked as computed.
20 computedKeys map[string]bool
21
22 // A lock to prevent races on writes. The underlying writer will have one as
23 // well - this is for computed keys.
24 lock sync.Mutex
25
26 // To be used with init.
27 once sync.Once
28}
29
30// init performs any initialization tasks for the newValueWriter.
31func (w *newValueWriter) init() {
32 if w.computedKeys == nil {
33 w.computedKeys = make(map[string]bool)
34 }
35}
36
37// WriteField overrides MapFieldWriter's WriteField, adding the ability to flag
38// the address as computed.
39func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error {
40 // Fail the write if we have a non-nil value and computed is true.
41 // NewComputed values should not have a value when written.
42 if value != nil && computed {
43 return errors.New("Non-nil value with computed set")
44 }
45
46 if err := w.MapFieldWriter.WriteField(address, value); err != nil {
47 return err
48 }
49
50 w.once.Do(w.init)
51
52 w.lock.Lock()
53 defer w.lock.Unlock()
54 if computed {
55 w.computedKeys[strings.Join(address, ".")] = true
56 }
57 return nil
58}
59
60// ComputedKeysMap returns the underlying computed keys map.
61func (w *newValueWriter) ComputedKeysMap() map[string]bool {
62 w.once.Do(w.init)
63 return w.computedKeys
64}
65
66// newValueReader is a minor re-implementation of MapFieldReader and is the
67// read counterpart to newValueWriter, allowing the read of keys flagged as
68// computed to accommodate the diff override logic in ResourceDiff.
69type newValueReader struct {
70 *MapFieldReader
71
72 // The list of computed keys from a newValueWriter.
73 computedKeys map[string]bool
74}
75
76// ReadField reads the values from the underlying writer, returning the
77// computed value if it is found as well.
78func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) {
79 addrKey := strings.Join(address, ".")
80 v, err := r.MapFieldReader.ReadField(address)
81 if err != nil {
82 return FieldReadResult{}, err
83 }
84 for computedKey := range r.computedKeys {
85 if childAddrOf(addrKey, computedKey) {
86 if strings.HasSuffix(addrKey, ".#") {
87 // This is a count value for a list or set that has been marked as
88 // computed, or a sub-list/sub-set of a complex resource that has
89 // been marked as computed. We need to pass through to other readers
90 // so that an accurate previous count can be fetched for the diff.
91 v.Exists = false
92 }
93 v.Computed = true
94 }
95 }
96
97 return v, nil
98}
99
100// ResourceDiff is used to query and make custom changes to an in-flight diff.
101// It can be used to veto particular changes in the diff, customize the diff
102// that has been created, or diff values not controlled by config.
103//
104// The object functions similarly to ResourceData, but most notably lacks
105// Set, SetPartial, and Partial, as it should be used to change diff values
106// only. Most other first-class ResourceData functions exist, namely Get,
107// GetOk, HasChange, and GetChange.
108//
109// All functions in ResourceDiff, save for ForceNew, can only be used on
110// computed fields.
111type ResourceDiff struct {
112 // The schema for the resource being worked on.
113 schema map[string]*Schema
114
115 // The current config for this resource.
116 config *terraform.ResourceConfig
117
118 // The state for this resource as it exists post-refresh, after the initial
119 // diff.
120 state *terraform.InstanceState
121
122 // The diff created by Terraform. This diff is used, along with state,
123 // config, and custom-set diff data, to provide a multi-level reader
124 // experience similar to ResourceData.
125 diff *terraform.InstanceDiff
126
127 // The internal reader structure that contains the state, config, the default
128 // diff, and the new diff.
129 multiReader *MultiLevelFieldReader
130
131 // A writer that writes overridden new fields.
132 newWriter *newValueWriter
133
134 // Tracks which keys have been updated by ResourceDiff to ensure that the
135 // diff does not get re-run on keys that were not touched, or diffs that were
136 // just removed (re-running on the latter would just roll back the removal).
137 updatedKeys map[string]bool
138
139 // Tracks which keys were flagged as forceNew. These keys are not saved in
140 // newWriter, but we need to track them so that they can be re-diffed later.
141 forcedNewKeys map[string]bool
142}
143
144// newResourceDiff creates a new ResourceDiff instance.
145func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff {
146 d := &ResourceDiff{
147 config: config,
148 state: state,
149 diff: diff,
150 schema: schema,
151 }
152
153 d.newWriter = &newValueWriter{
154 MapFieldWriter: &MapFieldWriter{Schema: d.schema},
155 }
156 readers := make(map[string]FieldReader)
157 var stateAttributes map[string]string
158 if d.state != nil {
159 stateAttributes = d.state.Attributes
160 readers["state"] = &MapFieldReader{
161 Schema: d.schema,
162 Map: BasicMapReader(stateAttributes),
163 }
164 }
165 if d.config != nil {
166 readers["config"] = &ConfigFieldReader{
167 Schema: d.schema,
168 Config: d.config,
169 }
170 }
171 if d.diff != nil {
172 readers["diff"] = &DiffFieldReader{
173 Schema: d.schema,
174 Diff: d.diff,
175 Source: &MultiLevelFieldReader{
176 Levels: []string{"state", "config"},
177 Readers: readers,
178 },
179 }
180 }
181 readers["newDiff"] = &newValueReader{
182 MapFieldReader: &MapFieldReader{
183 Schema: d.schema,
184 Map: BasicMapReader(d.newWriter.Map()),
185 },
186 computedKeys: d.newWriter.ComputedKeysMap(),
187 }
188 d.multiReader = &MultiLevelFieldReader{
189 Levels: []string{
190 "state",
191 "config",
192 "diff",
193 "newDiff",
194 },
195
196 Readers: readers,
197 }
198
199 d.updatedKeys = make(map[string]bool)
200 d.forcedNewKeys = make(map[string]bool)
201
202 return d
203}
204
205// UpdatedKeys returns the keys that were updated by this ResourceDiff run.
206// These are the only keys that a diff should be re-calculated for.
207//
208// This is the combined result of both keys for which diff values were updated
209// or cleared, and also keys that were flagged to be re-diffed as a result
210// of ForceNew.
211func (d *ResourceDiff) UpdatedKeys() []string {
212 var s []string
213 for k := range d.updatedKeys {
214 s = append(s, k)
215 }
216 for k := range d.forcedNewKeys {
217 for _, l := range s {
218 if k == l {
219 break
220 }
221 }
222 s = append(s, k)
223 }
224 return s
225}
226
227// Clear wipes the diff for a particular key. It is called by ResourceDiff's
228// functionality to remove any possibility of conflicts, but can be called on
229// its own to just remove a specific key from the diff completely.
230//
231// Note that this does not wipe an override. This function is only allowed on
232// computed keys.
233func (d *ResourceDiff) Clear(key string) error {
234 if err := d.checkKey(key, "Clear", true); err != nil {
235 return err
236 }
237
238 return d.clear(key)
239}
240
241func (d *ResourceDiff) clear(key string) error {
242 // Check the schema to make sure that this key exists first.
243 schemaL := addrToSchema(strings.Split(key, "."), d.schema)
244 if len(schemaL) == 0 {
245 return fmt.Errorf("%s is not a valid key", key)
246 }
247
248 for k := range d.diff.Attributes {
249 if strings.HasPrefix(k, key) {
250 delete(d.diff.Attributes, k)
251 }
252 }
253 return nil
254}
255
256// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff
257// where we need to act on all nested fields
258// without calling out each one separately
259func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string {
260 keys := make([]string, 0)
261 for k := range d.diff.Attributes {
262 if strings.HasPrefix(k, prefix) {
263 keys = append(keys, k)
264 }
265 }
266 return keys
267}
268
269// diffChange helps to implement resourceDiffer and derives its change values
270// from ResourceDiff's own change data, in addition to existing diff, config, and state.
271func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) {
272 old, new, customized := d.getChange(key)
273
274 if !old.Exists {
275 old.Value = nil
276 }
277 if !new.Exists || d.removed(key) {
278 new.Value = nil
279 }
280
281 return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized
282}
283
284// SetNew is used to set a new diff value for the mentioned key. The value must
285// be correct for the attribute's schema (mostly relevant for maps, lists, and
286// sets). The original value from the state is used as the old value.
287//
288// This function is only allowed on computed attributes.
289func (d *ResourceDiff) SetNew(key string, value interface{}) error {
290 if err := d.checkKey(key, "SetNew", false); err != nil {
291 return err
292 }
293
294 return d.setDiff(key, value, false)
295}
296
297// SetNewComputed functions like SetNew, except that it blanks out a new value
298// and marks it as computed.
299//
300// This function is only allowed on computed attributes.
301func (d *ResourceDiff) SetNewComputed(key string) error {
302 if err := d.checkKey(key, "SetNewComputed", false); err != nil {
303 return err
304 }
305
306 return d.setDiff(key, nil, true)
307}
308
309// setDiff performs common diff setting behaviour.
310func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error {
311 if err := d.clear(key); err != nil {
312 return err
313 }
314
315 if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil {
316 return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err)
317 }
318
319 d.updatedKeys[key] = true
320
321 return nil
322}
323
324// ForceNew force-flags ForceNew in the schema for a specific key, and
325// re-calculates its diff, effectively causing this attribute to force a new
326// resource.
327//
328// Keep in mind that forcing a new resource will force a second run of the
329// resource's CustomizeDiff function (with a new ResourceDiff) once the current
330// one has completed. This second run is performed without state. This behavior
331// will be the same as if a new resource is being created and is performed to
332// ensure that the diff looks like the diff for a new resource as much as
333// possible. CustomizeDiff should expect such a scenario and act correctly.
334//
335// This function is a no-op/error if there is no diff.
336//
337// Note that the change to schema is permanent for the lifecycle of this
338// specific ResourceDiff instance.
339func (d *ResourceDiff) ForceNew(key string) error {
340 if !d.HasChange(key) {
341 return fmt.Errorf("ForceNew: No changes for %s", key)
342 }
343
344 keyParts := strings.Split(key, ".")
345 var schema *Schema
346 schemaL := addrToSchema(keyParts, d.schema)
347 if len(schemaL) > 0 {
348 schema = schemaL[len(schemaL)-1]
349 } else {
350 return fmt.Errorf("ForceNew: %s is not a valid key", key)
351 }
352
353 schema.ForceNew = true
354
355 // Flag this for a re-diff. Don't save any values to guarantee that existing
356 // diffs aren't messed with, as this gets messy when dealing with complex
357 // structures, zero values, etc.
358 d.forcedNewKeys[keyParts[0]] = true
359
360 return nil
361}
362
363// Get hands off to ResourceData.Get.
364func (d *ResourceDiff) Get(key string) interface{} {
365 r, _ := d.GetOk(key)
366 return r
367}
368
369// GetChange gets the change between the state and diff, checking first to see
370// if an overridden diff exists.
371//
372// This implementation differs from ResourceData's in the way that we first get
373// results from the exact levels for the new diff, then from state and diff as
374// per normal.
375func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) {
376 old, new, _ := d.getChange(key)
377 return old.Value, new.Value
378}
379
380// GetOk functions the same way as ResourceData.GetOk, but it also checks the
381// new diff levels to provide data consistent with the current state of the
382// customized diff.
383func (d *ResourceDiff) GetOk(key string) (interface{}, bool) {
384 r := d.get(strings.Split(key, "."), "newDiff")
385 exists := r.Exists && !r.Computed
386 if exists {
387 // If it exists, we also want to verify it is not the zero-value.
388 value := r.Value
389 zero := r.Schema.Type.Zero()
390
391 if eq, ok := value.(Equal); ok {
392 exists = !eq.Equal(zero)
393 } else {
394 exists = !reflect.DeepEqual(value, zero)
395 }
396 }
397
398 return r.Value, exists
399}
400
401// GetOkExists functions the same way as GetOkExists within ResourceData, but
402// it also checks the new diff levels to provide data consistent with the
403// current state of the customized diff.
404//
405// This is nearly the same function as GetOk, yet it does not check
406// for the zero value of the attribute's type. This allows for attributes
407// without a default to be fully checked for a literal assignment, regardless
408// of the zero-value for that type.
409func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) {
410 r := d.get(strings.Split(key, "."), "newDiff")
411 exists := r.Exists && !r.Computed
412 return r.Value, exists
413}
414
415// NewValueKnown returns true if the new value for the given key is available
416// as its final value at diff time. If the return value is false, this means
417// either the value is based on interpolation that was unavailable at diff
418// time, or that the value was explicitly marked as computed by SetNewComputed.
419func (d *ResourceDiff) NewValueKnown(key string) bool {
420 r := d.get(strings.Split(key, "."), "newDiff")
421 return !r.Computed
422}
423
424// HasChange checks to see if there is a change between state and the diff, or
425// in the overridden diff.
426func (d *ResourceDiff) HasChange(key string) bool {
427 old, new := d.GetChange(key)
428
429 // If the type implements the Equal interface, then call that
430 // instead of just doing a reflect.DeepEqual. An example where this is
431 // needed is *Set
432 if eq, ok := old.(Equal); ok {
433 return !eq.Equal(new)
434 }
435
436 return !reflect.DeepEqual(old, new)
437}
438
439// Id returns the ID of this resource.
440//
441// Note that technically, ID does not change during diffs (it either has
442// already changed in the refresh, or will change on update), hence we do not
443// support updating the ID or fetching it from anything else other than state.
444func (d *ResourceDiff) Id() string {
445 var result string
446
447 if d.state != nil {
448 result = d.state.ID
449 }
450 return result
451}
452
453// getChange gets values from two different levels, designed for use in
454// diffChange, HasChange, and GetChange.
455//
456// This implementation differs from ResourceData's in the way that we first get
457// results from the exact levels for the new diff, then from state and diff as
458// per normal.
459func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) {
460 old := d.get(strings.Split(key, "."), "state")
461 var new getResult
462 for p := range d.updatedKeys {
463 if childAddrOf(key, p) {
464 new = d.getExact(strings.Split(key, "."), "newDiff")
465 return old, new, true
466 }
467 }
468 new = d.get(strings.Split(key, "."), "newDiff")
469 return old, new, false
470}
471
472// removed checks to see if the key is present in the existing, pre-customized
473// diff and if it was marked as NewRemoved.
474func (d *ResourceDiff) removed(k string) bool {
475 diff, ok := d.diff.Attributes[k]
476 if !ok {
477 return false
478 }
479 return diff.NewRemoved
480}
481
482// get performs the appropriate multi-level reader logic for ResourceDiff,
483// starting at source. Refer to newResourceDiff for the level order.
484func (d *ResourceDiff) get(addr []string, source string) getResult {
485 result, err := d.multiReader.ReadFieldMerge(addr, source)
486 if err != nil {
487 panic(err)
488 }
489
490 return d.finalizeResult(addr, result)
491}
492
493// getExact gets an attribute from the exact level referenced by source.
494func (d *ResourceDiff) getExact(addr []string, source string) getResult {
495 result, err := d.multiReader.ReadFieldExact(addr, source)
496 if err != nil {
497 panic(err)
498 }
499
500 return d.finalizeResult(addr, result)
501}
502
503// finalizeResult does some post-processing of the result produced by get and getExact.
504func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult {
505 // If the result doesn't exist, then we set the value to the zero value
506 var schema *Schema
507 if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
508 schema = schemaL[len(schemaL)-1]
509 }
510
511 if result.Value == nil && schema != nil {
512 result.Value = result.ValueOrZero(schema)
513 }
514
515 // Transform the FieldReadResult into a getResult. It might be worth
516 // merging these two structures one day.
517 return getResult{
518 Value: result.Value,
519 ValueProcessed: result.ValueProcessed,
520 Computed: result.Computed,
521 Exists: result.Exists,
522 Schema: schema,
523 }
524}
525
526// childAddrOf does a comparison of two addresses to see if one is the child of
527// the other.
528func childAddrOf(child, parent string) bool {
529 cs := strings.Split(child, ".")
530 ps := strings.Split(parent, ".")
531 if len(ps) > len(cs) {
532 return false
533 }
534 return reflect.DeepEqual(ps, cs[:len(ps)])
535}
536
537// checkKey checks the key to make sure it exists and is computed.
538func (d *ResourceDiff) checkKey(key, caller string, nested bool) error {
539 var schema *Schema
540 if nested {
541 keyParts := strings.Split(key, ".")
542 schemaL := addrToSchema(keyParts, d.schema)
543 if len(schemaL) > 0 {
544 schema = schemaL[len(schemaL)-1]
545 }
546 } else {
547 s, ok := d.schema[key]
548 if ok {
549 schema = s
550 }
551 }
552 if schema == nil {
553 return fmt.Errorf("%s: invalid key: %s", caller, key)
554 }
555 if !schema.Computed {
556 return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key)
557 }
558 return nil
559}
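
To make the ResourceDiff surface above concrete, here is a hedged sketch of a CustomizeDiff body that uses GetChangedKeysPrefix, NewValueKnown, SetNewComputed, and ForceNew together. The attribute layout (a "rule" list with a nested "priority", plus a computed "fingerprint") is invented for illustration and is not part of this diff.

package example

import (
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
)

// customizeExampleDiff assumes a schema roughly like:
//
//	"rule":        {Type: schema.TypeList, Optional: true, Elem: &schema.Resource{...}},
//	"fingerprint": {Type: schema.TypeString, Computed: true},
//
// Any change under "rule" invalidates the computed "fingerprint", and a change
// to a rule's "priority" is treated as requiring resource replacement.
func customizeExampleDiff(d *schema.ResourceDiff, meta interface{}) error {
	changed := d.GetChangedKeysPrefix("rule")
	if len(changed) == 0 {
		return nil
	}

	// Only blank out the fingerprint if it is not already marked as unknown.
	if d.NewValueKnown("fingerprint") {
		if err := d.SetNewComputed("fingerprint"); err != nil {
			return err
		}
	}

	for _, k := range changed {
		if strings.HasSuffix(k, ".priority") {
			// Forces replacement; note this re-runs CustomizeDiff once more without state.
			return d.ForceNew(k)
		}
	}
	return nil
}
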
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
index acb5618..0ea5aad 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -21,9 +21,13 @@ import (
21 "strings" 21 "strings"
22 22
23 "github.com/hashicorp/terraform/terraform" 23 "github.com/hashicorp/terraform/terraform"
24 "github.com/mitchellh/copystructure"
24 "github.com/mitchellh/mapstructure" 25 "github.com/mitchellh/mapstructure"
25) 26)
26 27
28// Name of ENV variable which (if not empty) prefers panic over error
29const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR"
30
27// type used for schema package context keys 31// type used for schema package context keys
28type contextKey string 32type contextKey string
29 33
@@ -116,12 +120,16 @@ type Schema struct {
116 ForceNew bool 120 ForceNew bool
117 StateFunc SchemaStateFunc 121 StateFunc SchemaStateFunc
118 122
119 // The following fields are only set for a TypeList or TypeSet Type. 123 // The following fields are only set for a TypeList, TypeSet, or TypeMap.
120 // 124 //
121 // Elem must be either a *Schema or a *Resource only if the Type is 125 // Elem represents the element type. For a TypeMap, it must be a *Schema
122 // TypeList, and represents what the element type is. If it is *Schema, 126 // with a Type of TypeString, otherwise it may be either a *Schema or a
123 // the element type is just a simple value. If it is *Resource, the 127 // *Resource. If it is *Schema, the element type is just a simple value.
124 // element type is a complex structure, potentially with its own lifecycle. 128 // If it is *Resource, the element type is a complex structure,
129 // potentially with its own lifecycle.
130 Elem interface{}
131
132 // The following fields are only set for a TypeList or TypeSet.
125 // 133 //
126 // MaxItems defines a maximum amount of items that can exist within a 134 // MaxItems defines a maximum amount of items that can exist within a
127 // TypeSet or TypeList. Specific use cases would be if a TypeSet is being 135 // TypeSet or TypeList. Specific use cases would be if a TypeSet is being
@@ -138,7 +146,6 @@ type Schema struct {
138 // ["foo"] automatically. This is primarily for legacy reasons and the 146 // ["foo"] automatically. This is primarily for legacy reasons and the
139 // ambiguity is not recommended for new usage. Promotion is only allowed 147 // ambiguity is not recommended for new usage. Promotion is only allowed
140 // for primitive element types. 148 // for primitive element types.
141 Elem interface{}
142 MaxItems int 149 MaxItems int
143 MinItems int 150 MinItems int
144 PromoteSingle bool 151 PromoteSingle bool
@@ -192,7 +199,7 @@ type Schema struct {
192 Sensitive bool 199 Sensitive bool
193} 200}
194 201
195// SchemaDiffSuppresFunc is a function which can be used to determine 202// SchemaDiffSuppressFunc is a function which can be used to determine
196// whether a detected diff on a schema element is "valid" or not, and 203// whether a detected diff on a schema element is "valid" or not, and
197// suppress it from the plan if necessary. 204// suppress it from the plan if necessary.
198// 205//
@@ -289,8 +296,7 @@ func (s *Schema) ZeroValue() interface{} {
289 } 296 }
290} 297}
291 298
292func (s *Schema) finalizeDiff( 299func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff {
293 d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
294 if d == nil { 300 if d == nil {
295 return d 301 return d
296 } 302 }
@@ -331,13 +337,20 @@ func (s *Schema) finalizeDiff(
331 } 337 }
332 338
333 if s.Computed { 339 if s.Computed {
334 if d.Old != "" && d.New == "" { 340 // FIXME: This is where the customized bool from getChange finally
335 // This is a computed value with an old value set already, 341 // comes into play. It allows the previously incorrect behavior
336 // just let it go. 342 // of an empty string being used as "unset" when the value is
337 return nil 343 // computed. This should be removed once we can properly
344 // represent an unset/nil value from the configuration.
345 if !customized {
346 if d.Old != "" && d.New == "" {
347 // This is a computed value with an old value set already,
348 // just let it go.
349 return nil
350 }
338 } 351 }
339 352
340 if d.New == "" { 353 if d.New == "" && !d.NewComputed {
341 // Computed attribute without a new value set 354 // Computed attribute without a new value set
342 d.NewComputed = true 355 d.NewComputed = true
343 } 356 }
@@ -354,6 +367,13 @@ func (s *Schema) finalizeDiff(
354// schemaMap is a wrapper that adds nice functions on top of schemas. 367// schemaMap is a wrapper that adds nice functions on top of schemas.
355type schemaMap map[string]*Schema 368type schemaMap map[string]*Schema
356 369
370func (m schemaMap) panicOnError() bool {
371 if os.Getenv(PanicOnErr) != "" {
372 return true
373 }
374 return false
375}
376
357// Data returns a ResourceData for the given schema, state, and diff. 377// Data returns a ResourceData for the given schema, state, and diff.
358// 378//
359// The diff is optional. 379// The diff is optional.
@@ -361,17 +381,30 @@ func (m schemaMap) Data(
361 s *terraform.InstanceState, 381 s *terraform.InstanceState,
362 d *terraform.InstanceDiff) (*ResourceData, error) { 382 d *terraform.InstanceDiff) (*ResourceData, error) {
363 return &ResourceData{ 383 return &ResourceData{
364 schema: m, 384 schema: m,
365 state: s, 385 state: s,
366 diff: d, 386 diff: d,
387 panicOnError: m.panicOnError(),
367 }, nil 388 }, nil
368} 389}
369 390
391// DeepCopy returns a copy of this schemaMap. The copy can be safely modified
392// without affecting the original.
393func (m *schemaMap) DeepCopy() schemaMap {
394 copy, err := copystructure.Config{Lock: true}.Copy(m)
395 if err != nil {
396 panic(err)
397 }
398 return *copy.(*schemaMap)
399}
400
370// Diff returns the diff for a resource given the schema map, 401// Diff returns the diff for a resource given the schema map,
371// state, and configuration. 402// state, and configuration.
372func (m schemaMap) Diff( 403func (m schemaMap) Diff(
373 s *terraform.InstanceState, 404 s *terraform.InstanceState,
374 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { 405 c *terraform.ResourceConfig,
406 customizeDiff CustomizeDiffFunc,
407 meta interface{}) (*terraform.InstanceDiff, error) {
375 result := new(terraform.InstanceDiff) 408 result := new(terraform.InstanceDiff)
376 result.Attributes = make(map[string]*terraform.ResourceAttrDiff) 409 result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
377 410
@@ -381,9 +414,10 @@ func (m schemaMap) Diff(
381 } 414 }
382 415
383 d := &ResourceData{ 416 d := &ResourceData{
384 schema: m, 417 schema: m,
385 state: s, 418 state: s,
386 config: c, 419 config: c,
420 panicOnError: m.panicOnError(),
387 } 421 }
388 422
389 for k, schema := range m { 423 for k, schema := range m {
@@ -393,6 +427,29 @@ func (m schemaMap) Diff(
393 } 427 }
394 } 428 }
395 429
430 // Remove any nil diffs just to keep things clean
431 for k, v := range result.Attributes {
432 if v == nil {
433 delete(result.Attributes, k)
434 }
435 }
436
437 // If this is a non-destroy diff, call any custom diff logic that has been
438 // defined.
439 if !result.DestroyTainted && customizeDiff != nil {
440 mc := m.DeepCopy()
441 rd := newResourceDiff(mc, c, s, result)
442 if err := customizeDiff(rd, meta); err != nil {
443 return nil, err
444 }
445 for _, k := range rd.UpdatedKeys() {
446 err := m.diff(k, mc[k], result, rd, false)
447 if err != nil {
448 return nil, err
449 }
450 }
451 }
452
396 // If the diff requires a new resource, then we recompute the diff 453 // If the diff requires a new resource, then we recompute the diff
397 // so we have the complete new resource diff, and preserve the 454 // so we have the complete new resource diff, and preserve the
398 // RequiresNew fields where necessary so the user knows exactly what 455 // RequiresNew fields where necessary so the user knows exactly what
@@ -418,6 +475,21 @@ func (m schemaMap) Diff(
418 } 475 }
419 } 476 }
420 477
478 // Re-run customization
479 if !result2.DestroyTainted && customizeDiff != nil {
480 mc := m.DeepCopy()
481 rd := newResourceDiff(mc, c, d.state, result2)
482 if err := customizeDiff(rd, meta); err != nil {
483 return nil, err
484 }
485 for _, k := range rd.UpdatedKeys() {
486 err := m.diff(k, mc[k], result2, rd, false)
487 if err != nil {
488 return nil, err
489 }
490 }
491 }
492
421 // Force all the fields to not force a new since we know what we 493 // Force all the fields to not force a new since we know what we
422 // want to force new. 494 // want to force new.
423 for k, attr := range result2.Attributes { 495 for k, attr := range result2.Attributes {
@@ -456,13 +528,6 @@ func (m schemaMap) Diff(
456 result = result2 528 result = result2
457 } 529 }
458 530
459 // Remove any nil diffs just to keep things clean
460 for k, v := range result.Attributes {
461 if v == nil {
462 delete(result.Attributes, k)
463 }
464 }
465
466 // Go through and detect all of the ComputedWhens now that we've 531 // Go through and detect all of the ComputedWhens now that we've
467 // finished the diff. 532 // finished the diff.
468 // TODO 533 // TODO
@@ -681,11 +746,23 @@ func isValidFieldName(name string) bool {
681 return re.MatchString(name) 746 return re.MatchString(name)
682} 747}
683 748
749// resourceDiffer is an interface that is used by the private diff functions.
750// This helps facilitate diff logic for both ResourceData and ResourceDiff with
751// minimal divergence in code.
752type resourceDiffer interface {
753 diffChange(string) (interface{}, interface{}, bool, bool, bool)
754 Get(string) interface{}
755 GetChange(string) (interface{}, interface{})
756 GetOk(string) (interface{}, bool)
757 HasChange(string) bool
758 Id() string
759}
760
684func (m schemaMap) diff( 761func (m schemaMap) diff(
685 k string, 762 k string,
686 schema *Schema, 763 schema *Schema,
687 diff *terraform.InstanceDiff, 764 diff *terraform.InstanceDiff,
688 d *ResourceData, 765 d resourceDiffer,
689 all bool) error { 766 all bool) error {
690 767
691 unsupressedDiff := new(terraform.InstanceDiff) 768 unsupressedDiff := new(terraform.InstanceDiff)
@@ -706,12 +783,14 @@ func (m schemaMap) diff(
706 } 783 }
707 784
708 for attrK, attrV := range unsupressedDiff.Attributes { 785 for attrK, attrV := range unsupressedDiff.Attributes {
709 if schema.DiffSuppressFunc != nil && 786 switch rd := d.(type) {
710 attrV != nil && 787 case *ResourceData:
711 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) { 788 if schema.DiffSuppressFunc != nil &&
712 continue 789 attrV != nil &&
790 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) {
791 continue
792 }
713 } 793 }
714
715 diff.Attributes[attrK] = attrV 794 diff.Attributes[attrK] = attrV
716 } 795 }
717 796
@@ -722,9 +801,9 @@ func (m schemaMap) diffList(
722 k string, 801 k string,
723 schema *Schema, 802 schema *Schema,
724 diff *terraform.InstanceDiff, 803 diff *terraform.InstanceDiff,
725 d *ResourceData, 804 d resourceDiffer,
726 all bool) error { 805 all bool) error {
727 o, n, _, computedList := d.diffChange(k) 806 o, n, _, computedList, customized := d.diffChange(k)
728 if computedList { 807 if computedList {
729 n = nil 808 n = nil
730 } 809 }
@@ -791,10 +870,13 @@ func (m schemaMap) diffList(
791 oldStr = "" 870 oldStr = ""
792 } 871 }
793 872
794 diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ 873 diff.Attributes[k+".#"] = countSchema.finalizeDiff(
795 Old: oldStr, 874 &terraform.ResourceAttrDiff{
796 New: newStr, 875 Old: oldStr,
797 }) 876 New: newStr,
877 },
878 customized,
879 )
798 } 880 }
799 881
800 // Figure out the maximum 882 // Figure out the maximum
@@ -841,13 +923,13 @@ func (m schemaMap) diffMap(
841 k string, 923 k string,
842 schema *Schema, 924 schema *Schema,
843 diff *terraform.InstanceDiff, 925 diff *terraform.InstanceDiff,
844 d *ResourceData, 926 d resourceDiffer,
845 all bool) error { 927 all bool) error {
846 prefix := k + "." 928 prefix := k + "."
847 929
848 // First get all the values from the state 930 // First get all the values from the state
849 var stateMap, configMap map[string]string 931 var stateMap, configMap map[string]string
850 o, n, _, nComputed := d.diffChange(k) 932 o, n, _, nComputed, customized := d.diffChange(k)
851 if err := mapstructure.WeakDecode(o, &stateMap); err != nil { 933 if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
852 return fmt.Errorf("%s: %s", k, err) 934 return fmt.Errorf("%s: %s", k, err)
853 } 935 }
@@ -899,6 +981,7 @@ func (m schemaMap) diffMap(
899 Old: oldStr, 981 Old: oldStr,
900 New: newStr, 982 New: newStr,
901 }, 983 },
984 customized,
902 ) 985 )
903 } 986 }
904 987
@@ -916,16 +999,22 @@ func (m schemaMap) diffMap(
916 continue 999 continue
917 } 1000 }
918 1001
919 diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ 1002 diff.Attributes[prefix+k] = schema.finalizeDiff(
920 Old: old, 1003 &terraform.ResourceAttrDiff{
921 New: v, 1004 Old: old,
922 }) 1005 New: v,
1006 },
1007 customized,
1008 )
923 } 1009 }
924 for k, v := range stateMap { 1010 for k, v := range stateMap {
925 diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ 1011 diff.Attributes[prefix+k] = schema.finalizeDiff(
926 Old: v, 1012 &terraform.ResourceAttrDiff{
927 NewRemoved: true, 1013 Old: v,
928 }) 1014 NewRemoved: true,
1015 },
1016 customized,
1017 )
929 } 1018 }
930 1019
931 return nil 1020 return nil
@@ -935,10 +1024,10 @@ func (m schemaMap) diffSet(
935 k string, 1024 k string,
936 schema *Schema, 1025 schema *Schema,
937 diff *terraform.InstanceDiff, 1026 diff *terraform.InstanceDiff,
938 d *ResourceData, 1027 d resourceDiffer,
939 all bool) error { 1028 all bool) error {
940 1029
941 o, n, _, computedSet := d.diffChange(k) 1030 o, n, _, computedSet, customized := d.diffChange(k)
942 if computedSet { 1031 if computedSet {
943 n = nil 1032 n = nil
944 } 1033 }
@@ -997,20 +1086,26 @@ func (m schemaMap) diffSet(
997 countStr = "" 1086 countStr = ""
998 } 1087 }
999 1088
1000 diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ 1089 diff.Attributes[k+".#"] = countSchema.finalizeDiff(
1001 Old: countStr, 1090 &terraform.ResourceAttrDiff{
1002 NewComputed: true, 1091 Old: countStr,
1003 }) 1092 NewComputed: true,
1093 },
1094 customized,
1095 )
1004 return nil 1096 return nil
1005 } 1097 }
1006 1098
1007 // If the counts are not the same, then record that diff 1099 // If the counts are not the same, then record that diff
1008 changed := oldLen != newLen 1100 changed := oldLen != newLen
1009 if changed || all { 1101 if changed || all {
1010 diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ 1102 diff.Attributes[k+".#"] = countSchema.finalizeDiff(
1011 Old: oldStr, 1103 &terraform.ResourceAttrDiff{
1012 New: newStr, 1104 Old: oldStr,
1013 }) 1105 New: newStr,
1106 },
1107 customized,
1108 )
1014 } 1109 }
1015 1110
1016 // Build the list of codes that will make up our set. This is the 1111 // Build the list of codes that will make up our set. This is the
@@ -1056,11 +1151,11 @@ func (m schemaMap) diffString(
1056 k string, 1151 k string,
1057 schema *Schema, 1152 schema *Schema,
1058 diff *terraform.InstanceDiff, 1153 diff *terraform.InstanceDiff,
1059 d *ResourceData, 1154 d resourceDiffer,
1060 all bool) error { 1155 all bool) error {
1061 var originalN interface{} 1156 var originalN interface{}
1062 var os, ns string 1157 var os, ns string
1063 o, n, _, computed := d.diffChange(k) 1158 o, n, _, computed, customized := d.diffChange(k)
1064 if schema.StateFunc != nil && n != nil { 1159 if schema.StateFunc != nil && n != nil {
1065 originalN = n 1160 originalN = n
1066 n = schema.StateFunc(n) 1161 n = schema.StateFunc(n)
@@ -1090,20 +1185,23 @@ func (m schemaMap) diffString(
1090 } 1185 }
1091 1186
1092 removed := false 1187 removed := false
1093 if o != nil && n == nil { 1188 if o != nil && n == nil && !computed {
1094 removed = true 1189 removed = true
1095 } 1190 }
1096 if removed && schema.Computed { 1191 if removed && schema.Computed {
1097 return nil 1192 return nil
1098 } 1193 }
1099 1194
1100 diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ 1195 diff.Attributes[k] = schema.finalizeDiff(
1101 Old: os, 1196 &terraform.ResourceAttrDiff{
1102 New: ns, 1197 Old: os,
1103 NewExtra: originalN, 1198 New: ns,
1104 NewRemoved: removed, 1199 NewExtra: originalN,
1105 NewComputed: computed, 1200 NewRemoved: removed,
1106 }) 1201 NewComputed: computed,
1202 },
1203 customized,
1204 )
1107 1205
1108 return nil 1206 return nil
1109} 1207}
@@ -1172,9 +1270,9 @@ func (m schemaMap) validateConflictingAttributes(
1172 } 1270 }
1173 1271
1174 for _, conflicting_key := range schema.ConflictsWith { 1272 for _, conflicting_key := range schema.ConflictsWith {
1175 if value, ok := c.Get(conflicting_key); ok { 1273 if _, ok := c.Get(conflicting_key); ok {
1176 return fmt.Errorf( 1274 return fmt.Errorf(
1177 "%q: conflicts with %s (%#v)", k, conflicting_key, value) 1275 "%q: conflicts with %s", k, conflicting_key)
1178 } 1276 }
1179 } 1277 }
1180 1278
@@ -1363,13 +1461,10 @@ func getValueType(k string, schema *Schema) (ValueType, error) {
1363 return vt, nil 1461 return vt, nil
1364 } 1462 }
1365 1463
1464 // If a Schema is provided to a Map, we use the Type of that schema
1465 // as the type for each element in the Map.
1366 if s, ok := schema.Elem.(*Schema); ok { 1466 if s, ok := schema.Elem.(*Schema); ok {
1367 if s.Elem == nil { 1467 return s.Type, nil
1368 return TypeString, nil
1369 }
1370 if vt, ok := s.Elem.(ValueType); ok {
1371 return vt, nil
1372 }
1373 } 1468 }
1374 1469
1375 if _, ok := schema.Elem.(*Resource); ok { 1470 if _, ok := schema.Elem.(*Resource); ok {
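
A brief, hypothetical illustration of the Elem behaviour documented above: for TypeMap, Elem must be a *Schema of TypeString, while list and set elements may be any simple *Schema type (or a *Resource for complex structures). The attribute names here are invented.

package example

import "github.com/hashicorp/terraform/helper/schema"

// exampleElemSchemas shows Elem as an element-type descriptor.
var exampleElemSchemas = map[string]*schema.Schema{
	"tags": {
		Type:     schema.TypeMap,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeString},
	},
	"ports": {
		Type:     schema.TypeList,
		Optional: true,
		Elem:     &schema.Schema{Type: schema.TypeInt},
	},
}
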
@@ -1430,7 +1525,6 @@ func (m schemaMap) validatePrimitive(
1430 raw interface{}, 1525 raw interface{},
1431 schema *Schema, 1526 schema *Schema,
1432 c *terraform.ResourceConfig) ([]string, []error) { 1527 c *terraform.ResourceConfig) ([]string, []error) {
1433
1434 // Catch if the user gave a complex type where a primitive was 1528 // Catch if the user gave a complex type where a primitive was
1435 // expected, so we can return a friendly error message that 1529 // expected, so we can return a friendly error message that
1436 // doesn't contain Go type system terminology. 1530 // doesn't contain Go type system terminology.
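
The PanicOnErr constant and panicOnError field added above make failed ResourceData.Set calls panic whenever the TF_SCHEMA_PANIC_ON_ERROR environment variable is non-empty. Below is a minimal sketch of opting in from a provider's test binary; the TestMain wrapper is an assumption for illustration, not part of this diff.

package example

import (
	"os"
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
)

// TestMain opts all tests in this package into the stricter behaviour: with
// the variable set, a Set call that fails (for example, a value that does not
// match the schema) panics instead of returning an easily ignored error.
func TestMain(m *testing.M) {
	os.Setenv(schema.PanicOnErr, "1")
	os.Exit(m.Run())
}
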
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
index de05f40..cba2890 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -17,6 +17,12 @@ func HashString(v interface{}) int {
17 return hashcode.String(v.(string)) 17 return hashcode.String(v.(string))
18} 18}
19 19
20// HashInt hashes integers. If you want a Set of integers, this is the
21// SchemaSetFunc you want.
22func HashInt(v interface{}) int {
23 return hashcode.String(strconv.Itoa(v.(int)))
24}
25
20// HashResource hashes complex structures that are described using 26// HashResource hashes complex structures that are described using
21// a *Resource. This is the default set implementation used when a set's 27// a *Resource. This is the default set implementation used when a set's
22// element type is a full resource. 28// element type is a full resource.
@@ -153,6 +159,31 @@ func (s *Set) Equal(raw interface{}) bool {
153 return reflect.DeepEqual(s.m, other.m) 159 return reflect.DeepEqual(s.m, other.m)
154} 160}
155 161
162// HashEqual simply compares the keys of the top-level map to the keys in the
163// other set's top-level map to see if they are equal. This obviously assumes
164// you have a properly working hash function - use HashResource if in doubt.
165func (s *Set) HashEqual(raw interface{}) bool {
166 other, ok := raw.(*Set)
167 if !ok {
168 return false
169 }
170
171 ks1 := make([]string, 0)
172 ks2 := make([]string, 0)
173
174 for k := range s.m {
175 ks1 = append(ks1, k)
176 }
177 for k := range other.m {
178 ks2 = append(ks2, k)
179 }
180
181 sort.Strings(ks1)
182 sort.Strings(ks2)
183
184 return reflect.DeepEqual(ks1, ks2)
185}
186
156func (s *Set) GoString() string { 187func (s *Set) GoString() string {
157 return fmt.Sprintf("*Set(%#v)", s.m) 188 return fmt.Sprintf("*Set(%#v)", s.m)
158} 189}
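
A small sketch of the two Set additions above: HashInt as the SchemaSetFunc for a set of integers, and HashEqual to cheaply compare two sets by their hash keys. The attribute and the port values are hypothetical.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// A hypothetical set-of-integers attribute using the new HashInt helper.
var allowedPortsSchema = &schema.Schema{
	Type:     schema.TypeSet,
	Optional: true,
	Elem:     &schema.Schema{Type: schema.TypeInt},
	Set:      schema.HashInt,
}

func main() {
	a := schema.NewSet(schema.HashInt, []interface{}{80, 443})
	b := schema.NewSet(schema.HashInt, []interface{}{443, 80})

	// HashEqual only compares the sets' hash keys, so it relies on the hash
	// function being well-behaved; here both sets contain the same elements.
	fmt.Println(a.HashEqual(b)) // true
}
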
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
index 9765bdb..da754ac 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -10,13 +10,15 @@ import (
10// TestResourceDataRaw creates a ResourceData from a raw configuration map. 10// TestResourceDataRaw creates a ResourceData from a raw configuration map.
11func TestResourceDataRaw( 11func TestResourceDataRaw(
12 t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { 12 t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
13 t.Helper()
14
13 c, err := config.NewRawConfig(raw) 15 c, err := config.NewRawConfig(raw)
14 if err != nil { 16 if err != nil {
15 t.Fatalf("err: %s", err) 17 t.Fatalf("err: %s", err)
16 } 18 }
17 19
18 sm := schemaMap(schema) 20 sm := schemaMap(schema)
19 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) 21 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil)
20 if err != nil { 22 if err != nil {
21 t.Fatalf("err: %s", err) 23 t.Fatalf("err: %s", err)
22 } 24 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
index 1610cec..3bc3ac4 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -2,7 +2,7 @@
2 2
3package schema 3package schema
4 4
5import "fmt" 5import "strconv"
6 6
7const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" 7const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
8 8
@@ -10,7 +10,7 @@ var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
10 10
11func (i ValueType) String() string { 11func (i ValueType) String() string {
12 if i < 0 || i >= ValueType(len(_ValueType_index)-1) { 12 if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
13 return fmt.Sprintf("ValueType(%d)", i) 13 return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")"
14 } 14 }
15 return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] 15 return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
16} 16}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
deleted file mode 100644
index edc1e2a..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
+++ /dev/null
@@ -1,83 +0,0 @@
1package shadow
2
3import (
4 "fmt"
5 "io"
6 "reflect"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/mitchellh/reflectwalk"
10)
11
12// Close will close all shadow values within the given structure.
13//
14// This uses reflection to walk the structure, find all shadow elements,
15// and close them. Currently this will only find struct fields that are
16// shadow values, and not slice elements, etc.
17func Close(v interface{}) error {
18 // We require a pointer so we can address the internal fields
19 val := reflect.ValueOf(v)
20 if val.Kind() != reflect.Ptr {
21 return fmt.Errorf("value must be a pointer")
22 }
23
24 // Walk and close
25 var w closeWalker
26 if err := reflectwalk.Walk(v, &w); err != nil {
27 return err
28 }
29
30 return w.Err
31}
32
33type closeWalker struct {
34 Err error
35}
36
37func (w *closeWalker) Struct(reflect.Value) error {
38 // Do nothing. We implement this for reflectwalk.StructWalker
39 return nil
40}
41
42var closerType = reflect.TypeOf((*io.Closer)(nil)).Elem()
43
44func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
45 // Not sure why this would be but lets avoid some panics
46 if !v.IsValid() {
47 return nil
48 }
49
50 // Empty for exported, so don't check unexported fields
51 if f.PkgPath != "" {
52 return nil
53 }
54
55 // Verify the io.Closer is in this package
56 typ := v.Type()
57 if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
58 return nil
59 }
60
61 var closer io.Closer
62 if v.Type().Implements(closerType) {
63 closer = v.Interface().(io.Closer)
64 } else if v.CanAddr() {
65 // The Close method may require a pointer receiver, but we only have a value.
66 v := v.Addr()
67 if v.Type().Implements(closerType) {
68 closer = v.Interface().(io.Closer)
69 }
70 }
71
72 if closer == nil {
73 return reflectwalk.SkipEntry
74 }
75
76 // Close it
77 if err := closer.Close(); err != nil {
78 w.Err = multierror.Append(w.Err, err)
79 }
80
81 // Don't go into the struct field
82 return reflectwalk.SkipEntry
83}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
deleted file mode 100644
index 4223e92..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
+++ /dev/null
@@ -1,128 +0,0 @@
1package shadow
2
3import (
4 "sync"
5)
6
7// ComparedValue is a struct that finds a value by comparing some key
8// to the list of stored values. This is useful when there is no easy
9// uniquely identifying key that works in a map (for that, use KeyedValue).
10//
11// ComparedValue is very expensive, relative to other Value types. Try to
12// limit the number of values stored in a ComparedValue by potentially
13// nesting it within a KeyedValue (a keyed value points to a compared value,
14// for example).
15type ComparedValue struct {
16 // Func is a function that is given the lookup key and a single
17 // stored value. If it matches, it returns true.
18 Func func(k, v interface{}) bool
19
20 lock sync.Mutex
21 once sync.Once
22 closed bool
23 values []interface{}
24 waiters map[interface{}]*Value
25}
26
27// Close closes the value. This can never fail. For a definition of
28// "close" see the ErrClosed docs.
29func (w *ComparedValue) Close() error {
30 w.lock.Lock()
31 defer w.lock.Unlock()
32
33 // Set closed to true always
34 w.closed = true
35
36 // For all waiters, complete with ErrClosed
37 for k, val := range w.waiters {
38 val.SetValue(ErrClosed)
39 delete(w.waiters, k)
40 }
41
42 return nil
43}
44
45// Value returns the value that was set for the given key, or blocks
46// until one is available.
47func (w *ComparedValue) Value(k interface{}) interface{} {
48 v, val := w.valueWaiter(k)
49 if val == nil {
50 return v
51 }
52
53 return val.Value()
54}
55
56// ValueOk gets the value for the given key, returning immediately if the
57// value doesn't exist. The second return argument is true if the value exists.
58func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
59 v, val := w.valueWaiter(k)
60 return v, val == nil
61}
62
63func (w *ComparedValue) SetValue(v interface{}) {
64 w.lock.Lock()
65 defer w.lock.Unlock()
66 w.once.Do(w.init)
67
68 // Check if we already have this exact value (by simply comparing
69 // with == directly). If we do, then we don't insert it again.
70 found := false
71 for _, v2 := range w.values {
72 if v == v2 {
73 found = true
74 break
75 }
76 }
77
78 if !found {
79 // Set the value, always
80 w.values = append(w.values, v)
81 }
82
83 // Go through the waiters
84 for k, val := range w.waiters {
85 if w.Func(k, v) {
86 val.SetValue(v)
87 delete(w.waiters, k)
88 }
89 }
90}
91
92func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
93 w.lock.Lock()
94 w.once.Do(w.init)
95
96 // Look for a pre-existing value
97 for _, v := range w.values {
98 if w.Func(k, v) {
99 w.lock.Unlock()
100 return v, nil
101 }
102 }
103
104 // If we're closed, return that
105 if w.closed {
106 w.lock.Unlock()
107 return ErrClosed, nil
108 }
109
110 // Pre-existing value doesn't exist, create a waiter
111 val := w.waiters[k]
112 if val == nil {
113 val = new(Value)
114 w.waiters[k] = val
115 }
116 w.lock.Unlock()
117
118 // Return the waiter
119 return nil, val
120}
121
122// Must be called with w.lock held.
123func (w *ComparedValue) init() {
124 w.waiters = make(map[interface{}]*Value)
125 if w.Func == nil {
126 w.Func = func(k, v interface{}) bool { return k == v }
127 }
128}
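
A short usage sketch of ComparedValue; the user type and ID-based comparator are purely illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

type user struct {
	ID   int
	Name string
}

func main() {
	// Values are matched against lookup keys by a caller-supplied comparator,
	// which is what ComparedValue is for when no simple map key exists.
	cv := &shadow.ComparedValue{
		Func: func(k, v interface{}) bool {
			return v.(*user).ID == k.(int)
		},
	}

	go cv.SetValue(&user{ID: 42, Name: "example"})

	// Value blocks until a stored value matches the key 42.
	u := cv.Value(42).(*user)
	fmt.Println(u.Name) // example
}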
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
deleted file mode 100644
index 432b036..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
+++ /dev/null
@@ -1,151 +0,0 @@
1package shadow
2
3import (
4 "sync"
5)
6
7// KeyedValue is a struct that coordinates a value by key. If a value is
8// not available for a given key, it'll block until it is available.
9type KeyedValue struct {
10 lock sync.Mutex
11 once sync.Once
12 values map[string]interface{}
13 waiters map[string]*Value
14 closed bool
15}
16
17// Close closes the value. This can never fail. For a definition of
18// "close" see the ErrClosed docs.
19func (w *KeyedValue) Close() error {
20 w.lock.Lock()
21 defer w.lock.Unlock()
22
23 // Set closed to true always
24 w.closed = true
25
26 // For all waiters, complete with ErrClosed
27 for k, val := range w.waiters {
28 val.SetValue(ErrClosed)
29 delete(w.waiters, k)
30 }
31
32 return nil
33}
34
35// Value returns the value that was set for the given key, or blocks
36// until one is available.
37func (w *KeyedValue) Value(k string) interface{} {
38 w.lock.Lock()
39 v, val := w.valueWaiter(k)
40 w.lock.Unlock()
41
42 // If we have no waiter, then return the value
43 if val == nil {
44 return v
45 }
46
47 // We have a waiter, so wait
48 return val.Value()
49}
50
51// WaitForChange waits for the value with the given key to be set again.
52// If the key isn't set, it'll wait for an initial value. Note that while
53// it is called "WaitForChange", the value isn't guaranteed to _change_;
54// this will return when a SetValue is called for the given k.
55func (w *KeyedValue) WaitForChange(k string) interface{} {
56 w.lock.Lock()
57 w.once.Do(w.init)
58
59 // If we're closed, we're closed
60 if w.closed {
61 w.lock.Unlock()
62 return ErrClosed
63 }
64
65 // Check for an active waiter. If there isn't one, make it
66 val := w.waiters[k]
67 if val == nil {
68 val = new(Value)
69 w.waiters[k] = val
70 }
71 w.lock.Unlock()
72
73 // And wait
74 return val.Value()
75}
76
77// ValueOk gets the value for the given key, returning immediately if the
78// value doesn't exist. The second return argument is true if the value exists.
79func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
80 w.lock.Lock()
81 defer w.lock.Unlock()
82
83 v, val := w.valueWaiter(k)
84 return v, val == nil
85}
86
87func (w *KeyedValue) SetValue(k string, v interface{}) {
88 w.lock.Lock()
89 defer w.lock.Unlock()
90 w.setValue(k, v)
91}
92
93// Init will initialize the key to a given value only if the key has
94// not been set before. This is safe to call multiple times and in parallel.
95func (w *KeyedValue) Init(k string, v interface{}) {
96 w.lock.Lock()
97 defer w.lock.Unlock()
98
99 // If we have a waiter, set the value.
100 _, val := w.valueWaiter(k)
101 if val != nil {
102 w.setValue(k, v)
103 }
104}
105
106// Must be called with w.lock held.
107func (w *KeyedValue) init() {
108 w.values = make(map[string]interface{})
109 w.waiters = make(map[string]*Value)
110}
111
112// setValue is like SetValue but assumes the lock is held.
113func (w *KeyedValue) setValue(k string, v interface{}) {
114 w.once.Do(w.init)
115
116 // Set the value, always
117 w.values[k] = v
118
119 // If we have a waiter, set it
120 if val, ok := w.waiters[k]; ok {
121 val.SetValue(v)
122 delete(w.waiters, k)
123 }
124}
125
126// valueWaiter gets the value or the Value waiter for a given key.
127//
128// This must be called with lock held.
129func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
130 w.once.Do(w.init)
131
132 // If we have this value already, return it
133 if v, ok := w.values[k]; ok {
134 return v, nil
135 }
136
137 // If we're closed, return that
138 if w.closed {
139 return ErrClosed, nil
140 }
141
142 // No pending value, check for a waiter
143 val := w.waiters[k]
144 if val == nil {
145 val = new(Value)
146 w.waiters[k] = val
147 }
148
149 // Return the waiter
150 return nil, val
151}
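
A minimal sketch of the KeyedValue blocking behaviour described above (the key and value strings are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

func main() {
	var kv shadow.KeyedValue

	// A reader blocks until the value for its key is set elsewhere.
	done := make(chan interface{})
	go func() {
		done <- kv.Value("region")
	}()

	kv.SetValue("region", "us-east-1")
	fmt.Println(<-done) // us-east-1

	// ValueOk never blocks; the second result reports whether the key exists.
	_, ok := kv.ValueOk("missing")
	fmt.Println(ok) // false
}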
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
deleted file mode 100644
index 0a43d4d..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
+++ /dev/null
@@ -1,66 +0,0 @@
1package shadow
2
3import (
4 "container/list"
5 "sync"
6)
7
8// OrderedValue is a struct that keeps track of a value in the order
9// it is set. Each time Value() is called, it returns the oldest value that
10// has not yet been consumed and then discards it.
11//
12// This is unlike Value that returns the same value once it is set.
13type OrderedValue struct {
14 lock sync.Mutex
15 values *list.List
16 waiters *list.List
17}
18
19// Value returns the last value that was set, or blocks until one
20// is received.
21func (w *OrderedValue) Value() interface{} {
22 w.lock.Lock()
23
24 // If we have a pending value already, use it
25 if w.values != nil && w.values.Len() > 0 {
26 front := w.values.Front()
27 w.values.Remove(front)
28 w.lock.Unlock()
29 return front.Value
30 }
31
32 // No pending value, create a waiter
33 if w.waiters == nil {
34 w.waiters = list.New()
35 }
36
37 var val Value
38 w.waiters.PushBack(&val)
39 w.lock.Unlock()
40
41 // Return the value once we have it
42 return val.Value()
43}
44
45// SetValue sets the latest value.
46func (w *OrderedValue) SetValue(v interface{}) {
47 w.lock.Lock()
48 defer w.lock.Unlock()
49
50 // If we have a waiter, notify it
51 if w.waiters != nil && w.waiters.Len() > 0 {
52 front := w.waiters.Front()
53 w.waiters.Remove(front)
54
55 val := front.Value.(*Value)
56 val.SetValue(v)
57 return
58 }
59
60 // Add it to the list of values
61 if w.values == nil {
62 w.values = list.New()
63 }
64
65 w.values.PushBack(v)
66}
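
To illustrate the queueing behaviour, a small sketch: values are consumed one per Value call, in the order they were set.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

func main() {
	var ov shadow.OrderedValue

	ov.SetValue("first")
	ov.SetValue("second")

	fmt.Println(ov.Value()) // first
	fmt.Println(ov.Value()) // second
}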
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
deleted file mode 100644
index 178b7e7..0000000
--- a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
+++ /dev/null
@@ -1,87 +0,0 @@
1package shadow
2
3import (
4 "errors"
5 "sync"
6)
7
8// ErrClosed is returned by any closed values.
9//
10// A "closed value" is when the shadow has been notified that the real
11// side is complete and any blocking values will _never_ be satisfied
12// in the future. In this case, this error is returned. If a value is already
13// available, that is still returned.
14var ErrClosed = errors.New("shadow closed")
15
16// Value is a struct that coordinates a value between two
17// parallel routines. It is similar to atomic.Value except that when
18// Value is called if it isn't set it will wait for it.
19//
20// The Value can be closed with Close, which will cause any future
21// blocking operations to return immediately with ErrClosed.
22type Value struct {
23 lock sync.Mutex
24 cond *sync.Cond
25 value interface{}
26 valueSet bool
27}
28
29func (v *Value) Lock() {
30 v.lock.Lock()
31}
32
33func (v *Value) Unlock() {
34 v.lock.Unlock()
35}
36
37// Close closes the value. This can never fail. For a definition of
38// "close" see the struct docs.
39func (w *Value) Close() error {
40 w.lock.Lock()
41 set := w.valueSet
42 w.lock.Unlock()
43
44 // If we haven't set the value, set it
45 if !set {
46 w.SetValue(ErrClosed)
47 }
48
49 // Done
50 return nil
51}
52
53// Value returns the value that was set.
54func (w *Value) Value() interface{} {
55 w.lock.Lock()
56 defer w.lock.Unlock()
57
58	// Block until the value is set; if it is already set, skip the wait
59 for !w.valueSet {
60 // No value, setup the condition variable if we have to
61 if w.cond == nil {
62 w.cond = sync.NewCond(&w.lock)
63 }
64
65 // Wait on it
66 w.cond.Wait()
67 }
68
69 // Return the value
70 return w.value
71}
72
73// SetValue sets the value.
74func (w *Value) SetValue(v interface{}) {
75 w.lock.Lock()
76 defer w.lock.Unlock()
77
78 // Set the value
79 w.valueSet = true
80 w.value = v
81
82 // If we have a condition, clear it
83 if w.cond != nil {
84 w.cond.Broadcast()
85 w.cond = nil
86 }
87}
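
A minimal sketch of Value and Close in use, matching the semantics described in the comments above:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/shadow"
)

func main() {
	var v shadow.Value

	// Value blocks until SetValue is called, typically from another goroutine.
	go v.SetValue(123)
	fmt.Println(v.Value()) // 123

	// Closing a value that was never set makes future reads return ErrClosed
	// immediately instead of blocking forever.
	var closed shadow.Value
	closed.Close()
	fmt.Println(closed.Value() == shadow.ErrClosed) // true
}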
diff --git a/vendor/github.com/hashicorp/terraform/httpclient/client.go b/vendor/github.com/hashicorp/terraform/httpclient/client.go
new file mode 100644
index 0000000..bb06beb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/httpclient/client.go
@@ -0,0 +1,18 @@
1package httpclient
2
3import (
4 "net/http"
5
6 cleanhttp "github.com/hashicorp/go-cleanhttp"
7)
8
9// New returns the DefaultPooledClient from the cleanhttp
10// package that will also send a Terraform User-Agent string.
11func New() *http.Client {
12 cli := cleanhttp.DefaultPooledClient()
13 cli.Transport = &userAgentRoundTripper{
14 userAgent: UserAgentString(),
15 inner: cli.Transport,
16 }
17 return cli
18}
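
A usage sketch (the request URL is illustrative): the returned client injects the Terraform User-Agent on any request that doesn't already set one.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/httpclient"
)

func main() {
	client := httpclient.New()

	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}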
diff --git a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go
new file mode 100644
index 0000000..5e28017
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go
@@ -0,0 +1,40 @@
1package httpclient
2
3import (
4 "fmt"
5 "log"
6 "net/http"
7 "os"
8 "strings"
9
10 "github.com/hashicorp/terraform/version"
11)
12
13const userAgentFormat = "Terraform/%s"
14const uaEnvVar = "TF_APPEND_USER_AGENT"
15
16func UserAgentString() string {
17 ua := fmt.Sprintf(userAgentFormat, version.Version)
18
19 if add := os.Getenv(uaEnvVar); add != "" {
20 add = strings.TrimSpace(add)
21 if len(add) > 0 {
22 ua += " " + add
23 log.Printf("[DEBUG] Using modified User-Agent: %s", ua)
24 }
25 }
26
27 return ua
28}
29
30type userAgentRoundTripper struct {
31 inner http.RoundTripper
32 userAgent string
33}
34
35func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
36 if _, ok := req.Header["User-Agent"]; !ok {
37 req.Header.Set("User-Agent", rt.userAgent)
38 }
39 return rt.inner.RoundTrip(req)
40}
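
The TF_APPEND_USER_AGENT behaviour can be sketched as follows (the appended suffix is an arbitrary example value):

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/terraform/httpclient"
)

func main() {
	// Without the environment variable the UA is just "Terraform/<version>".
	fmt.Println(httpclient.UserAgentString())

	// TF_APPEND_USER_AGENT appends a suffix, e.g. to identify a wrapping tool.
	os.Setenv("TF_APPEND_USER_AGENT", "my-wrapper/1.0")
	fmt.Println(httpclient.UserAgentString())
}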
diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go
index 3a5cb7a..7e2f4fe 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/client.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/client.go
@@ -1,8 +1,10 @@
1package plugin 1package plugin
2 2
3import ( 3import (
4 "os"
4 "os/exec" 5 "os/exec"
5 6
7 hclog "github.com/hashicorp/go-hclog"
6 plugin "github.com/hashicorp/go-plugin" 8 plugin "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/plugin/discovery" 9 "github.com/hashicorp/terraform/plugin/discovery"
8) 10)
@@ -10,11 +12,18 @@ import (
10// ClientConfig returns a configuration object that can be used to instantiate 12// ClientConfig returns a configuration object that can be used to instantiate
11// a client for the plugin described by the given metadata. 13// a client for the plugin described by the given metadata.
12func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { 14func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {
15 logger := hclog.New(&hclog.LoggerOptions{
16 Name: "plugin",
17 Level: hclog.Trace,
18 Output: os.Stderr,
19 })
20
13 return &plugin.ClientConfig{ 21 return &plugin.ClientConfig{
14 Cmd: exec.Command(m.Path), 22 Cmd: exec.Command(m.Path),
15 HandshakeConfig: Handshake, 23 HandshakeConfig: Handshake,
16 Managed: true, 24 Managed: true,
17 Plugins: PluginMap, 25 Plugins: PluginMap,
26 Logger: logger,
18 } 27 }
19} 28}
20 29
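
The logger wired in above follows the usual go-hclog pattern; a standalone sketch of the same configuration:

package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Named "plugin", trace level, writing to stderr so plugin output
	// interleaves with Terraform's own logs.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "plugin",
		Level:  hclog.Trace,
		Output: os.Stderr,
	})

	logger.Trace("plugin logging enabled")
}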
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
index f5bc4c1..f053312 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
@@ -3,6 +3,7 @@ package discovery
3import ( 3import (
4 "io/ioutil" 4 "io/ioutil"
5 "log" 5 "log"
6 "os"
6 "path/filepath" 7 "path/filepath"
7 "strings" 8 "strings"
8) 9)
@@ -59,7 +60,6 @@ func findPluginPaths(kind string, dirs []string) []string {
59 fullName := item.Name() 60 fullName := item.Name()
60 61
61 if !strings.HasPrefix(fullName, prefix) { 62 if !strings.HasPrefix(fullName, prefix) {
62 log.Printf("[DEBUG] skipping %q, not a %s", fullName, kind)
63 continue 63 continue
64 } 64 }
65 65
@@ -71,6 +71,12 @@ func findPluginPaths(kind string, dirs []string) []string {
71 continue 71 continue
72 } 72 }
73 73
74 // Check that the file we found is usable
75 if !pathIsFile(absPath) {
76 log.Printf("[ERROR] ignoring non-file %s", absPath)
77 continue
78 }
79
74 log.Printf("[DEBUG] found %s %q", kind, fullName) 80 log.Printf("[DEBUG] found %s %q", kind, fullName)
75 ret = append(ret, filepath.Clean(absPath)) 81 ret = append(ret, filepath.Clean(absPath))
76 continue 82 continue
@@ -83,7 +89,13 @@ func findPluginPaths(kind string, dirs []string) []string {
83 continue 89 continue
84 } 90 }
85 91
86 log.Printf("[WARNING] found legacy %s %q", kind, fullName) 92 // Check that the file we found is usable
93 if !pathIsFile(absPath) {
94 log.Printf("[ERROR] ignoring non-file %s", absPath)
95 continue
96 }
97
98 log.Printf("[WARN] found legacy %s %q", kind, fullName)
87 99
88 ret = append(ret, filepath.Clean(absPath)) 100 ret = append(ret, filepath.Clean(absPath))
89 } 101 }
@@ -92,6 +104,17 @@ func findPluginPaths(kind string, dirs []string) []string {
92 return ret 104 return ret
93} 105}
94 106
107// Returns true if and only if the given path refers to a file or a symlink
108// to a file.
109func pathIsFile(path string) bool {
110 info, err := os.Stat(path)
111 if err != nil {
112 return false
113 }
114
115 return !info.IsDir()
116}
117
95// ResolvePluginPaths takes a list of paths to plugin executables (as returned 118// ResolvePluginPaths takes a list of paths to plugin executables (as returned
96// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the 119// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the
97// referenced plugins. 120// referenced plugins.
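
The pathIsFile check added above relies on os.Stat following symlinks; a standalone sketch of the same helper (the example paths are illustrative and Unix-specific):

package main

import (
	"fmt"
	"os"
)

// pathIsFile mirrors the helper above: os.Stat follows symlinks, so a
// symlink pointing at a regular file is reported as a file.
func pathIsFile(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return !info.IsDir()
}

func main() {
	fmt.Println(pathIsFile("/etc/hosts")) // true on most Unix systems
	fmt.Println(pathIsFile("/etc"))       // false: a directory
}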
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
index 241b5cb..815640f 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
@@ -3,19 +3,22 @@ package discovery
3import ( 3import (
4 "errors" 4 "errors"
5 "fmt" 5 "fmt"
6 "io"
6 "io/ioutil" 7 "io/ioutil"
7 "log" 8 "log"
8 "net/http" 9 "net/http"
9 "os" 10 "os"
11 "path/filepath"
10 "runtime" 12 "runtime"
11 "strconv" 13 "strconv"
12 "strings" 14 "strings"
13 15
14 "golang.org/x/net/html" 16 "golang.org/x/net/html"
15 17
16 cleanhttp "github.com/hashicorp/go-cleanhttp"
17 getter "github.com/hashicorp/go-getter" 18 getter "github.com/hashicorp/go-getter"
18 multierror "github.com/hashicorp/go-multierror" 19 multierror "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/terraform/httpclient"
21 "github.com/mitchellh/cli"
19) 22)
20 23
21// Releases are located by parsing the html listing from releases.hashicorp.com. 24// Releases are located by parsing the html listing from releases.hashicorp.com.
@@ -30,7 +33,19 @@ const protocolVersionHeader = "x-terraform-protocol-version"
30 33
31var releaseHost = "https://releases.hashicorp.com" 34var releaseHost = "https://releases.hashicorp.com"
32 35
33var httpClient = cleanhttp.DefaultClient() 36var httpClient *http.Client
37
38func init() {
39 httpClient = httpclient.New()
40
41 httpGetter := &getter.HttpGetter{
42 Client: httpClient,
43 Netrc: true,
44 }
45
46 getter.Getters["http"] = httpGetter
47 getter.Getters["https"] = httpGetter
48}
34 49
35// An Installer maintains a local cache of plugins by downloading plugins 50// An Installer maintains a local cache of plugins by downloading plugins
36// from an online repository. 51// from an online repository.
@@ -47,6 +62,10 @@ type Installer interface {
47type ProviderInstaller struct { 62type ProviderInstaller struct {
48 Dir string 63 Dir string
49 64
65 // Cache is used to access and update a local cache of plugins if non-nil.
66 // Can be nil to disable caching.
67 Cache PluginCache
68
50 PluginProtocolVersion uint 69 PluginProtocolVersion uint
51 70
52 // OS and Arch specify the OS and architecture that should be used when 71 // OS and Arch specify the OS and architecture that should be used when
@@ -58,6 +77,8 @@ type ProviderInstaller struct {
58 77
59 // Skip checksum and signature verification 78 // Skip checksum and signature verification
60 SkipVerify bool 79 SkipVerify bool
80
81 Ui cli.Ui // Ui for output
61} 82}
62 83
63// Get is part of an implementation of type Installer, and attempts to download 84// Get is part of an implementation of type Installer, and attempts to download
@@ -98,6 +119,12 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e
98 // sort them newest to oldest 119 // sort them newest to oldest
99 Versions(versions).Sort() 120 Versions(versions).Sort()
100 121
122 // Ensure that our installation directory exists
123 err = os.MkdirAll(i.Dir, os.ModePerm)
124 if err != nil {
125 return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err)
126 }
127
101 // take the first matching plugin we find 128 // take the first matching plugin we find
102 for _, v := range versions { 129 for _, v := range versions {
103 url := i.providerURL(provider, v.String()) 130 url := i.providerURL(provider, v.String())
@@ -116,8 +143,9 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e
116 143
117 log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) 144 log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v)
118 if checkPlugin(url, i.PluginProtocolVersion) { 145 if checkPlugin(url, i.PluginProtocolVersion) {
119 log.Printf("[DEBUG] getting provider %q version %q at %s", provider, v, url) 146 i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String()))
120 err := getter.Get(i.Dir, url) 147 log.Printf("[DEBUG] getting provider %q version %q", provider, v)
148 err := i.install(provider, v, url)
121 if err != nil { 149 if err != nil {
122 return PluginMeta{}, err 150 return PluginMeta{}, err
123 } 151 }
@@ -164,6 +192,98 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e
164 return PluginMeta{}, ErrorNoVersionCompatible 192 return PluginMeta{}, ErrorNoVersionCompatible
165} 193}
166 194
195func (i *ProviderInstaller) install(provider string, version Version, url string) error {
196 if i.Cache != nil {
197 log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider, version)
198 cached := i.Cache.CachedPluginPath("provider", provider, version)
199 if cached == "" {
200 log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider, version, url)
201 err := getter.Get(i.Cache.InstallDir(), url)
202 if err != nil {
203 return err
204 }
205 // should now be in cache
206 cached = i.Cache.CachedPluginPath("provider", provider, version)
207 if cached == "" {
208 // should never happen if the getter is behaving properly
209 // and the plugins are packaged properly.
210 return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir())
211 }
212 }
213
214 // Link or copy the cached binary into our install dir so the
215 // normal resolution machinery can find it.
216 filename := filepath.Base(cached)
217 targetPath := filepath.Join(i.Dir, filename)
218
219 log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached)
220
221 // Delete if we can. If there's nothing there already then no harm done.
222 // This is important because we can't create a link if there's
223 // already a file of the same name present.
224 // (any other error here we'll catch below when we try to write here)
225 os.Remove(targetPath)
226
227 // We don't attempt linking on Windows because links are not
228 // comprehensively supported by all tools/apps in Windows and
229 // so we choose to be conservative to avoid creating any
230 // weird issues for Windows users.
231 linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned
232 if runtime.GOOS != "windows" {
233 // Try hard linking first. Hard links are preferable because this
234 // creates a self-contained directory that doesn't depend on the
235 // cache after install.
236 linkErr = os.Link(cached, targetPath)
237
238 // If that failed, try a symlink. This _does_ depend on the cache
239 // after install, so the user must manage the cache more carefully
240 // in this case, but avoids creating redundant copies of the
241 // plugins on disk.
242 if linkErr != nil {
243 linkErr = os.Symlink(cached, targetPath)
244 }
245 }
246
247 // If we still have an error then we'll try a copy as a fallback.
248 // In this case either the OS is Windows or the target filesystem
249 // can't support symlinks.
250 if linkErr != nil {
251 srcFile, err := os.Open(cached)
252 if err != nil {
253 return fmt.Errorf("failed to open cached plugin %s: %s", cached, err)
254 }
255 defer srcFile.Close()
256
257 destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm)
258 if err != nil {
259 return fmt.Errorf("failed to create %s: %s", targetPath, err)
260 }
261
262 _, err = io.Copy(destFile, srcFile)
263 if err != nil {
264 destFile.Close()
265 return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err)
266 }
267
268 err = destFile.Close()
269 if err != nil {
270 return fmt.Errorf("error creating %s: %s", targetPath, err)
271 }
272 }
273
274 // One way or another, by the time we get here we should have either
275 // a link or a copy of the cached plugin within i.Dir, as expected.
276 } else {
277 log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider, version, url)
278 err := getter.Get(i.Dir, url)
279 if err != nil {
280 return err
281 }
282 }
283
284 return nil
285}
286
167func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { 287func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) {
168 purge := make(PluginMetaSet) 288 purge := make(PluginMetaSet)
169 289
@@ -261,7 +381,7 @@ func checkPlugin(url string, pluginProtocolVersion uint) bool {
261 if proto == "" { 381 if proto == "" {
262 // The header isn't present, but we don't make this error fatal since 382 // The header isn't present, but we don't make this error fatal since
263 // the latest version will probably work. 383 // the latest version will probably work.
264 log.Printf("[WARNING] missing %s from: %s", protocolVersionHeader, url) 384 log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url)
265 return true 385 return true
266 } 386 }
267 387
@@ -422,3 +542,7 @@ func getFile(url string) ([]byte, error) {
422 } 542 }
423 return data, nil 543 return data, nil
424} 544}
545
546func GetReleaseHost() string {
547 return releaseHost
548}
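
The cache install path above prefers a hard link, then a symlink, and copies only as a last resort (always on Windows). A condensed sketch of that fallback, with illustrative paths:

package main

import (
	"fmt"
	"io"
	"os"
	"runtime"
)

// linkOrCopy prefers a hard link, then a symlink, and copies the file only
// if neither link form works or the platform is Windows.
func linkOrCopy(src, dst string) error {
	// A stale file at the destination would block link creation.
	os.Remove(dst)

	if runtime.GOOS != "windows" {
		if err := os.Link(src, dst); err == nil {
			return nil
		}
		if err := os.Symlink(src, dst); err == nil {
			return nil
		}
	}

	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.OpenFile(dst, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0755)
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	return out.Close()
}

func main() {
	fmt.Println(linkOrCopy("/tmp/src-plugin", "/tmp/dst-plugin"))
}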
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go
new file mode 100644
index 0000000..1a10042
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go
@@ -0,0 +1,48 @@
1package discovery
2
3// PluginCache is an interface implemented by objects that are able to maintain
4// a cache of plugins.
5type PluginCache interface {
6 // CachedPluginPath returns a path where the requested plugin is already
7 // cached, or an empty string if the requested plugin is not yet cached.
8 CachedPluginPath(kind string, name string, version Version) string
9
10 // InstallDir returns the directory that new plugins should be installed into
11 // in order to populate the cache. This directory should be used as the
12 // first argument to getter.Get when downloading plugins with go-getter.
13 //
14 // After installing into this directory, use CachedPluginPath to obtain the
15 // path where the plugin was installed.
16 InstallDir() string
17}
18
19// NewLocalPluginCache returns a PluginCache that caches plugins in a
20// given local directory.
21func NewLocalPluginCache(dir string) PluginCache {
22 return &pluginCache{
23 Dir: dir,
24 }
25}
26
27type pluginCache struct {
28 Dir string
29}
30
31func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string {
32 allPlugins := FindPlugins(kind, []string{c.Dir})
33 plugins := allPlugins.WithName(name).WithVersion(version)
34
35 if plugins.Count() == 0 {
36 // nothing cached
37 return ""
38 }
39
40 // There should generally be only one plugin here; if there's more than
41 // one match for some reason then we'll just choose one arbitrarily.
42 plugin := plugins.Newest()
43 return plugin.Path
44}
45
46func (c *pluginCache) InstallDir() string {
47 return c.Dir
48}
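
A sketch of wiring the cache into the installer (directories are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
	// With a Cache set, Get links or copies cached binaries into Dir
	// instead of downloading them again.
	installer := &discovery.ProviderInstaller{
		Dir:   ".terraform/plugins/linux_amd64",
		Cache: discovery.NewLocalPluginCache("/tmp/terraform-plugin-cache"),
		OS:    "linux",
		Arch:  "amd64",
	}

	fmt.Println(installer.Cache.InstallDir())
}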
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
index 473f786..d6a433c 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -41,6 +41,24 @@ func (p *ResourceProvider) Stop() error {
41 return err 41 return err
42} 42}
43 43
44func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) {
45 var result ResourceProviderGetSchemaResponse
46 args := &ResourceProviderGetSchemaArgs{
47 Req: req,
48 }
49
50 err := p.Client.Call("Plugin.GetSchema", args, &result)
51 if err != nil {
52 return nil, err
53 }
54
55 if result.Error != nil {
56 err = result.Error
57 }
58
59 return result.Schema, err
60}
61
44func (p *ResourceProvider) Input( 62func (p *ResourceProvider) Input(
45 input terraform.UIInput, 63 input terraform.UIInput,
46 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { 64 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
@@ -312,6 +330,15 @@ type ResourceProviderStopResponse struct {
312 Error *plugin.BasicError 330 Error *plugin.BasicError
313} 331}
314 332
333type ResourceProviderGetSchemaArgs struct {
334 Req *terraform.ProviderSchemaRequest
335}
336
337type ResourceProviderGetSchemaResponse struct {
338 Schema *terraform.ProviderSchema
339 Error *plugin.BasicError
340}
341
315type ResourceProviderConfigureResponse struct { 342type ResourceProviderConfigureResponse struct {
316 Error *plugin.BasicError 343 Error *plugin.BasicError
317} 344}
@@ -418,6 +445,18 @@ func (s *ResourceProviderServer) Stop(
418 return nil 445 return nil
419} 446}
420 447
448func (s *ResourceProviderServer) GetSchema(
449 args *ResourceProviderGetSchemaArgs,
450 result *ResourceProviderGetSchemaResponse,
451) error {
452 schema, err := s.Provider.GetSchema(args.Req)
453 result.Schema = schema
454 if err != nil {
455 result.Error = plugin.NewBasicError(err)
456 }
457 return nil
458}
459
421func (s *ResourceProviderServer) Input( 460func (s *ResourceProviderServer) Input(
422 args *ResourceProviderInputArgs, 461 args *ResourceProviderInputArgs,
423 reply *ResourceProviderInputResponse) error { 462 reply *ResourceProviderInputResponse) error {
diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go
new file mode 100644
index 0000000..a18e6b8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/client.go
@@ -0,0 +1,227 @@
1package registry
2
3import (
4 "encoding/json"
5 "fmt"
6 "io/ioutil"
7 "log"
8 "net/http"
9 "net/url"
10 "path"
11 "strings"
12 "time"
13
14 "github.com/hashicorp/terraform/httpclient"
15 "github.com/hashicorp/terraform/registry/regsrc"
16 "github.com/hashicorp/terraform/registry/response"
17 "github.com/hashicorp/terraform/svchost"
18 "github.com/hashicorp/terraform/svchost/disco"
19 "github.com/hashicorp/terraform/version"
20)
21
22const (
23 xTerraformGet = "X-Terraform-Get"
24 xTerraformVersion = "X-Terraform-Version"
25 requestTimeout = 10 * time.Second
26 serviceID = "modules.v1"
27)
28
29var tfVersion = version.String()
30
31// Client provides methods to query Terraform Registries.
32type Client struct {
33 // this is the client to be used for all requests.
34 client *http.Client
35
36 // services is a required *disco.Disco, which may have services and
37 // credentials pre-loaded.
38 services *disco.Disco
39}
40
41// NewClient returns a new initialized registry client.
42func NewClient(services *disco.Disco, client *http.Client) *Client {
43 if services == nil {
44 services = disco.New()
45 }
46
47 if client == nil {
48 client = httpclient.New()
49 client.Timeout = requestTimeout
50 }
51
52 services.Transport = client.Transport
53
54 return &Client{
55 client: client,
56 services: services,
57 }
58}
59
60// Discover queries the host, and returns the url for the registry.
61func (c *Client) Discover(host svchost.Hostname) (*url.URL, error) {
62 service, err := c.services.DiscoverServiceURL(host, serviceID)
63 if err != nil {
64 return nil, err
65 }
66 if !strings.HasSuffix(service.Path, "/") {
67 service.Path += "/"
68 }
69 return service, nil
70}
71
72// Versions queries the registry for a module, and returns the available versions.
73func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) {
74 host, err := module.SvcHost()
75 if err != nil {
76 return nil, err
77 }
78
79 service, err := c.Discover(host)
80 if err != nil {
81 return nil, err
82 }
83
84 p, err := url.Parse(path.Join(module.Module(), "versions"))
85 if err != nil {
86 return nil, err
87 }
88
89 service = service.ResolveReference(p)
90
91 log.Printf("[DEBUG] fetching module versions from %q", service)
92
93 req, err := http.NewRequest("GET", service.String(), nil)
94 if err != nil {
95 return nil, err
96 }
97
98 c.addRequestCreds(host, req)
99 req.Header.Set(xTerraformVersion, tfVersion)
100
101 resp, err := c.client.Do(req)
102 if err != nil {
103 return nil, err
104 }
105 defer resp.Body.Close()
106
107 switch resp.StatusCode {
108 case http.StatusOK:
109 // OK
110 case http.StatusNotFound:
111 return nil, &errModuleNotFound{addr: module}
112 default:
113 return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
114 }
115
116 var versions response.ModuleVersions
117
118 dec := json.NewDecoder(resp.Body)
119 if err := dec.Decode(&versions); err != nil {
120 return nil, err
121 }
122
123 for _, mod := range versions.Modules {
124 for _, v := range mod.Versions {
125 log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source)
126 }
127 }
128
129 return &versions, nil
130}
131
132func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
133 creds, err := c.services.CredentialsForHost(host)
134 if err != nil {
135 log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
136 return
137 }
138
139 if creds != nil {
140 creds.PrepareRequest(req)
141 }
142}
143
144// Location find the download location for a specific version module.
145// This returns a string, because the final location may contain special go-getter syntax.
146func (c *Client) Location(module *regsrc.Module, version string) (string, error) {
147 host, err := module.SvcHost()
148 if err != nil {
149 return "", err
150 }
151
152 service, err := c.Discover(host)
153 if err != nil {
154 return "", err
155 }
156
157 var p *url.URL
158 if version == "" {
159 p, err = url.Parse(path.Join(module.Module(), "download"))
160 } else {
161 p, err = url.Parse(path.Join(module.Module(), version, "download"))
162 }
163 if err != nil {
164 return "", err
165 }
166 download := service.ResolveReference(p)
167
168 log.Printf("[DEBUG] looking up module location from %q", download)
169
170 req, err := http.NewRequest("GET", download.String(), nil)
171 if err != nil {
172 return "", err
173 }
174
175 c.addRequestCreds(host, req)
176 req.Header.Set(xTerraformVersion, tfVersion)
177
178 resp, err := c.client.Do(req)
179 if err != nil {
180 return "", err
181 }
182 defer resp.Body.Close()
183
184 // there should be no body, but save it for logging
185 body, err := ioutil.ReadAll(resp.Body)
186 if err != nil {
187 return "", fmt.Errorf("error reading response body from registry: %s", err)
188 }
189
190 switch resp.StatusCode {
191 case http.StatusOK, http.StatusNoContent:
192 // OK
193 case http.StatusNotFound:
194 return "", fmt.Errorf("module %q version %q not found", module, version)
195 default:
196 // anything else is an error:
197 return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body)
198 }
199
200 // the download location is in the X-Terraform-Get header
201 location := resp.Header.Get(xTerraformGet)
202 if location == "" {
203 return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body)
204 }
205
206 // If location looks like it's trying to be a relative URL, treat it as
207 // one.
208 //
209 // We don't do this for just _any_ location, since the X-Terraform-Get
210 // header is a go-getter location rather than a URL, and so not all
211	// possible values will parse reasonably as URLs.
212 //
213 // When used in conjunction with go-getter we normally require this header
214 // to be an absolute URL, but we are more liberal here because third-party
215 // registry implementations may not "know" their own absolute URLs if
216 // e.g. they are running behind a reverse proxy frontend, or such.
217 if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") {
218 locationURL, err := url.Parse(location)
219 if err != nil {
220 return "", fmt.Errorf("invalid relative URL for %q: %s", module, err)
221 }
222 locationURL = download.ResolveReference(locationURL)
223 location = locationURL.String()
224 }
225
226 return location, nil
227}
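
A usage sketch of the registry client (the module source is an example public module; nil arguments fall back to the defaults described in NewClient):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/registry"
	"github.com/hashicorp/terraform/registry/regsrc"
)

func main() {
	client := registry.NewClient(nil, nil)

	module, err := regsrc.ParseModuleSource("hashicorp/consul/aws")
	if err != nil {
		log.Fatal(err)
	}

	versions, err := client.Versions(module)
	if err != nil {
		log.Fatal(err)
	}

	for _, mod := range versions.Modules {
		for _, v := range mod.Versions {
			fmt.Println(v.Version)
		}
	}
}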
diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go
new file mode 100644
index 0000000..b8dcd31
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/errors.go
@@ -0,0 +1,23 @@
1package registry
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/registry/regsrc"
7)
8
9type errModuleNotFound struct {
10 addr *regsrc.Module
11}
12
13func (e *errModuleNotFound) Error() string {
14 return fmt.Sprintf("module %s not found", e.addr)
15}
16
17// IsModuleNotFound returns true only if the given error is a "module not found"
18// error. This allows callers to recognize this particular error condition
19// as distinct from operational errors such as poor network connectivity.
20func IsModuleNotFound(err error) bool {
21 _, ok := err.(*errModuleNotFound)
22 return ok
23}
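
Callers use IsModuleNotFound to separate "not in this registry" from operational failures; a brief sketch (module source illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/registry"
	"github.com/hashicorp/terraform/registry/regsrc"
)

func main() {
	client := registry.NewClient(nil, nil)
	module, _ := regsrc.ParseModuleSource("hashicorp/consul/aws")

	if _, err := client.Versions(module); err != nil {
		if registry.IsModuleNotFound(err) {
			fmt.Println("module is not present in this registry")
		} else {
			fmt.Println("registry lookup failed:", err)
		}
	}
}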
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go
new file mode 100644
index 0000000..14b4dce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go
@@ -0,0 +1,140 @@
1package regsrc
2
3import (
4 "regexp"
5 "strings"
6
7 "github.com/hashicorp/terraform/svchost"
8)
9
10var (
11 // InvalidHostString is a placeholder returned when a raw host can't be
12 // converted by IDNA spec. It will never be returned for any host for which
13 // Valid() is true.
14 InvalidHostString = "<invalid host>"
15
16 // urlLabelEndSubRe is a sub-expression that matches any character that's
17 // allowed at the start or end of a URL label according to RFC1123.
18 urlLabelEndSubRe = "[0-9A-Za-z]"
19
20	// urlLabelMidSubRe is a sub-expression that matches any character that's
21	// allowed in a non-start/end position of a URL label according to RFC1123.
22 urlLabelMidSubRe = "[0-9A-Za-z-]"
23
24 // urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char
25 // in an IDN (Unicode) display URL. It's not strict - there are only ~15k
26 // valid Unicode points in IDN RFC (some with conditions). We are just going
27 // with being liberal with matching and then erroring if we fail to convert
28 // to punycode later (which validates chars fully). This at least ensures
29	// ASCII chars disallowed by the RFC1123 parts above don't become legal
30 // again.
31 urlLabelUnicodeSubRe = "[^[:ascii:]]"
32
33 // hostLabelSubRe is the sub-expression that matches a valid hostname label.
34 // It does not anchor the start or end so it can be composed into more
35 // complex RegExps below. Note that for sanity we don't handle disallowing
36 // raw punycode in this regexp (esp. since re2 doesn't support negative
37	// lookbehind, but we can capture its presence here to check later).
38 hostLabelSubRe = "" +
39 // Match valid initial char, or unicode char
40 "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
41 // Optionally, match 0 to 61 valid URL or Unicode chars,
42 // followed by one valid end char or unicode char
43 "(?:" +
44 "(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" +
45 "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
46 ")?"
47
48 // hostSubRe is the sub-expression that matches a valid host prefix.
49 // Allows custom port.
50 hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?"
51
52 // hostRe is a regexp that matches a valid host prefix. Additional
53 // validation of unicode strings is needed for matches.
54 hostRe = regexp.MustCompile("^" + hostSubRe + "$")
55)
56
57// FriendlyHost describes a registry instance identified in source strings by a
58// simple bare hostname like registry.terraform.io.
59type FriendlyHost struct {
60 Raw string
61}
62
63func NewFriendlyHost(host string) *FriendlyHost {
64 return &FriendlyHost{Raw: host}
65}
66
67// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the
68// given string. If no valid prefix is found, host will be nil and rest will
69// contain the full source string. The host prefix must terminate at the end of
70// the input or at the first / character. If one or more characters exist after
71// the first /, they will be returned as rest (without the / delimiter).
72// Hostnames containing punycode WILL be parsed successfully since they may have
73// come from an internal normalized source string, however should be considered
74// invalid if the string came from a user directly. This must be checked
75// explicitly for user-input strings by calling Valid() on the
76// returned host.
77func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) {
78 parts := strings.SplitN(source, "/", 2)
79
80 if hostRe.MatchString(parts[0]) {
81 host = &FriendlyHost{Raw: parts[0]}
82 if len(parts) == 2 {
83 rest = parts[1]
84 }
85 return
86 }
87
88 // No match, return whole string as rest along with nil host
89 rest = source
90 return
91}
92
93// Valid returns whether the host prefix is considered valid in any case.
94 // Examples of invalid prefixes include ones that don't conform to the host
95 // name specifications. Note that IDN prefixes containing punycode are not valid
96 // input, which we expect to always be in user-input or normalised display form.
97func (h *FriendlyHost) Valid() bool {
98 return svchost.IsValid(h.Raw)
99}
100
101// Display returns the host formatted for display to the user in CLI or web
102// output.
103func (h *FriendlyHost) Display() string {
104 return svchost.ForDisplay(h.Raw)
105}
106
107// Normalized returns the host formatted for internal reference or comparison.
108func (h *FriendlyHost) Normalized() string {
109 host, err := svchost.ForComparison(h.Raw)
110 if err != nil {
111 return InvalidHostString
112 }
113 return string(host)
114}
115
116// String returns the host formatted as the user originally typed it assuming it
117// was parsed from user input.
118func (h *FriendlyHost) String() string {
119 return h.Raw
120}
121
122// Equal compares the FriendlyHost against another instance taking normalization
123// into account. Invalid hosts cannot be compared and will always return false.
124func (h *FriendlyHost) Equal(other *FriendlyHost) bool {
125 if other == nil {
126 return false
127 }
128
129 otherHost, err := svchost.ForComparison(other.Raw)
130 if err != nil {
131 return false
132 }
133
134 host, err := svchost.ForComparison(h.Raw)
135 if err != nil {
136 return false
137 }
138
139 return otherHost == host
140}
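
A sketch of how ParseFriendlyHost splits a source string (the module paths are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/registry/regsrc"
)

func main() {
	// A host prefix is split off at the first slash; the remainder is
	// returned untouched as rest.
	host, rest := regsrc.ParseFriendlyHost("registry.terraform.io/hashicorp/consul/aws")
	fmt.Println(host.Display(), rest)

	// With no recognizable host, the whole string comes back as rest.
	host, rest = regsrc.ParseFriendlyHost("hashicorp/consul/aws")
	fmt.Println(host == nil, rest) // true hashicorp/consul/aws
}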
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go
new file mode 100644
index 0000000..325706e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go
@@ -0,0 +1,205 @@
1package regsrc
2
3import (
4 "errors"
5 "fmt"
6 "regexp"
7 "strings"
8
9 "github.com/hashicorp/terraform/svchost"
10)
11
12var (
13 ErrInvalidModuleSource = errors.New("not a valid registry module source")
14
15 // nameSubRe is the sub-expression that matches a valid module namespace or
16 // name. It's strictly a super-set of what GitHub allows for user/org and
17 // repo names respectively, but more restrictive than our original repo-name
18 // regex which allowed periods but could cause ambiguity with hostname
19 // prefixes. It does not anchor the start or end so it can be composed into
20 // more complex RegExps below. Alphanumeric with - and _ allowed in non
21 // leading or trailing positions. Max length 64 chars. (GitHub username is
22 // 38 max.)
23 nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?"
24
25 // providerSubRe is the sub-expression that matches a valid provider. It
26 // does not anchor the start or end so it can be composed into more complex
27 // RegExps below. Only lowercase chars and digits are supported in practice.
28 // Max length 64 chars.
29 providerSubRe = "[0-9a-z]{1,64}"
30
31 // moduleSourceRe is a regular expression that matches the basic
32 // namespace/name/provider[//...] format for registry sources. It assumes
33 // any FriendlyHost prefix has already been removed if present.
34 moduleSourceRe = regexp.MustCompile(
35 fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$",
36 nameSubRe, nameSubRe, providerSubRe))
37
38 // NameRe is a regular expression defining the format allowed for namespace
39 // or name fields in module registry implementations.
40 NameRe = regexp.MustCompile("^" + nameSubRe + "$")
41
42 // ProviderRe is a regular expression defining the format allowed for
43 // provider fields in module registry implementations.
44 ProviderRe = regexp.MustCompile("^" + providerSubRe + "$")
45
46 // these hostnames are not allowed as registry sources, because they are
47 // already special case module sources in terraform.
48 disallowed = map[string]bool{
49 "github.com": true,
50 "bitbucket.org": true,
51 }
52)
53
54// Module describes a Terraform Registry Module source.
55type Module struct {
56	// RawHost is the friendly host prefix if one was present. It might be nil
57	// if the original source had no host prefix, which implies
58	// PublicRegistryHost, but is distinct from holding an actual pointer to
59	// PublicRegistryHost since it records that the original string didn't
60	// include a host prefix at all, which matters for recovering the actual
61	// input rather than just the normalized form. Most callers should access it
62	// via Host(), which returns the public registry host instance if it's nil.
63 RawHost *FriendlyHost
64 RawNamespace string
65 RawName string
66 RawProvider string
67 RawSubmodule string
68}
69
70 // NewModule constructs a new module source from separate parts. Pass empty
71// string if host or submodule are not needed.
72func NewModule(host, namespace, name, provider, submodule string) (*Module, error) {
73 m := &Module{
74 RawNamespace: namespace,
75 RawName: name,
76 RawProvider: provider,
77 RawSubmodule: submodule,
78 }
79 if host != "" {
80 h := NewFriendlyHost(host)
81 if h != nil {
82 fmt.Println("HOST:", h)
83 if !h.Valid() || disallowed[h.Display()] {
84 return nil, ErrInvalidModuleSource
85 }
86 }
87 m.RawHost = h
88 }
89 return m, nil
90}
91
92// ParseModuleSource attempts to parse source as a Terraform registry module
93// source. If the string is not found to be in a valid format,
94// ErrInvalidModuleSource is returned. Note that this can only be used on
95// "input" strings, e.g. either ones supplied by the user or potentially
96// normalised but in Display form (unicode). It will fail to parse a source with
97// a punycoded domain since this is not permitted input from a user. If you have
98// an already normalized string internally, you can compare it without parsing
99 // by checking it against the normalized form of the subject using ordinary
100 // string equality.
101func ParseModuleSource(source string) (*Module, error) {
102 // See if there is a friendly host prefix.
103 host, rest := ParseFriendlyHost(source)
104 if host != nil {
105 if !host.Valid() || disallowed[host.Display()] {
106 return nil, ErrInvalidModuleSource
107 }
108 }
109
110 matches := moduleSourceRe.FindStringSubmatch(rest)
111 if len(matches) < 4 {
112 return nil, ErrInvalidModuleSource
113 }
114
115 m := &Module{
116 RawHost: host,
117 RawNamespace: matches[1],
118 RawName: matches[2],
119 RawProvider: matches[3],
120 }
121
122 if len(matches) == 5 {
123 m.RawSubmodule = matches[4]
124 }
125
126 return m, nil
127}
128
129// Display returns the source formatted for display to the user in CLI or web
130// output.
131func (m *Module) Display() string {
132 return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false)
133}
134
135// Normalized returns the source formatted for internal reference or comparison.
136func (m *Module) Normalized() string {
137 return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false)
138}
139
140// String returns the source formatted as the user originally typed it assuming
141// it was parsed from user input.
142func (m *Module) String() string {
143 // Don't normalize public registry hostname - leave it exactly like the user
144 // input it.
145 hostPrefix := ""
146 if m.RawHost != nil {
147 hostPrefix = m.RawHost.String() + "/"
148 }
149 return m.formatWithPrefix(hostPrefix, true)
150}
151
152// Equal compares the module source against another instance taking
153// normalization into account.
154func (m *Module) Equal(other *Module) bool {
155 return m.Normalized() == other.Normalized()
156}
157
158// Host returns the FriendlyHost object describing which registry this module is
159// in. If the original source string had not host component this will return the
160// PublicRegistryHost.
161func (m *Module) Host() *FriendlyHost {
162 if m.RawHost == nil {
163 return PublicRegistryHost
164 }
165 return m.RawHost
166}
167
168func (m *Module) normalizedHostPrefix(host string) string {
169 if m.Host().Equal(PublicRegistryHost) {
170 return ""
171 }
172 return host + "/"
173}
174
175func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string {
176 suffix := ""
177 if m.RawSubmodule != "" {
178 suffix = "//" + m.RawSubmodule
179 }
180 str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName,
181 m.RawProvider, suffix)
182
183 // lower case by default
184 if !preserveCase {
185 return strings.ToLower(str)
186 }
187 return str
188}
189
190// Module returns just the registry ID of the module, without a hostname or
191// suffix.
192func (m *Module) Module() string {
193 return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider)
194}
195
196// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may
197// contain an invalid hostname, this also returns an error indicating if it
198// could be converted to a svchost.Hostname. If no host is specified, the
199// default PublicRegistryHost is returned.
200func (m *Module) SvcHost() (svchost.Hostname, error) {
201 if m.RawHost == nil {
202 return svchost.ForComparison(PublicRegistryHost.Raw)
203 }
204 return svchost.ForComparison(m.RawHost.Raw)
205}
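
A sketch of parsing and normalizing a registry module source (the source string is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/registry/regsrc"
)

func main() {
	m, err := regsrc.ParseModuleSource("registry.terraform.io/HashiCorp/Consul/aws//modules/consul-cluster")
	if err != nil {
		log.Fatal(err)
	}

	// Normalized lower-cases the source and drops the host prefix when it is
	// the default public registry.
	fmt.Println(m.Normalized())
	fmt.Println(m.Module())     // registry ID only: namespace/name/provider
	fmt.Println(m.RawSubmodule) // modules/consul-cluster
}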
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go
new file mode 100644
index 0000000..c430bf1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go
@@ -0,0 +1,8 @@
1// Package regsrc provides helpers for working with source strings that identify
2// resources within a Terraform registry.
3package regsrc
4
5var (
6 // PublicRegistryHost is a FriendlyHost that represents the public registry.
7 PublicRegistryHost = NewFriendlyHost("registry.terraform.io")
8)
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module.go b/vendor/github.com/hashicorp/terraform/registry/response/module.go
new file mode 100644
index 0000000..3bd2b3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module.go
@@ -0,0 +1,93 @@
1package response
2
3import (
4 "time"
5)
6
7// Module is the response structure with the data for a single module version.
8type Module struct {
9 ID string `json:"id"`
10
11 //---------------------------------------------------------------
12 // Metadata about the overall module.
13
14 Owner string `json:"owner"`
15 Namespace string `json:"namespace"`
16 Name string `json:"name"`
17 Version string `json:"version"`
18 Provider string `json:"provider"`
19 Description string `json:"description"`
20 Source string `json:"source"`
21 PublishedAt time.Time `json:"published_at"`
22 Downloads int `json:"downloads"`
23 Verified bool `json:"verified"`
24}
25
26// ModuleDetail represents a module in full detail.
27type ModuleDetail struct {
28 Module
29
30 //---------------------------------------------------------------
31 // Metadata about the overall module. This is only available when
32 // requesting the specific module (not in list responses).
33
34 // Root is the root module.
35 Root *ModuleSubmodule `json:"root"`
36
37 // Submodules are the other submodules that are available within
38 // this module.
39 Submodules []*ModuleSubmodule `json:"submodules"`
40
41 //---------------------------------------------------------------
42 // The fields below are only set when requesting this specific
43 // module. They are available to easily know all available versions
44 // and providers without multiple API calls.
45
46 Providers []string `json:"providers"` // All available providers
47 Versions []string `json:"versions"` // All versions
48}
49
50// ModuleSubmodule is the metadata about a specific submodule within
51// a module. This includes the root module as a special case.
52type ModuleSubmodule struct {
53 Path string `json:"path"`
54 Readme string `json:"readme"`
55 Empty bool `json:"empty"`
56
57 Inputs []*ModuleInput `json:"inputs"`
58 Outputs []*ModuleOutput `json:"outputs"`
59 Dependencies []*ModuleDep `json:"dependencies"`
60 Resources []*ModuleResource `json:"resources"`
61}
62
63// ModuleInput is an input for a module.
64type ModuleInput struct {
65 Name string `json:"name"`
66 Description string `json:"description"`
67 Default string `json:"default"`
68}
69
70// ModuleOutput is an output for a module.
71type ModuleOutput struct {
72 Name string `json:"name"`
73 Description string `json:"description"`
74}
75
76 // ModuleDep describes a dependency of a module.
77type ModuleDep struct {
78 Name string `json:"name"`
79 Source string `json:"source"`
80 Version string `json:"version"`
81}
82
83 // ModuleProviderDep describes a provider dependency of a module.
84type ModuleProviderDep struct {
85 Name string `json:"name"`
86 Version string `json:"version"`
87}
88
89 // ModuleResource describes a resource within a module.
90type ModuleResource struct {
91 Name string `json:"name"`
92 Type string `json:"type"`
93}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_list.go b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go
new file mode 100644
index 0000000..9783748
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go
@@ -0,0 +1,7 @@
1package response
2
3// ModuleList is the response structure for a pageable list of modules.
4type ModuleList struct {
5 Meta PaginationMeta `json:"meta"`
6 Modules []*Module `json:"modules"`
7}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go
new file mode 100644
index 0000000..e48499d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go
@@ -0,0 +1,14 @@
1package response
2
3// ModuleProvider represents a single provider for modules.
4type ModuleProvider struct {
5 Name string `json:"name"`
6 Downloads int `json:"downloads"`
7 ModuleCount int `json:"module_count"`
8}
9
10// ModuleProviderList is the response structure for a pageable list of ModuleProviders.
11type ModuleProviderList struct {
12 Meta PaginationMeta `json:"meta"`
13 Providers []*ModuleProvider `json:"providers"`
14}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go
new file mode 100644
index 0000000..f69e975
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go
@@ -0,0 +1,32 @@
1package response
2
3// ModuleVersions is the response format that contains all metadata about module
4// versions needed for terraform CLI to resolve version constraints. See RFC
5// TF-042 for details on this format.
6type ModuleVersions struct {
7 Modules []*ModuleProviderVersions `json:"modules"`
8}
9
10// ModuleProviderVersions is the response format for a single module instance,
11// containing metadata about all versions and their dependencies.
12type ModuleProviderVersions struct {
13 Source string `json:"source"`
14 Versions []*ModuleVersion `json:"versions"`
15}
16
17// ModuleVersion is the output metadata for a given version needed by CLI to
18// resolve candidate versions to satisfy requirements.
19type ModuleVersion struct {
20 Version string `json:"version"`
21 Root VersionSubmodule `json:"root"`
22 Submodules []*VersionSubmodule `json:"submodules"`
23}
24
25// VersionSubmodule is the output metadata for a submodule within a given
26// version needed by CLI to resolve candidate versions to satisfy requirements.
27// When representing the Root in JSON the path is omitted.
28type VersionSubmodule struct {
29 Path string `json:"path,omitempty"`
30 Providers []*ModuleProviderDep `json:"providers"`
31 Dependencies []*ModuleDep `json:"dependencies"`
32}
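For orientation (not part of the vendored change itself), here is a minimal sketch of decoding this response format with the types above; the module source, version numbers, and response body are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/hashicorp/terraform/registry/response"
)

func main() {
	// Illustrative body of the kind a registry "versions" endpoint returns.
	body := []byte(`{
	  "modules": [
	    {
	      "source": "example-namespace/example-module/aws",
	      "versions": [
	        {"version": "0.1.0", "root": {"providers": [], "dependencies": []}, "submodules": []},
	        {"version": "0.2.0", "root": {"providers": [], "dependencies": []}, "submodules": []}
	      ]
	    }
	  ]
	}`)

	var resp response.ModuleVersions
	if err := json.Unmarshal(body, &resp); err != nil {
		log.Fatal(err)
	}
	for _, mod := range resp.Modules {
		for _, v := range mod.Versions {
			fmt.Printf("%s version %s\n", mod.Source, v.Version)
		}
	}
}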
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go
new file mode 100644
index 0000000..75a9254
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go
@@ -0,0 +1,65 @@
1package response
2
3import (
4 "net/url"
5 "strconv"
6)
7
8// PaginationMeta is a structure included in responses for pagination.
9type PaginationMeta struct {
10 Limit int `json:"limit"`
11 CurrentOffset int `json:"current_offset"`
12 NextOffset *int `json:"next_offset,omitempty"`
13 PrevOffset *int `json:"prev_offset,omitempty"`
14 NextURL string `json:"next_url,omitempty"`
15 PrevURL string `json:"prev_url,omitempty"`
16}
17
18// NewPaginationMeta populates pagination meta data from result parameters
19func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta {
20 pm := PaginationMeta{
21 Limit: limit,
22 CurrentOffset: offset,
23 }
24
25 // Calculate next/prev offsets, leave nil if not valid pages
26 nextOffset := offset + limit
27 if hasMore {
28 pm.NextOffset = &nextOffset
29 }
30
31 prevOffset := offset - limit
32 if prevOffset < 0 {
33 prevOffset = 0
34 }
35 if prevOffset < offset {
36 pm.PrevOffset = &prevOffset
37 }
38
39 // If a URL format was provided, populate the URLs. URL errors are intentionally swallowed
40 // for now; the API should catch missing URLs if we were called with a bad URL argument (and we care about them being present).
41 if currentURL != "" && pm.NextOffset != nil {
42 pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0)
43 }
44 if currentURL != "" && pm.PrevOffset != nil {
45 pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0)
46 }
47
48 return pm
49}
50
51func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) {
52 u, err := url.Parse(baseURL)
53 if err != nil {
54 return "", err
55 }
56 q := u.Query()
57 if val == defaultVal {
58 // elide param if it's the default value
59 q.Del(key)
60 } else {
61 q.Set(key, strconv.Itoa(val))
62 }
63 u.RawQuery = q.Encode()
64 return u.String(), nil
65}
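As a quick illustration of the behavior above (the offsets and URL are made up), NewPaginationMeta elides the offset query parameter when it equals the default of zero:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/registry/response"
)

func main() {
	// A hypothetical second page of results: offset 10, limit 10, with more
	// results available beyond this page.
	meta := response.NewPaginationMeta(10, 10, true, "https://registry.example.com/v1/modules?offset=10")

	if meta.NextOffset != nil {
		fmt.Println("next offset:", *meta.NextOffset) // 20
	}
	if meta.PrevOffset != nil {
		fmt.Println("prev offset:", *meta.PrevOffset) // 0
	}
	fmt.Println("next URL:", meta.NextURL) // ...?offset=20
	fmt.Println("prev URL:", meta.PrevURL) // offset parameter elided because 0 is the default
}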
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/redirect.go b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go
new file mode 100644
index 0000000..d5eb49b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go
@@ -0,0 +1,6 @@
1package response
2
3// Redirect causes the frontend to perform a window redirect.
4type Redirect struct {
5 URL string `json:"url"`
6}
diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go
new file mode 100644
index 0000000..4f0d168
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go
@@ -0,0 +1,45 @@
1package auth
2
3import (
4 "github.com/hashicorp/terraform/svchost"
5)
6
7// CachingCredentialsSource creates a new credentials source that wraps another
8// and caches its results in memory, on a per-hostname basis.
9//
10// No means is provided for expiration of cached credentials, so a caching
11// credentials source should have a limited lifetime (one Terraform operation,
12// for example) to ensure that time-limited credentials don't expire before
13// their cache entries do.
14func CachingCredentialsSource(source CredentialsSource) CredentialsSource {
15 return &cachingCredentialsSource{
16 source: source,
17 cache: map[svchost.Hostname]HostCredentials{},
18 }
19}
20
21type cachingCredentialsSource struct {
22 source CredentialsSource
23 cache map[svchost.Hostname]HostCredentials
24}
25
26// ForHost passes the given hostname on to the wrapped credentials source and
27// caches the result to return for future requests with the same hostname.
28//
29// Both credentials and non-credentials (nil) responses are cached.
30//
31// No cache entry is created if the wrapped source returns an error, to allow
32// the caller to retry the failing operation.
33func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) {
34 if cache, cached := s.cache[host]; cached {
35 return cache, nil
36 }
37
38 result, err := s.source.ForHost(host)
39 if err != nil {
40 return result, err
41 }
42
43 s.cache[host] = result
44 return result, nil
45}
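A minimal sketch of the caching behavior, wrapping a toy CredentialsSource so we can observe that the underlying source is consulted only once per hostname (the counting source exists only for this example, and it hands back the token credentials type defined later in this same package):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
)

// countingSource is a toy CredentialsSource used only to demonstrate caching.
type countingSource struct {
	calls int
}

func (s *countingSource) ForHost(host svchost.Hostname) (auth.HostCredentials, error) {
	s.calls++
	return auth.HostCredentialsToken("placeholder-token"), nil
}

func main() {
	underlying := &countingSource{}
	cached := auth.CachingCredentialsSource(underlying)

	host := svchost.Hostname("app.terraform.io")
	cached.ForHost(host)
	cached.ForHost(host)

	// Only the first lookup reaches the wrapped source; the second is cached.
	fmt.Println("underlying source calls:", underlying.calls) // 1
}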
diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go
new file mode 100644
index 0000000..0372c16
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go
@@ -0,0 +1,63 @@
1// Package auth contains types and functions to manage authentication
2// credentials for service hosts.
3package auth
4
5import (
6 "net/http"
7
8 "github.com/hashicorp/terraform/svchost"
9)
10
11// Credentials is a list of CredentialsSource objects that can be tried in
12// turn until one returns credentials for a host, or one returns an error.
13//
14// A Credentials is itself a CredentialsSource, wrapping its members.
15// In principle one CredentialsSource can be nested inside another, though
16// there is no good reason to do so.
17type Credentials []CredentialsSource
18
19// NoCredentials is an empty CredentialsSource that always returns nil
20// when asked for credentials.
21var NoCredentials CredentialsSource = Credentials{}
22
23// A CredentialsSource is an object that may be able to provide credentials
24// for a given host.
25//
26// Credentials lookups are not guaranteed to be concurrency-safe. Callers
27// using these facilities in concurrent code must use external concurrency
28// primitives to prevent race conditions.
29type CredentialsSource interface {
30 // ForHost returns a non-nil HostCredentials if the source has credentials
31 // available for the host, and a nil HostCredentials if it does not.
32 //
33 // If an error is returned, progress through a list of CredentialsSources
34 // is halted and the error is returned to the user.
35 ForHost(host svchost.Hostname) (HostCredentials, error)
36}
37
38// HostCredentials represents a single set of credentials for a particular
39// host.
40type HostCredentials interface {
41 // PrepareRequest modifies the given request in-place to apply the
42 // receiving credentials. The usual behavior of this method is to
43 // add some sort of Authorization header to the request.
44 PrepareRequest(req *http.Request)
45
46 // Token returns the authentication token.
47 Token() string
48}
49
50// ForHost iterates over the contained CredentialsSource objects and
51// tries to obtain credentials for the given host from each one in turn.
52//
53// If any source returns either a non-nil HostCredentials or a non-nil error
54// then this result is returned. Otherwise, the result is nil, nil.
55func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) {
56 for _, source := range c {
57 creds, err := source.ForHost(host)
58 if creds != nil || err != nil {
59 return creds, err
60 }
61 }
62 return nil, nil
63}
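A short sketch of composing sources into a Credentials list; the first source does not know the requested host, so the lookup falls through to the second. Hostnames and tokens are placeholders, and StaticCredentialsSource is defined later in this package:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
)

func main() {
	creds := auth.Credentials{
		// First source: knows about a different host only.
		auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
			svchost.Hostname("other.example.com"): {"token": "unused"},
		}),
		// Second source: has credentials for the host we ask about.
		auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
			svchost.Hostname("registry.example.com"): {"token": "placeholder-token"},
		}),
	}

	hc, err := creds.ForHost(svchost.Hostname("registry.example.com"))
	if err != nil {
		log.Fatal(err)
	}
	if hc == nil {
		fmt.Println("no credentials found")
		return
	}
	fmt.Println("token:", hc.Token()) // placeholder-token
}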
diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go
new file mode 100644
index 0000000..f91006a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go
@@ -0,0 +1,18 @@
1package auth
2
3// HostCredentialsFromMap converts a map of key-value pairs from a credentials
4// definition provided by the user (e.g. in a config file, or via a credentials
5// helper) into a HostCredentials object if possible, or returns nil if
6// no credentials could be extracted from the map.
7//
8// This function ignores map keys it is unfamiliar with, to allow for future
9// expansion of the credentials map format for new credential types.
10func HostCredentialsFromMap(m map[string]interface{}) HostCredentials {
11 if m == nil {
12 return nil
13 }
14 if token, ok := m["token"].(string); ok {
15 return HostCredentialsToken(token)
16 }
17 return nil
18}
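For illustration, a credentials map of the kind a config file or helper program might supply; only the "token" key is currently understood, and unknown keys are ignored:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost/auth"
)

func main() {
	creds := auth.HostCredentialsFromMap(map[string]interface{}{
		"token":           "placeholder-token",
		"some_future_key": true, // ignored by the current implementation
	})
	if creds == nil {
		fmt.Println("map contained no recognizable credentials")
		return
	}
	fmt.Println(creds.Token()) // placeholder-token
}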
diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go
new file mode 100644
index 0000000..d72ffe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go
@@ -0,0 +1,80 @@
1package auth
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7 "os/exec"
8 "path/filepath"
9
10 "github.com/hashicorp/terraform/svchost"
11)
12
13type helperProgramCredentialsSource struct {
14 executable string
15 args []string
16}
17
18// HelperProgramCredentialsSource returns a CredentialsSource that runs the
19// given program with the given arguments in order to obtain credentials.
20//
21// The given executable path must be an absolute path; it is the caller's
22// responsibility to validate and process a relative path or other input
23// provided by an end-user. If the given path is not absolute, this
24// function will panic.
25//
26// When credentials are requested, the program will be run in a child process
27// with the given arguments along with two additional arguments added to the
28// end of the list: the literal string "get", followed by the requested
29// hostname in ASCII compatibility form (punycode form).
30func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource {
31 if !filepath.IsAbs(executable) {
32 panic("NewCredentialsSourceHelperProgram requires absolute path to executable")
33 }
34
35 fullArgs := make([]string, len(args)+1)
36 fullArgs[0] = executable
37 copy(fullArgs[1:], args)
38
39 return &helperProgramCredentialsSource{
40 executable: executable,
41 args: fullArgs,
42 }
43}
44
45func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) {
46 args := make([]string, len(s.args), len(s.args)+2)
47 copy(args, s.args)
48 args = append(args, "get")
49 args = append(args, string(host))
50
51 outBuf := bytes.Buffer{}
52 errBuf := bytes.Buffer{}
53
54 cmd := exec.Cmd{
55 Path: s.executable,
56 Args: args,
57 Stdin: nil,
58 Stdout: &outBuf,
59 Stderr: &errBuf,
60 }
61 err := cmd.Run()
62 if _, isExitErr := err.(*exec.ExitError); isExitErr {
63 errText := errBuf.String()
64 if errText == "" {
65 // Shouldn't happen for a well-behaved helper program
66 return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable)
67 }
68 return nil, fmt.Errorf("error in %s: %s", s.executable, errText)
69 } else if err != nil {
70 return nil, fmt.Errorf("failed to run %s: %s", s.executable, err)
71 }
72
73 var m map[string]interface{}
74 err = json.Unmarshal(outBuf.Bytes(), &m)
75 if err != nil {
76 return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err)
77 }
78
79 return HostCredentialsFromMap(m), nil
80}
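A sketch of using the helper-program source; the helper path is hypothetical, and the helper is expected to print a JSON object such as {"token":"..."} on stdout when invoked as "<helper> get <hostname>":

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
)

func main() {
	// Hypothetical absolute path; a relative path would make the constructor panic.
	src := auth.HelperProgramCredentialsSource("/usr/local/bin/terraform-credentials-example")

	// Runs: /usr/local/bin/terraform-credentials-example get app.terraform.io
	// and decodes the JSON object the helper prints on stdout.
	creds, err := src.ForHost(svchost.Hostname("app.terraform.io"))
	if err != nil {
		log.Fatal(err)
	}
	if creds == nil {
		fmt.Println("helper returned no usable credentials")
		return
	}
	fmt.Println("token:", creds.Token())
}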
diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/static.go b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go
new file mode 100644
index 0000000..5373fdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go
@@ -0,0 +1,28 @@
1package auth
2
3import (
4 "github.com/hashicorp/terraform/svchost"
5)
6
7// StaticCredentialsSource is a credentials source that retrieves credentials
8// from the provided map. It returns nil if a requested hostname is not
9// present in the map.
10//
11// The caller should not modify the given map after passing it to this function.
12func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource {
13 return staticCredentialsSource(creds)
14}
15
16type staticCredentialsSource map[svchost.Hostname]map[string]interface{}
17
18func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) {
19 if s == nil {
20 return nil, nil
21 }
22
23 if m, exists := s[host]; exists {
24 return HostCredentialsFromMap(m), nil
25 }
26
27 return nil, nil
28}
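The static source also appears in the earlier sketches; the one behavior worth showing separately is that a hostname missing from the map yields (nil, nil) rather than an error (hostnames and token are placeholders):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
)

func main() {
	src := auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
		"known.example.com": {"token": "placeholder-token"},
	})

	// Unknown host: no credentials and no error.
	creds, err := src.ForHost(svchost.Hostname("unknown.example.com"))
	fmt.Println(creds == nil, err == nil) // true true
}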
diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go
new file mode 100644
index 0000000..9358bcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go
@@ -0,0 +1,25 @@
1package auth
2
3import (
4 "net/http"
5)
6
7// HostCredentialsToken is a HostCredentials implementation that represents a
8// single "bearer token", to be sent to the server via an Authorization header
9// with the auth type set to "Bearer".
10type HostCredentialsToken string
11
12// PrepareRequest alters the given HTTP request by setting its Authorization
13// header to the string "Bearer " followed by the encapsulated authentication
14// token.
15func (tc HostCredentialsToken) PrepareRequest(req *http.Request) {
16 if req.Header == nil {
17 req.Header = http.Header{}
18 }
19 req.Header.Set("Authorization", "Bearer "+string(tc))
20}
21
22// Token returns the authentication token.
23func (tc HostCredentialsToken) Token() string {
24 return string(tc)
25}
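A minimal sketch of applying a token to an outgoing request (the token and URL are placeholders):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/hashicorp/terraform/svchost/auth"
)

func main() {
	creds := auth.HostCredentialsToken("placeholder-token")

	req, err := http.NewRequest("GET", "https://registry.example.com/.well-known/terraform.json", nil)
	if err != nil {
		log.Fatal(err)
	}
	creds.PrepareRequest(req)

	fmt.Println(req.Header.Get("Authorization")) // Bearer placeholder-token
}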
diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go
new file mode 100644
index 0000000..1963cbd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go
@@ -0,0 +1,259 @@
1// Package disco handles Terraform's remote service discovery protocol.
2//
3// This protocol allows mapping from a service hostname, as produced by the
4// svchost package, to a set of services supported by that host and the
5// endpoint information for each supported service.
6package disco
7
8import (
9 "encoding/json"
10 "errors"
11 "fmt"
12 "io"
13 "io/ioutil"
14 "log"
15 "mime"
16 "net/http"
17 "net/url"
18 "time"
19
20 cleanhttp "github.com/hashicorp/go-cleanhttp"
21 "github.com/hashicorp/terraform/httpclient"
22 "github.com/hashicorp/terraform/svchost"
23 "github.com/hashicorp/terraform/svchost/auth"
24)
25
26const (
27 // Fixed path to the discovery manifest.
28 discoPath = "/.well-known/terraform.json"
29
30 // Arbitrary-but-small number to prevent runaway redirect loops.
31 maxRedirects = 3
32
33 // Arbitrary-but-small time limit to prevent UI "hangs" during discovery.
34 discoTimeout = 11 * time.Second
35
36 // 1MB - to prevent abusive services from using loads of our memory.
37 maxDiscoDocBytes = 1 * 1024 * 1024
38)
39
40// httpTransport is overridden during tests, to skip TLS verification.
41var httpTransport = cleanhttp.DefaultPooledTransport()
42
43// Disco is the main type in this package, which allows discovery on given
44// hostnames and caches the results by hostname to avoid repeated requests
45// for the same information.
46type Disco struct {
47 hostCache map[svchost.Hostname]*Host
48 credsSrc auth.CredentialsSource
49
50 // Transport is a custom http.RoundTripper to use.
51 Transport http.RoundTripper
52}
53
54// New returns a new initialized discovery object.
55func New() *Disco {
56 return NewWithCredentialsSource(nil)
57}
58
59// NewWithCredentialsSource returns a new discovery object initialized with
60// the given credentials source.
61func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco {
62 return &Disco{
63 hostCache: make(map[svchost.Hostname]*Host),
64 credsSrc: credsSrc,
65 Transport: httpTransport,
66 }
67}
68
69// SetCredentialsSource provides a credentials source that will be used to
70// add credentials to outgoing discovery requests, where available.
71//
72// If this method is never called, no outgoing discovery requests will have
73// credentials.
74func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) {
75 d.credsSrc = src
76}
77
78// CredentialsForHost returns a non-nil HostCredentials if the embedded source has
79// credentials available for the host, and a nil HostCredentials if it does not.
80func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) {
81 if d.credsSrc == nil {
82 return nil, nil
83 }
84 return d.credsSrc.ForHost(hostname)
85}
86
87// ForceHostServices provides a pre-defined set of services for a given
88// host, which prevents the receiver from attempting network-based discovery
89// for the given host. Instead, the given services map will be returned
90// verbatim.
91//
92// When providing "forced" services, any relative URLs are resolved against
93// the initial discovery URL that would have been used for network-based
94// discovery, yielding the same results as if the given map were published
95// at the host's default discovery URL, though using absolute URLs is strongly
96// recommended to make the configured behavior more explicit.
97func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) {
98 if services == nil {
99 services = map[string]interface{}{}
100 }
101
102 d.hostCache[hostname] = &Host{
103 discoURL: &url.URL{
104 Scheme: "https",
105 Host: string(hostname),
106 Path: discoPath,
107 },
108 hostname: hostname.ForDisplay(),
109 services: services,
110 transport: d.Transport,
111 }
112}
113
114// Discover runs the discovery protocol against the given hostname (which must
115// already have been validated and prepared with svchost.ForComparison) and
116// returns an object describing the services available at that host.
117//
118// If a given hostname supports no Terraform services at all, a non-nil but
119// empty Host object is returned. When giving feedback to the end user about
120// such situations, we say "host <name> does not provide a <service> service",
121// regardless of whether that is due to that service specifically being absent
122// or due to the host not providing Terraform services at all, since we don't
123// wish to expose the detail of whole-host discovery to an end-user.
124func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) {
125 if host, cached := d.hostCache[hostname]; cached {
126 return host, nil
127 }
128
129 host, err := d.discover(hostname)
130 if err != nil {
131 return nil, err
132 }
133 d.hostCache[hostname] = host
134
135 return host, nil
136}
137
138// DiscoverServiceURL is a convenience wrapper for discovery on a given
139// hostname and then looking up a particular service in the result.
140func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) {
141 host, err := d.Discover(hostname)
142 if err != nil {
143 return nil, err
144 }
145 return host.ServiceURL(serviceID)
146}
147
148// discover implements the actual discovery process, with its result cached
149// by the public-facing Discover method.
150func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) {
151 discoURL := &url.URL{
152 Scheme: "https",
153 Host: hostname.String(),
154 Path: discoPath,
155 }
156
157 client := &http.Client{
158 Transport: d.Transport,
159 Timeout: discoTimeout,
160
161 CheckRedirect: func(req *http.Request, via []*http.Request) error {
162 log.Printf("[DEBUG] Service discovery redirected to %s", req.URL)
163 if len(via) > maxRedirects {
164 return errors.New("too many redirects") // this error will never actually be seen
165 }
166 return nil
167 },
168 }
169
170 req := &http.Request{
171 Header: make(http.Header),
172 Method: "GET",
173 URL: discoURL,
174 }
175 req.Header.Set("Accept", "application/json")
176 req.Header.Set("User-Agent", httpclient.UserAgentString())
177
178 creds, err := d.CredentialsForHost(hostname)
179 if err != nil {
180 log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err)
181 }
182 if creds != nil {
183 // Update the request to include credentials.
184 creds.PrepareRequest(req)
185 }
186
187 log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL)
188
189 resp, err := client.Do(req)
190 if err != nil {
191 return nil, fmt.Errorf("Failed to request discovery document: %v", err)
192 }
193 defer resp.Body.Close()
194
195 host := &Host{
196 // Use the discovery URL from resp.Request in
197 // case the client followed any redirects.
198 discoURL: resp.Request.URL,
199 hostname: hostname.ForDisplay(),
200 transport: d.Transport,
201 }
202
203 // Return the host without any services.
204 if resp.StatusCode == 404 {
205 return host, nil
206 }
207
208 if resp.StatusCode != 200 {
209 return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status)
210 }
211
212 contentType := resp.Header.Get("Content-Type")
213 mediaType, _, err := mime.ParseMediaType(contentType)
214 if err != nil {
215 return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType)
216 }
217 if mediaType != "application/json" {
218 return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType)
219 }
220
221 // This doesn't catch chunked encoding, because ContentLength is -1 in that case.
222 if resp.ContentLength > maxDiscoDocBytes {
223 // Size limit here is not a contractual requirement and so we may
224 // adjust it over time if we find a different limit is warranted.
225 return nil, fmt.Errorf(
226 "Discovery doc response is too large (got %d bytes; limit %d)",
227 resp.ContentLength, maxDiscoDocBytes,
228 )
229 }
230
231 // If the response is using chunked encoding then we can't predict its
232 // size, but we'll at least prevent reading the entire thing into memory.
233 lr := io.LimitReader(resp.Body, maxDiscoDocBytes)
234
235 servicesBytes, err := ioutil.ReadAll(lr)
236 if err != nil {
237 return nil, fmt.Errorf("Error reading discovery document body: %v", err)
238 }
239
240 var services map[string]interface{}
241 err = json.Unmarshal(servicesBytes, &services)
242 if err != nil {
243 return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err)
244 }
245 host.services = services
246
247 return host, nil
248}
249
250// Forget invalidates any cached record of the given hostname. If the host
251// has no cache entry then this is a no-op.
252func (d *Disco) Forget(hostname svchost.Hostname) {
253 delete(d.hostCache, hostname)
254}
255
256// ForgetAll is like Forget, but for all of the hostnames that have cache entries.
257func (d *Disco) ForgetAll() {
258 d.hostCache = make(map[svchost.Hostname]*Host)
259}
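Tying the auth and disco packages together, a sketch of discovering a service endpoint for a host; this performs a real HTTPS request against the named host, and "modules.v1" is the service ID for the module registry protocol:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
	"github.com/hashicorp/terraform/svchost/disco"
)

func main() {
	// No credentials are strictly required to fetch a public discovery document.
	d := disco.NewWithCredentialsSource(auth.NoCredentials)

	hostname, err := svchost.ForComparison("registry.terraform.io")
	if err != nil {
		log.Fatal(err)
	}

	// Convenience wrapper around Discover followed by ServiceURL.
	u, err := d.DiscoverServiceURL(hostname, "modules.v1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("modules API base URL:", u)
}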
diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go
new file mode 100644
index 0000000..ab9514c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go
@@ -0,0 +1,264 @@
1package disco
2
3import (
4 "encoding/json"
5 "fmt"
6 "log"
7 "net/http"
8 "net/url"
9 "os"
10 "strconv"
11 "strings"
12 "time"
13
14 "github.com/hashicorp/go-version"
15 "github.com/hashicorp/terraform/httpclient"
16)
17
18const versionServiceID = "versions.v1"
19
20// Host represents a service discovered host.
21type Host struct {
22 discoURL *url.URL
23 hostname string
24 services map[string]interface{}
25 transport http.RoundTripper
26}
27
28// Constraints represents the version constraints of a service.
29type Constraints struct {
30 Service string `json:"service"`
31 Product string `json:"product"`
32 Minimum string `json:"minimum"`
33 Maximum string `json:"maximum"`
34 Excluding []string `json:"excluding"`
35}
36
37// ErrServiceNotProvided is returned when the service is not provided.
38type ErrServiceNotProvided struct {
39 hostname string
40 service string
41}
42
43// Error returns a customized error message.
44func (e *ErrServiceNotProvided) Error() string {
45 if e.hostname == "" {
46 return fmt.Sprintf("host does not provide a %s service", e.service)
47 }
48 return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service)
49}
50
51// ErrVersionNotSupported is returned when the version is not supported.
52type ErrVersionNotSupported struct {
53 hostname string
54 service string
55 version string
56}
57
58// Error returns a customized error message.
59func (e *ErrVersionNotSupported) Error() string {
60 if e.hostname == "" {
61 return fmt.Sprintf("host does not support %s version %s", e.service, e.version)
62 }
63 return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version)
64}
65
66// ErrNoVersionConstraints is returned when checkpoint was disabled
67// or the endpoint to query for version constraints was unavailable.
68type ErrNoVersionConstraints struct {
69 disabled bool
70}
71
72// Error returns a customized error message.
73func (e *ErrNoVersionConstraints) Error() string {
74 if e.disabled {
75 return "checkpoint disabled"
76 }
77 return "unable to contact versions service"
78}
79
80// ServiceURL returns the URL associated with the given service identifier,
81// which should be of the form "servicename.vN".
82//
83// A non-nil result is always an absolute URL with a scheme of either HTTPS
84// or HTTP.
85func (h *Host) ServiceURL(id string) (*url.URL, error) {
86 svc, ver, err := parseServiceID(id)
87 if err != nil {
88 return nil, err
89 }
90
91 // No services supported for an empty Host.
92 if h == nil || h.services == nil {
93 return nil, &ErrServiceNotProvided{service: svc}
94 }
95
96 urlStr, ok := h.services[id].(string)
97 if !ok {
98 // See if we have a matching service as that would indicate
99 // the service is supported, but not the requested version.
100 for serviceID := range h.services {
101 if strings.HasPrefix(serviceID, svc+".") {
102 return nil, &ErrVersionNotSupported{
103 hostname: h.hostname,
104 service: svc,
105 version: ver.Original(),
106 }
107 }
108 }
109
110 // No discovered services match the requested service.
111 return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
112 }
113
114 u, err := url.Parse(urlStr)
115 if err != nil {
116 return nil, fmt.Errorf("Failed to parse service URL: %v", err)
117 }
118
119 // Make relative URLs absolute using our discovery URL.
120 if !u.IsAbs() {
121 u = h.discoURL.ResolveReference(u)
122 }
123
124 if u.Scheme != "https" && u.Scheme != "http" {
125 return nil, fmt.Errorf("Service URL is using an unsupported scheme: %s", u.Scheme)
126 }
127 if u.User != nil {
128 return nil, fmt.Errorf("Embedded username/password information is not permitted")
129 }
130
131 // Fragment part is irrelevant, since we're not a browser.
132 u.Fragment = ""
133
134 return h.discoURL.ResolveReference(u), nil
135}
136
137// VersionConstraints returns the constraints for a given service identifier
138// (which should be of the form "servicename.vN") and product.
139//
140// When an exact (service and version) match is found, the constraints for
141// that service are returned.
142//
143// When the requested version is not provided but the service is, we will
144// search for all alternative versions. If multiple alternative versions
145// are found, the constraints of the latest available version are returned.
146//
147// When a service is not provided at all an error will be returned instead.
148//
149// When checkpoint is disabled or when a 404 is returned after making the
150// HTTP call, an ErrNoVersionConstraints error will be returned.
151func (h *Host) VersionConstraints(id, product string) (*Constraints, error) {
152 svc, _, err := parseServiceID(id)
153 if err != nil {
154 return nil, err
155 }
156
157 // Return early if checkpoint is disabled.
158 if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
159 return nil, &ErrNoVersionConstraints{disabled: true}
160 }
161
162 // No services supported for an empty Host.
163 if h == nil || h.services == nil {
164 return nil, &ErrServiceNotProvided{service: svc}
165 }
166
167 // Try to get the service URL for the version service and
168 // return early if the service isn't provided by the host.
169 u, err := h.ServiceURL(versionServiceID)
170 if err != nil {
171 return nil, err
172 }
173
174 // Check if we have an exact (service and version) match.
175 if _, ok := h.services[id].(string); !ok {
176 // If we don't have an exact match, we search for all matching
177 // services and then use the service ID of the latest version.
178 var services []string
179 for serviceID := range h.services {
180 if strings.HasPrefix(serviceID, svc+".") {
181 services = append(services, serviceID)
182 }
183 }
184
185 if len(services) == 0 {
186 // No discovered services match the requested service.
187 return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
188 }
189
190 // Set id to the latest service ID we found.
191 var latest *version.Version
192 for _, serviceID := range services {
193 if _, ver, err := parseServiceID(serviceID); err == nil {
194 if latest == nil || latest.LessThan(ver) {
195 id = serviceID
196 latest = ver
197 }
198 }
199 }
200 }
201
202 // Set a default timeout of 1 sec for the versions request (in milliseconds)
203 timeout := 1000
204 if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
205 timeout = v
206 }
207
208 client := &http.Client{
209 Transport: h.transport,
210 Timeout: time.Duration(timeout) * time.Millisecond,
211 }
212
213 // Prepare the service URL by setting the service and product.
214 v := u.Query()
215 v.Set("product", product)
216 u.Path += id
217 u.RawQuery = v.Encode()
218
219 // Create a new request.
220 req, err := http.NewRequest("GET", u.String(), nil)
221 if err != nil {
222 return nil, fmt.Errorf("Failed to create version constraints request: %v", err)
223 }
224 req.Header.Set("Accept", "application/json")
225 req.Header.Set("User-Agent", httpclient.UserAgentString())
226
227 log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product)
228
229 resp, err := client.Do(req)
230 if err != nil {
231 return nil, fmt.Errorf("Failed to request version constraints: %v", err)
232 }
233 defer resp.Body.Close()
234
235 if resp.StatusCode == 404 {
236 return nil, &ErrNoVersionConstraints{disabled: false}
237 }
238
239 if resp.StatusCode != 200 {
240 return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status)
241 }
242
243 // Parse the constraints from the response body.
244 result := &Constraints{}
245 if err := json.NewDecoder(resp.Body).Decode(result); err != nil {
246 return nil, fmt.Errorf("Error parsing version constraints: %v", err)
247 }
248
249 return result, nil
250}
251
252func parseServiceID(id string) (string, *version.Version, error) {
253 parts := strings.SplitN(id, ".", 2)
254 if len(parts) != 2 {
255 return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id)
256 }
257
258 version, err := version.NewVersion(parts[1])
259 if err != nil {
260 return "", nil, fmt.Errorf("Invalid service version: %v", err)
261 }
262
263 return parts[0], version, nil
264}
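A sketch of exercising ServiceURL without any network access, by pre-seeding a host's services with ForceHostServices from the disco package above; hostnames and URLs are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/disco"
)

func main() {
	d := disco.New()

	// Pre-seed discovery results so no HTTP request is made for this host.
	d.ForceHostServices(svchost.Hostname("registry.example.com"), map[string]interface{}{
		"modules.v1": "/v1/modules/",
	})

	host, err := d.Discover(svchost.Hostname("registry.example.com"))
	if err != nil {
		log.Fatal(err)
	}

	// Relative service URLs are resolved against the (synthetic) discovery URL.
	u, err := host.ServiceURL("modules.v1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // https://registry.example.com/v1/modules/

	// Asking for an unsupported version of a known service yields
	// ErrVersionNotSupported rather than ErrServiceNotProvided.
	if _, err := host.ServiceURL("modules.v9"); err != nil {
		fmt.Println(err)
	}
}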
diff --git a/vendor/github.com/hashicorp/terraform/svchost/label_iter.go b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go
new file mode 100644
index 0000000..af8ccba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go
@@ -0,0 +1,69 @@
1package svchost
2
3import (
4 "strings"
5)
6
7// A labelIter allows iterating over domain name labels.
8//
9// This type is copied from golang.org/x/net/idna, where it is used
10// to segment hostnames into their separate labels for analysis. We use
11// it for the same purpose here, in ForComparison.
12type labelIter struct {
13 orig string
14 slice []string
15 curStart int
16 curEnd int
17 i int
18}
19
20func (l *labelIter) reset() {
21 l.curStart = 0
22 l.curEnd = 0
23 l.i = 0
24}
25
26func (l *labelIter) done() bool {
27 return l.curStart >= len(l.orig)
28}
29
30func (l *labelIter) result() string {
31 if l.slice != nil {
32 return strings.Join(l.slice, ".")
33 }
34 return l.orig
35}
36
37func (l *labelIter) label() string {
38 if l.slice != nil {
39 return l.slice[l.i]
40 }
41 p := strings.IndexByte(l.orig[l.curStart:], '.')
42 l.curEnd = l.curStart + p
43 if p == -1 {
44 l.curEnd = len(l.orig)
45 }
46 return l.orig[l.curStart:l.curEnd]
47}
48
49// next sets the value to the next label. It skips the last label if it is empty.
50func (l *labelIter) next() {
51 l.i++
52 if l.slice != nil {
53 if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
54 l.curStart = len(l.orig)
55 }
56 } else {
57 l.curStart = l.curEnd + 1
58 if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
59 l.curStart = len(l.orig)
60 }
61 }
62}
63
64func (l *labelIter) set(s string) {
65 if l.slice == nil {
66 l.slice = strings.Split(l.orig, ".")
67 }
68 l.slice[l.i] = s
69}
diff --git a/vendor/github.com/hashicorp/terraform/svchost/svchost.go b/vendor/github.com/hashicorp/terraform/svchost/svchost.go
new file mode 100644
index 0000000..4eded14
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/svchost.go
@@ -0,0 +1,207 @@
1// Package svchost deals with the representations of the so-called "friendly
2// hostnames" that we use to represent systems that provide Terraform-native
3// remote services, such as module registry, remote operations, etc.
4//
5// Friendly hostnames are specified such that, as much as possible, they
6// are consistent with how web browsers think of hostnames, so that users
7// can bring their intuitions about how hostnames behave when they access
8// a Terraform Enterprise instance's web UI (or indeed any other website)
9// and have this behave in a similar way.
10package svchost
11
12import (
13 "errors"
14 "fmt"
15 "strconv"
16 "strings"
17
18 "golang.org/x/net/idna"
19)
20
21// Hostname is a specialized name for string that indicates that the string
22// has been converted to (or was already in) the storage and comparison form.
23//
24// Hostname values are not suitable for display in the user-interface. Use
25// the ForDisplay method to obtain a form suitable for display in the UI.
26//
27// Unlike user-supplied hostnames, strings of type Hostname (assuming they
28// were constructed by a function within this package) can be compared for
29// equality using the standard Go == operator.
30type Hostname string
31
32// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that
33// a domain name label is in "punycode" form.
34const acePrefix = "xn--"
35
36// displayProfile is a very liberal idna profile that we use to do
37// normalization for display without imposing validation rules.
38var displayProfile = idna.New(
39 idna.MapForLookup(),
40 idna.Transitional(true),
41)
42
43// ForDisplay takes a user-specified hostname and returns a normalized form of
44// it suitable for display in the UI.
45//
46// If the input is so invalid that no normalization can be performed then
47// this will return the input, assuming that the caller still wants to
48// display _something_. This function is, however, more tolerant than the
49// other functions in this package and will make a best effort to prepare
50// _any_ given hostname for display.
51//
52// For validation, use either IsValid (for explicit validation) or
53// ForComparison (which implicitly validates, returning an error if invalid).
54func ForDisplay(given string) string {
55 var portPortion string
56 if colonPos := strings.Index(given, ":"); colonPos != -1 {
57 given, portPortion = given[:colonPos], given[colonPos:]
58 }
59 portPortion, _ = normalizePortPortion(portPortion)
60
61 ascii, err := displayProfile.ToASCII(given)
62 if err != nil {
63 return given + portPortion
64 }
65 display, err := displayProfile.ToUnicode(ascii)
66 if err != nil {
67 return given + portPortion
68 }
69 return display + portPortion
70}
71
72// IsValid returns true if the given user-specified hostname is a valid
73// service hostname.
74//
75// Validity is determined by complying with the RFC 5891 requirements for
76// names that are valid for domain lookup (section 5), with the additional
77// requirement that user-supplied forms must not _already_ contain
78// Punycode segments.
79func IsValid(given string) bool {
80 _, err := ForComparison(given)
81 return err == nil
82}
83
84// ForComparison takes a user-specified hostname and returns a normalized
85// form of it suitable for storage and comparison. The result is not suitable
86// for display to end-users because it uses Punycode to represent non-ASCII
87// characters, and this form is unreadable for non-ASCII-speaking humans.
88//
89// The result is typed as Hostname -- a specialized name for string -- so that
90// other APIs can make it clear within the type system whether they expect a
91// user-specified or display-form hostname or a value already normalized for
92// comparison.
93//
94// The returned Hostname is not valid if the returned error is non-nil.
95func ForComparison(given string) (Hostname, error) {
96 var portPortion string
97 if colonPos := strings.Index(given, ":"); colonPos != -1 {
98 given, portPortion = given[:colonPos], given[colonPos:]
99 }
100
101 var err error
102 portPortion, err = normalizePortPortion(portPortion)
103 if err != nil {
104 return Hostname(""), err
105 }
106
107 if given == "" {
108 return Hostname(""), fmt.Errorf("empty string is not a valid hostname")
109 }
110
111 // First we'll apply our additional constraint that Punycode must not
112 // be given directly by the user. This is not an IDN specification
113 // requirement, but we prohibit it to force users to use human-readable
114 // hostname forms within Terraform configuration.
115 labels := labelIter{orig: given}
116 for ; !labels.done(); labels.next() {
117 label := labels.label()
118 if label == "" {
119 return Hostname(""), fmt.Errorf(
120 "hostname contains empty label (two consecutive periods)",
121 )
122 }
123 if strings.HasPrefix(label, acePrefix) {
124 return Hostname(""), fmt.Errorf(
125 "hostname label %q specified in punycode format; service hostnames must be given in unicode",
126 label,
127 )
128 }
129 }
130
131 result, err := idna.Lookup.ToASCII(given)
132 if err != nil {
133 return Hostname(""), err
134 }
135 return Hostname(result + portPortion), nil
136}
137
138// ForDisplay returns a version of the receiver that is appropriate for display
139// in the UI. This includes converting any punycode labels to their
140// corresponding Unicode characters.
141//
142// A round-trip through ForComparison and this ForDisplay method does not
143// guarantee the same result as calling this package's top-level ForDisplay
144// function, since a round-trip through the Hostname type implies stricter
145// handling than we do when doing basic display-only processing.
146func (h Hostname) ForDisplay() string {
147 given := string(h)
148 var portPortion string
149 if colonPos := strings.Index(given, ":"); colonPos != -1 {
150 given, portPortion = given[:colonPos], given[colonPos:]
151 }
152 // We don't normalize the port portion here because we assume it's
153 // already been normalized on the way in.
154
155 result, err := idna.Lookup.ToUnicode(given)
156 if err != nil {
157 // Should never happen, since type Hostname indicates that a string
158 // passed through our validation rules.
159 panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err))
160 }
161 return result + portPortion
162}
163
164func (h Hostname) String() string {
165 return string(h)
166}
167
168func (h Hostname) GoString() string {
169 return fmt.Sprintf("svchost.Hostname(%q)", string(h))
170}
171
172// normalizePortPortion attempts to normalize the "port portion" of a hostname,
173// which begins with the first colon in the hostname and should be followed
174// by a string of decimal digits.
175//
176// If the port portion is valid, a normalized version of it is returned along
177// with a nil error.
178//
179// If the port portion is invalid, the input string is returned verbatim along
180// with a non-nil error.
181//
182// An empty string is a valid port portion representing the absence of a port.
183// If non-empty, the first character must be a colon.
184func normalizePortPortion(s string) (string, error) {
185 if s == "" {
186 return s, nil
187 }
188
189 if s[0] != ':' {
190 // should never happen, since caller tends to guarantee the presence
191 // of a colon due to how it's extracted from the string.
192 return s, errors.New("port portion is missing its initial colon")
193 }
194
195 numStr := s[1:]
196 num, err := strconv.Atoi(numStr)
197 if err != nil {
198 return s, errors.New("port portion contains non-digit characters")
199 }
200 if num == 443 {
201 return "", nil // ":443" is the default
202 }
203 if num > 65535 {
204 return s, errors.New("port number is greater than 65535")
205 }
206 return fmt.Sprintf(":%d", num), nil
207}
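A short sketch of the normalization functions above; the hostnames are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/svchost"
)

func main() {
	// Normalization for storage/comparison: lowercases, converts non-ASCII
	// labels to punycode, and drops a default :443 port.
	host, err := svchost.ForComparison("Registry.Terraform.io:443")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(host) // registry.terraform.io

	// Non-ASCII hostnames round-trip back to Unicode for display.
	intl, err := svchost.ForComparison("münchen.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(intl.String())     // punycode (xn--...) comparison form
	fmt.Println(intl.ForDisplay()) // münchen.example.com

	// Punycode may not be supplied directly by the user.
	fmt.Println(svchost.IsValid("xn--mnchen-3ya.example.com")) // false
}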
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
index a814a85..f133cc2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -8,11 +8,13 @@ import (
8 "strings" 8 "strings"
9 "sync" 9 "sync"
10 10
11 "github.com/hashicorp/terraform/tfdiags"
12
11 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/go-multierror"
12 "github.com/hashicorp/hcl" 14 "github.com/hashicorp/hcl"
13 "github.com/hashicorp/terraform/config" 15 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module" 16 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/helper/experiment" 17 "github.com/hashicorp/terraform/version"
16) 18)
17 19
18// InputMode defines what sort of input will be asked for when Input 20// InputMode defines what sort of input will be asked for when Input
@@ -123,7 +125,7 @@ type Context struct {
123func NewContext(opts *ContextOpts) (*Context, error) { 125func NewContext(opts *ContextOpts) (*Context, error) {
124 // Validate the version requirement if it is given 126 // Validate the version requirement if it is given
125 if opts.Module != nil { 127 if opts.Module != nil {
126 if err := checkRequiredVersion(opts.Module); err != nil { 128 if err := CheckRequiredVersion(opts.Module); err != nil {
127 return nil, err 129 return nil, err
128 } 130 }
129 } 131 }
@@ -143,19 +145,14 @@ func NewContext(opts *ContextOpts) (*Context, error) {
143 145
144 // If our state is from the future, then error. Callers can avoid 146 // If our state is from the future, then error. Callers can avoid
145 // this error by explicitly setting `StateFutureAllowed`. 147 // this error by explicitly setting `StateFutureAllowed`.
146 if !opts.StateFutureAllowed && state.FromFutureTerraform() { 148 if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed {
147 return nil, fmt.Errorf( 149 return nil, err
148 "Terraform doesn't allow running any operations against a state\n"+
149 "that was written by a future Terraform version. The state is\n"+
150 "reporting it is written by Terraform '%s'.\n\n"+
151 "Please run at least that version of Terraform to continue.",
152 state.TFVersion)
153 } 150 }
154 151
155 // Explicitly reset our state version to our current version so that 152 // Explicitly reset our state version to our current version so that
156 // any operations we do will write out that our latest version 153 // any operations we do will write out that our latest version
157 // has run. 154 // has run.
158 state.TFVersion = Version 155 state.TFVersion = version.Version
159 156
160 // Determine parallelism, default to 10. We do this both to limit 157 // Determine parallelism, default to 10. We do this both to limit
161 // CPU pressure but also to have an extra guard against rate throttling 158 // CPU pressure but also to have an extra guard against rate throttling
@@ -465,7 +462,7 @@ func (c *Context) Input(mode InputMode) error {
465 } 462 }
466 463
467 // Do the walk 464 // Do the walk
468 if _, err := c.walk(graph, nil, walkInput); err != nil { 465 if _, err := c.walk(graph, walkInput); err != nil {
469 return err 466 return err
470 } 467 }
471 } 468 }
@@ -490,6 +487,13 @@ func (c *Context) Input(mode InputMode) error {
490func (c *Context) Apply() (*State, error) { 487func (c *Context) Apply() (*State, error) {
491 defer c.acquireRun("apply")() 488 defer c.acquireRun("apply")()
492 489
490 // Check there are no empty target parameter values
491 for _, target := range c.targets {
492 if target == "" {
493 return nil, fmt.Errorf("Target parameter must not have empty value")
494 }
495 }
496
493 // Copy our own state 497 // Copy our own state
494 c.state = c.state.DeepCopy() 498 c.state = c.state.DeepCopy()
495 499
@@ -506,7 +510,7 @@ func (c *Context) Apply() (*State, error) {
506 } 510 }
507 511
508 // Walk the graph 512 // Walk the graph
509 walker, err := c.walk(graph, graph, operation) 513 walker, err := c.walk(graph, operation)
510 if len(walker.ValidationErrors) > 0 { 514 if len(walker.ValidationErrors) > 0 {
511 err = multierror.Append(err, walker.ValidationErrors...) 515 err = multierror.Append(err, walker.ValidationErrors...)
512 } 516 }
@@ -527,19 +531,27 @@ func (c *Context) Apply() (*State, error) {
527func (c *Context) Plan() (*Plan, error) { 531func (c *Context) Plan() (*Plan, error) {
528 defer c.acquireRun("plan")() 532 defer c.acquireRun("plan")()
529 533
534 // Check there are no empty target parameter values
535 for _, target := range c.targets {
536 if target == "" {
537 return nil, fmt.Errorf("Target parameter must not have empty value")
538 }
539 }
540
530 p := &Plan{ 541 p := &Plan{
531 Module: c.module, 542 Module: c.module,
532 Vars: c.variables, 543 Vars: c.variables,
533 State: c.state, 544 State: c.state,
534 Targets: c.targets, 545 Targets: c.targets,
535 546
536 TerraformVersion: VersionString(), 547 TerraformVersion: version.String(),
537 ProviderSHA256s: c.providerSHA256s, 548 ProviderSHA256s: c.providerSHA256s,
538 } 549 }
539 550
540 var operation walkOperation 551 var operation walkOperation
541 if c.destroy { 552 if c.destroy {
542 operation = walkPlanDestroy 553 operation = walkPlanDestroy
554 p.Destroy = true
543 } else { 555 } else {
544 // Set our state to be something temporary. We do this so that 556 // Set our state to be something temporary. We do this so that
545 // the plan can update a fake state so that variables work, then 557 // the plan can update a fake state so that variables work, then
@@ -575,7 +587,7 @@ func (c *Context) Plan() (*Plan, error) {
575 } 587 }
576 588
577 // Do the walk 589 // Do the walk
578 walker, err := c.walk(graph, graph, operation) 590 walker, err := c.walk(graph, operation)
579 if err != nil { 591 if err != nil {
580 return nil, err 592 return nil, err
581 } 593 }
@@ -630,7 +642,7 @@ func (c *Context) Refresh() (*State, error) {
630 } 642 }
631 643
632 // Do the walk 644 // Do the walk
633 if _, err := c.walk(graph, graph, walkRefresh); err != nil { 645 if _, err := c.walk(graph, walkRefresh); err != nil {
634 return nil, err 646 return nil, err
635 } 647 }
636 648
@@ -670,29 +682,27 @@ func (c *Context) Stop() {
670} 682}
671 683
672// Validate validates the configuration and returns any warnings or errors. 684// Validate validates the configuration and returns any warnings or errors.
673func (c *Context) Validate() ([]string, []error) { 685func (c *Context) Validate() tfdiags.Diagnostics {
674 defer c.acquireRun("validate")() 686 defer c.acquireRun("validate")()
675 687
676 var errs error 688 var diags tfdiags.Diagnostics
677 689
678 // Validate the configuration itself 690 // Validate the configuration itself
679 if err := c.module.Validate(); err != nil { 691 diags = diags.Append(c.module.Validate())
680 errs = multierror.Append(errs, err)
681 }
682 692
683 // This only needs to be done for the root module, since inter-module 693 // This only needs to be done for the root module, since inter-module
684 // variables are validated in the module tree. 694 // variables are validated in the module tree.
685 if config := c.module.Config(); config != nil { 695 if config := c.module.Config(); config != nil {
686 // Validate the user variables 696 // Validate the user variables
687 if err := smcUserVariables(config, c.variables); len(err) > 0 { 697 for _, err := range smcUserVariables(config, c.variables) {
688 errs = multierror.Append(errs, err...) 698 diags = diags.Append(err)
689 } 699 }
690 } 700 }
691 701
692 // If we have errors at this point, the graphing has no chance, 702 // If we have errors at this point, the graphing has no chance,
693 // so just bail early. 703 // so just bail early.
694 if errs != nil { 704 if diags.HasErrors() {
695 return nil, []error{errs} 705 return diags
696 } 706 }
697 707
698 // Build the graph so we can walk it and run Validate on nodes. 708 // Build the graph so we can walk it and run Validate on nodes.
@@ -701,24 +711,29 @@ func (c *Context) Validate() ([]string, []error) {
701 // graph again later after Planning. 711 // graph again later after Planning.
702 graph, err := c.Graph(GraphTypeValidate, nil) 712 graph, err := c.Graph(GraphTypeValidate, nil)
703 if err != nil { 713 if err != nil {
704 return nil, []error{err} 714 diags = diags.Append(err)
715 return diags
705 } 716 }
706 717
707 // Walk 718 // Walk
708 walker, err := c.walk(graph, graph, walkValidate) 719 walker, err := c.walk(graph, walkValidate)
709 if err != nil { 720 if err != nil {
710 return nil, multierror.Append(errs, err).Errors 721 diags = diags.Append(err)
711 } 722 }
712 723
713 // Return the result
714 rerrs := multierror.Append(errs, walker.ValidationErrors...)
715
716 sort.Strings(walker.ValidationWarnings) 724 sort.Strings(walker.ValidationWarnings)
717 sort.Slice(rerrs.Errors, func(i, j int) bool { 725 sort.Slice(walker.ValidationErrors, func(i, j int) bool {
718 return rerrs.Errors[i].Error() < rerrs.Errors[j].Error() 726 return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
719 }) 727 })
720 728
721 return walker.ValidationWarnings, rerrs.Errors 729 for _, warn := range walker.ValidationWarnings {
730 diags = diags.Append(tfdiags.SimpleWarning(warn))
731 }
732 for _, err := range walker.ValidationErrors {
733 diags = diags.Append(err)
734 }
735
736 return diags
722} 737}
723 738
724// Module returns the module tree associated with this context. 739// Module returns the module tree associated with this context.
@@ -792,33 +807,11 @@ func (c *Context) releaseRun() {
792 c.runContext = nil 807 c.runContext = nil
793} 808}
794 809
795func (c *Context) walk( 810func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) {
796 graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
797 // Keep track of the "real" context which is the context that does 811 // Keep track of the "real" context which is the context that does
798 // the real work: talking to real providers, modifying real state, etc. 812 // the real work: talking to real providers, modifying real state, etc.
799 realCtx := c 813 realCtx := c
800 814
801 // If we don't want shadowing, remove it
802 if !experiment.Enabled(experiment.X_shadow) {
803 shadow = nil
804 }
805
806 // Just log this so we can see it in a debug log
807 if !c.shadow {
808 log.Printf("[WARN] terraform: shadow graph disabled")
809 shadow = nil
810 }
811
812 // If we have a shadow graph, walk that as well
813 var shadowCtx *Context
814 var shadowCloser Shadow
815 if shadow != nil {
816 // Build the shadow context. In the process, override the real context
817 // with the one that is wrapped so that the shadow context can verify
818 // the results of the real.
819 realCtx, shadowCtx, shadowCloser = newShadowContext(c)
820 }
821
822 log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) 815 log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
823 816
824 walker := &ContextGraphWalker{ 817 walker := &ContextGraphWalker{
@@ -837,90 +830,6 @@ func (c *Context) walk(
837 close(watchStop) 830 close(watchStop)
838 <-watchWait 831 <-watchWait
839 832
840 // If we have a shadow graph and we interrupted the real graph, then
841 // we just close the shadow and never verify it. It is non-trivial to
842 // recreate the exact execution state up until an interruption so this
843 // isn't supported with shadows at the moment.
844 if shadowCloser != nil && c.sh.Stopped() {
845 // Ignore the error result, there is nothing we could care about
846 shadowCloser.CloseShadow()
847
848 // Set it to nil so we don't do anything
849 shadowCloser = nil
850 }
851
852 // If we have a shadow graph, wait for that to complete.
853 if shadowCloser != nil {
854 // Build the graph walker for the shadow. We also wrap this in
855 // a panicwrap so that panics are captured. For the shadow graph,
856 // we just want panics to be normal errors rather than to crash
857 // Terraform.
858 shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
859 Context: shadowCtx,
860 Operation: operation,
861 })
862
863 // Kick off the shadow walk. This will block on any operations
864 // on the real walk so it is fine to start first.
865 log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
866 shadowCh := make(chan error)
867 go func() {
868 shadowCh <- shadow.Walk(shadowWalker)
869 }()
870
871 // Notify the shadow that we're done
872 if err := shadowCloser.CloseShadow(); err != nil {
873 c.shadowErr = multierror.Append(c.shadowErr, err)
874 }
875
876 // Wait for the walk to end
877 log.Printf("[DEBUG] Waiting for shadow graph to complete...")
878 shadowWalkErr := <-shadowCh
879
880 // Get any shadow errors
881 if err := shadowCloser.ShadowError(); err != nil {
882 c.shadowErr = multierror.Append(c.shadowErr, err)
883 }
884
885 // Verify the contexts (compare)
886 if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
887 c.shadowErr = multierror.Append(c.shadowErr, err)
888 }
889
890 // At this point, if we're supposed to fail on error, then
891 // we PANIC. Some tests just verify that there is an error,
892 // so simply appending it to realErr and returning could hide
893 // shadow problems.
894 //
895 // This must be done BEFORE appending shadowWalkErr since the
896 // shadowWalkErr may include expected errors.
897 //
898 // We only do this if we don't have a real error. In the case of
899 // a real error, we can't guarantee what nodes were and weren't
900 // traversed in parallel scenarios so we can't guarantee no
901 // shadow errors.
902 if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
903 panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
904 }
905
906 // Now, if we have a walk error, we append that through
907 if shadowWalkErr != nil {
908 c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
909 }
910
911 if c.shadowErr == nil {
912 log.Printf("[INFO] Shadow graph success!")
913 } else {
914 log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
915
916 // If we're supposed to fail on shadow errors, then report it
917 if contextFailOnShadowError {
918 realErr = multierror.Append(realErr, multierror.Prefix(
919 c.shadowErr, "shadow graph:"))
920 }
921 }
922 }
923
924 return walker, realErr 833 return walker, realErr
925} 834}
926 835
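For callers updating to this change, Validate now returns a tfdiags.Diagnostics value instead of the old ([]string, []error) pair. The sketch below (with the Context construction elided) shows one way to adapt, assuming the tfdiags Severity/Description accessors:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
	"github.com/hashicorp/terraform/tfdiags"
)

// validateAndReport adapts to the new Validate signature: warnings and errors
// now arrive as a single tfdiags.Diagnostics value.
func validateAndReport(ctx *terraform.Context) error {
	diags := ctx.Validate()

	for _, diag := range diags {
		// Severity distinguishes what used to be the separate warning and
		// error slices.
		switch diag.Severity() {
		case tfdiags.Warning:
			fmt.Println("warning:", diag.Description().Summary)
		case tfdiags.Error:
			fmt.Println("error:", diag.Description().Summary)
		}
	}

	if diags.HasErrors() {
		return diags.Err()
	}
	return nil
}

func main() {}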
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
index f1d5776..e940143 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -66,7 +66,7 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) {
66 } 66 }
67 67
68 // Walk it 68 // Walk it
69 if _, err := c.walk(graph, nil, walkImport); err != nil { 69 if _, err := c.walk(graph, walkImport); err != nil {
70 return c.state, err 70 return c.state, err
71 } 71 }
72 72
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
index fd1687e..d6dc550 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -23,6 +23,12 @@ const (
23 DiffUpdate 23 DiffUpdate
24 DiffDestroy 24 DiffDestroy
25 DiffDestroyCreate 25 DiffDestroyCreate
26
27 // DiffRefresh is only used in the UI for displaying diffs.
28 // Managed resource reads never appear in plan, and when data source
29 // reads appear they are represented as DiffCreate in core before
30 // transforming to DiffRefresh in the UI layer.
31 DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion
26) 32)
27 33
28// multiVal matches the index key to a flatmapped set, list or map 34// multiVal matches the index key to a flatmapped set, list or map
@@ -831,7 +837,14 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
831 } 837 }
832 } 838 }
833 839
834 // TODO: check for the same value if not computed 840 // We don't compare the values because we can't currently actually
841 // guarantee to generate the same value for two diffs created from
842 // the same state+config: we have some pesky interpolation functions
843 // that do not behave as pure functions (uuid, timestamp) and so they
844 // can be different each time a diff is produced.
845 // FIXME: Re-organize our config handling so that we don't re-evaluate
846 // expressions when we produce a second comparison diff during
847 // apply (for EvalCompareDiff).
835 } 848 }
836 849
837 // Check for leftover attributes 850 // Check for leftover attributes
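Illustrative sketch, not part of the diff: the comment above refers to interpolation functions such as uuid() and timestamp() not being pure. A stand-alone example of why two evaluations of the same expression can disagree (fakeTimestamp is a hypothetical stand-in for Terraform's timestamp() function):

package main

import (
	"fmt"
	"time"
)

// fakeTimestamp stands in for Terraform's timestamp() interpolation function.
func fakeTimestamp() string {
	return time.Now().UTC().Format(time.RFC3339)
}

func main() {
	first := fakeTimestamp()
	time.Sleep(1100 * time.Millisecond)
	second := fakeTimestamp()

	// Two "diffs" built from the same state+config would disagree here even
	// though nothing in the configuration changed.
	fmt.Println(first == second) // false
}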
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
index 3cb088a..10d9c22 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -49,11 +49,11 @@ func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
49 path = strings.Join(ctx.Path(), ".") 49 path = strings.Join(ctx.Path(), ".")
50 } 50 }
51 51
52 log.Printf("[DEBUG] %s: eval: %T", path, n) 52 log.Printf("[TRACE] %s: eval: %T", path, n)
53 output, err := n.Eval(ctx) 53 output, err := n.Eval(ctx)
54 if err != nil { 54 if err != nil {
55 if _, ok := err.(EvalEarlyExitError); ok { 55 if _, ok := err.(EvalEarlyExitError); ok {
56 log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err) 56 log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err)
57 } else { 57 } else {
58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) 58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
59 } 59 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
index 2f6a497..b9b4806 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -112,7 +112,7 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
112 } 112 }
113 state.init() 113 state.init()
114 114
115 { 115 if resourceHasUserVisibleApply(n.Info) {
116 // Call post-apply hook 116 // Call post-apply hook
117 err := ctx.Hook(func(h Hook) (HookAction, error) { 117 err := ctx.Hook(func(h Hook) (HookAction, error) {
118 return h.PreApply(n.Info, state, diff) 118 return h.PreApply(n.Info, state, diff)
@@ -136,7 +136,7 @@ type EvalApplyPost struct {
136func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { 136func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
137 state := *n.State 137 state := *n.State
138 138
139 { 139 if resourceHasUserVisibleApply(n.Info) {
140 // Call post-apply hook 140 // Call post-apply hook
141 err := ctx.Hook(func(h Hook) (HookAction, error) { 141 err := ctx.Hook(func(h Hook) (HookAction, error) {
142 return h.PostApply(n.Info, state, *n.Error) 142 return h.PostApply(n.Info, state, *n.Error)
@@ -149,6 +149,22 @@ func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
149 return nil, *n.Error 149 return nil, *n.Error
150} 150}
151 151
152// resourceHasUserVisibleApply returns true if the given resource is one where
153// apply actions should be exposed to the user.
154//
155// Certain resources do apply actions only as an implementation detail, so
156// these should not be advertised to code outside of this package.
157func resourceHasUserVisibleApply(info *InstanceInfo) bool {
158 addr := info.ResourceAddress()
159
160 // Only managed resources have user-visible apply actions.
161 // In particular, this excludes data resources since we "apply" these
162 // only as an implementation detail of removing them from state when
163 // they are destroyed. (When reading, they don't get here at all because
164 // we present them as "Refresh" actions.)
165 return addr.Mode == config.ManagedResourceMode
166}
167
152// EvalApplyProvisioners is an EvalNode implementation that executes 168// EvalApplyProvisioners is an EvalNode implementation that executes
153// the provisioners for a resource. 169// the provisioners for a resource.
154// 170//
@@ -211,11 +227,8 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
211 state.Tainted = true 227 state.Tainted = true
212 } 228 }
213 229
214 if n.Error != nil { 230 *n.Error = multierror.Append(*n.Error, err)
215 *n.Error = multierror.Append(*n.Error, err) 231 return nil, err
216 } else {
217 return nil, err
218 }
219 } 232 }
220 233
221 { 234 {
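Illustrative sketch with hypothetical stand-in types (not the real terraform package): the gate introduced above means pre/post-apply hooks fire only for managed resources, while data resources, whose "apply" is an implementation detail, are skipped.

package main

import "fmt"

type resourceMode int

const (
	managedResourceMode resourceMode = iota
	dataResourceMode
)

type instanceInfo struct {
	ID   string
	Mode resourceMode
}

// hasUserVisibleApply mirrors the idea of resourceHasUserVisibleApply above.
func hasUserVisibleApply(info instanceInfo) bool {
	return info.Mode == managedResourceMode
}

func main() {
	for _, info := range []instanceInfo{
		{ID: "aws_instance.web", Mode: managedResourceMode},
		{ID: "data.aws_ami.ubuntu", Mode: dataResourceMode},
	} {
		if hasUserVisibleApply(info) {
			fmt.Printf("would run PreApply/PostApply hooks for %s\n", info.ID)
		} else {
			fmt.Printf("skipping hooks for %s\n", info.ID)
		}
	}
}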
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
index a1f815b..86481de 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -22,11 +22,11 @@ type EvalContext interface {
22 // Input is the UIInput object for interacting with the UI. 22 // Input is the UIInput object for interacting with the UI.
23 Input() UIInput 23 Input() UIInput
24 24
25 // InitProvider initializes the provider with the given name and 25 // InitProvider initializes the provider with the given type and name, and
26 // returns the implementation of the resource provider or an error. 26 // returns the implementation of the resource provider or an error.
27 // 27 //
28 // It is an error to initialize the same provider more than once. 28 // It is an error to initialize the same provider more than once.
29 InitProvider(string) (ResourceProvider, error) 29 InitProvider(typ string, name string) (ResourceProvider, error)
30 30
31 // Provider gets the provider instance with the given name (already 31 // Provider gets the provider instance with the given name (already
32 // initialized) or returns nil if the provider isn't initialized. 32 // initialized) or returns nil if the provider isn't initialized.
@@ -40,8 +40,6 @@ type EvalContext interface {
40 // is used to store the provider configuration for inheritance lookups 40 // is used to store the provider configuration for inheritance lookups
41 // with ParentProviderConfig(). 41 // with ParentProviderConfig().
42 ConfigureProvider(string, *ResourceConfig) error 42 ConfigureProvider(string, *ResourceConfig) error
43 SetProviderConfig(string, *ResourceConfig) error
44 ParentProviderConfig(string) *ResourceConfig
45 43
46 // ProviderInput and SetProviderInput are used to configure providers 44 // ProviderInput and SetProviderInput are used to configure providers
47 // from user input. 45 // from user input.
@@ -69,6 +67,13 @@ type EvalContext interface {
69 // that is currently being acted upon. 67 // that is currently being acted upon.
70 Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) 68 Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
71 69
70 // InterpolateProvider takes a ProviderConfig and interpolates it with the
71 // stored interpolation scope. Since provider configurations can be
72 // inherited, the interpolation scope may be different from the current
 73 // context path. Interpolation is otherwise executed the same as in the
74 // Interpolation method.
75 InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error)
76
72 // SetVariables sets the variables for the module within 77 // SetVariables sets the variables for the module within
73 // this context with the name n. This function call is additive: 78 // this context with the name n. This function call is additive:
74 // the second parameter is merged with any previous call. 79 // the second parameter is merged with any previous call.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
index 3dcfb22..1b6ee5a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -4,7 +4,6 @@ import (
4 "context" 4 "context"
5 "fmt" 5 "fmt"
6 "log" 6 "log"
7 "strings"
8 "sync" 7 "sync"
9 8
10 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/terraform/config"
@@ -34,7 +33,6 @@ type BuiltinEvalContext struct {
34 Hooks []Hook 33 Hooks []Hook
35 InputValue UIInput 34 InputValue UIInput
36 ProviderCache map[string]ResourceProvider 35 ProviderCache map[string]ResourceProvider
37 ProviderConfigCache map[string]*ResourceConfig
38 ProviderInputConfig map[string]map[string]interface{} 36 ProviderInputConfig map[string]map[string]interface{}
39 ProviderLock *sync.Mutex 37 ProviderLock *sync.Mutex
40 ProvisionerCache map[string]ResourceProvisioner 38 ProvisionerCache map[string]ResourceProvisioner
@@ -80,12 +78,12 @@ func (ctx *BuiltinEvalContext) Input() UIInput {
80 return ctx.InputValue 78 return ctx.InputValue
81} 79}
82 80
83func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) { 81func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) {
84 ctx.once.Do(ctx.init) 82 ctx.once.Do(ctx.init)
85 83
86 // If we already initialized, it is an error 84 // If we already initialized, it is an error
87 if p := ctx.Provider(n); p != nil { 85 if p := ctx.Provider(name); p != nil {
88 return nil, fmt.Errorf("Provider '%s' already initialized", n) 86 return nil, fmt.Errorf("Provider '%s' already initialized", name)
89 } 87 }
90 88
91 // Warning: make sure to acquire these locks AFTER the call to Provider 89 // Warning: make sure to acquire these locks AFTER the call to Provider
@@ -93,18 +91,12 @@ func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error)
93 ctx.ProviderLock.Lock() 91 ctx.ProviderLock.Lock()
94 defer ctx.ProviderLock.Unlock() 92 defer ctx.ProviderLock.Unlock()
95 93
96 providerPath := make([]string, len(ctx.Path())+1) 94 p, err := ctx.Components.ResourceProvider(typeName, name)
97 copy(providerPath, ctx.Path())
98 providerPath[len(providerPath)-1] = n
99 key := PathCacheKey(providerPath)
100
101 typeName := strings.SplitN(n, ".", 2)[0]
102 p, err := ctx.Components.ResourceProvider(typeName, key)
103 if err != nil { 95 if err != nil {
104 return nil, err 96 return nil, err
105 } 97 }
106 98
107 ctx.ProviderCache[key] = p 99 ctx.ProviderCache[name] = p
108 return p, nil 100 return p, nil
109} 101}
110 102
@@ -114,11 +106,7 @@ func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
114 ctx.ProviderLock.Lock() 106 ctx.ProviderLock.Lock()
115 defer ctx.ProviderLock.Unlock() 107 defer ctx.ProviderLock.Unlock()
116 108
117 providerPath := make([]string, len(ctx.Path())+1) 109 return ctx.ProviderCache[n]
118 copy(providerPath, ctx.Path())
119 providerPath[len(providerPath)-1] = n
120
121 return ctx.ProviderCache[PathCacheKey(providerPath)]
122} 110}
123 111
124func (ctx *BuiltinEvalContext) CloseProvider(n string) error { 112func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
@@ -127,15 +115,11 @@ func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
127 ctx.ProviderLock.Lock() 115 ctx.ProviderLock.Lock()
128 defer ctx.ProviderLock.Unlock() 116 defer ctx.ProviderLock.Unlock()
129 117
130 providerPath := make([]string, len(ctx.Path())+1)
131 copy(providerPath, ctx.Path())
132 providerPath[len(providerPath)-1] = n
133
134 var provider interface{} 118 var provider interface{}
135 provider = ctx.ProviderCache[PathCacheKey(providerPath)] 119 provider = ctx.ProviderCache[n]
136 if provider != nil { 120 if provider != nil {
137 if p, ok := provider.(ResourceProviderCloser); ok { 121 if p, ok := provider.(ResourceProviderCloser); ok {
138 delete(ctx.ProviderCache, PathCacheKey(providerPath)) 122 delete(ctx.ProviderCache, n)
139 return p.Close() 123 return p.Close()
140 } 124 }
141 } 125 }
@@ -149,28 +133,9 @@ func (ctx *BuiltinEvalContext) ConfigureProvider(
149 if p == nil { 133 if p == nil {
150 return fmt.Errorf("Provider '%s' not initialized", n) 134 return fmt.Errorf("Provider '%s' not initialized", n)
151 } 135 }
152
153 if err := ctx.SetProviderConfig(n, cfg); err != nil {
154 return nil
155 }
156
157 return p.Configure(cfg) 136 return p.Configure(cfg)
158} 137}
159 138
160func (ctx *BuiltinEvalContext) SetProviderConfig(
161 n string, cfg *ResourceConfig) error {
162 providerPath := make([]string, len(ctx.Path())+1)
163 copy(providerPath, ctx.Path())
164 providerPath[len(providerPath)-1] = n
165
166 // Save the configuration
167 ctx.ProviderLock.Lock()
168 ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
169 ctx.ProviderLock.Unlock()
170
171 return nil
172}
173
174func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { 139func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
175 ctx.ProviderLock.Lock() 140 ctx.ProviderLock.Lock()
176 defer ctx.ProviderLock.Unlock() 141 defer ctx.ProviderLock.Unlock()
@@ -203,27 +168,6 @@ func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface
203 ctx.ProviderLock.Unlock() 168 ctx.ProviderLock.Unlock()
204} 169}
205 170
206func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
207 ctx.ProviderLock.Lock()
208 defer ctx.ProviderLock.Unlock()
209
210 // Make a copy of the path so we can safely edit it
211 path := ctx.Path()
212 pathCopy := make([]string, len(path)+1)
213 copy(pathCopy, path)
214
215 // Go up the tree.
216 for i := len(path) - 1; i >= 0; i-- {
217 pathCopy[i+1] = n
218 k := PathCacheKey(pathCopy[:i+2])
219 if v, ok := ctx.ProviderConfigCache[k]; ok {
220 return v
221 }
222 }
223
224 return nil
225}
226
227func (ctx *BuiltinEvalContext) InitProvisioner( 171func (ctx *BuiltinEvalContext) InitProvisioner(
228 n string) (ResourceProvisioner, error) { 172 n string) (ResourceProvisioner, error) {
229 ctx.once.Do(ctx.init) 173 ctx.once.Do(ctx.init)
@@ -289,6 +233,7 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
289 233
290func (ctx *BuiltinEvalContext) Interpolate( 234func (ctx *BuiltinEvalContext) Interpolate(
291 cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { 235 cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
236
292 if cfg != nil { 237 if cfg != nil {
293 scope := &InterpolationScope{ 238 scope := &InterpolationScope{
294 Path: ctx.Path(), 239 Path: ctx.Path(),
@@ -311,6 +256,35 @@ func (ctx *BuiltinEvalContext) Interpolate(
311 return result, nil 256 return result, nil
312} 257}
313 258
259func (ctx *BuiltinEvalContext) InterpolateProvider(
260 pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) {
261
262 var cfg *config.RawConfig
263
264 if pc != nil && pc.RawConfig != nil {
265 scope := &InterpolationScope{
266 Path: ctx.Path(),
267 Resource: r,
268 }
269
270 cfg = pc.RawConfig
271
272 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
273 if err != nil {
274 return nil, err
275 }
276
277 // Do the interpolation
278 if err := cfg.Interpolate(vs); err != nil {
279 return nil, err
280 }
281 }
282
283 result := NewResourceConfig(cfg)
284 result.interpolateForce()
285 return result, nil
286}
287
314func (ctx *BuiltinEvalContext) Path() []string { 288func (ctx *BuiltinEvalContext) Path() []string {
315 return ctx.PathValue 289 return ctx.PathValue
316} 290}
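Illustrative sketch, not part of the diff: with this change the provider cache is keyed by the full provider name (for example "aws.west"), and the plugin type is derived by splitting on the first dot, as the ProviderEvalTree change later in this diff does. providerType below is a hypothetical helper showing that split:

package main

import (
	"fmt"
	"strings"
)

// providerType returns the plugin type for a provider name such as
// "aws.west" (an aliased instance of the "aws" provider).
func providerType(name string) string {
	return strings.SplitN(name, ".", 2)[0]
}

func main() {
	cache := map[string]string{} // keyed by full provider name, like ProviderCache above
	for _, name := range []string{"aws", "aws.west", "google"} {
		cache[name] = providerType(name)
	}
	fmt.Println(cache) // map[aws:aws aws.west:aws google:google]
}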
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
index 4f90d5b..6464517 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -45,14 +45,6 @@ type MockEvalContext struct {
45 ConfigureProviderConfig *ResourceConfig 45 ConfigureProviderConfig *ResourceConfig
46 ConfigureProviderError error 46 ConfigureProviderError error
47 47
48 SetProviderConfigCalled bool
49 SetProviderConfigName string
50 SetProviderConfigConfig *ResourceConfig
51
52 ParentProviderConfigCalled bool
53 ParentProviderConfigName string
54 ParentProviderConfigConfig *ResourceConfig
55
56 InitProvisionerCalled bool 48 InitProvisionerCalled bool
57 InitProvisionerName string 49 InitProvisionerName string
58 InitProvisionerProvisioner ResourceProvisioner 50 InitProvisionerProvisioner ResourceProvisioner
@@ -72,6 +64,12 @@ type MockEvalContext struct {
72 InterpolateConfigResult *ResourceConfig 64 InterpolateConfigResult *ResourceConfig
73 InterpolateError error 65 InterpolateError error
74 66
67 InterpolateProviderCalled bool
68 InterpolateProviderConfig *config.ProviderConfig
69 InterpolateProviderResource *Resource
70 InterpolateProviderConfigResult *ResourceConfig
71 InterpolateProviderError error
72
75 PathCalled bool 73 PathCalled bool
76 PathPath []string 74 PathPath []string
77 75
@@ -109,7 +107,7 @@ func (c *MockEvalContext) Input() UIInput {
109 return c.InputInput 107 return c.InputInput
110} 108}
111 109
112func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) { 110func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) {
113 c.InitProviderCalled = true 111 c.InitProviderCalled = true
114 c.InitProviderName = n 112 c.InitProviderName = n
115 return c.InitProviderProvider, c.InitProviderError 113 return c.InitProviderProvider, c.InitProviderError
@@ -134,20 +132,6 @@ func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error
134 return c.ConfigureProviderError 132 return c.ConfigureProviderError
135} 133}
136 134
137func (c *MockEvalContext) SetProviderConfig(
138 n string, cfg *ResourceConfig) error {
139 c.SetProviderConfigCalled = true
140 c.SetProviderConfigName = n
141 c.SetProviderConfigConfig = cfg
142 return nil
143}
144
145func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
146 c.ParentProviderConfigCalled = true
147 c.ParentProviderConfigName = n
148 return c.ParentProviderConfigConfig
149}
150
151func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { 135func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
152 c.ProviderInputCalled = true 136 c.ProviderInputCalled = true
153 c.ProviderInputName = n 137 c.ProviderInputName = n
@@ -186,6 +170,14 @@ func (c *MockEvalContext) Interpolate(
186 return c.InterpolateConfigResult, c.InterpolateError 170 return c.InterpolateConfigResult, c.InterpolateError
187} 171}
188 172
173func (c *MockEvalContext) InterpolateProvider(
174 config *config.ProviderConfig, resource *Resource) (*ResourceConfig, error) {
175 c.InterpolateProviderCalled = true
176 c.InterpolateProviderConfig = config
177 c.InterpolateProviderResource = resource
178 return c.InterpolateProviderConfigResult, c.InterpolateError
179}
180
189func (c *MockEvalContext) Path() []string { 181func (c *MockEvalContext) Path() []string {
190 c.PathCalled = true 182 c.PathCalled = true
191 return c.PathPath 183 return c.PathPath
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index c35f908..26205ce 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -6,6 +6,7 @@ import (
6 "strings" 6 "strings"
7 7
8 "github.com/hashicorp/terraform/config" 8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/version"
9) 10)
10 11
11// EvalCompareDiff is an EvalNode implementation that compares two diffs 12// EvalCompareDiff is an EvalNode implementation that compares two diffs
@@ -60,7 +61,7 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
60 "\n"+ 61 "\n"+
61 "Also include as much context as you can about your config, state, "+ 62 "Also include as much context as you can about your config, state, "+
62 "and the steps you performed to trigger this error.\n", 63 "and the steps you performed to trigger this error.\n",
63 n.Info.Id, Version, n.Info.Id, reason, one, two) 64 n.Info.Id, version.Version, n.Info.Id, reason, one, two)
64 } 65 }
65 66
66 return nil, nil 67 return nil, nil
@@ -255,11 +256,15 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
255 containers := groupContainers(diff) 256 containers := groupContainers(diff)
256 keep := map[string]bool{} 257 keep := map[string]bool{}
257 for _, v := range containers { 258 for _, v := range containers {
258 if v.keepDiff() { 259 if v.keepDiff(ignorableAttrKeys) {
259 // At least one key has changes, so list all the sibling keys 260 // At least one key has changes, so list all the sibling keys
260 // to keep in the diff. 261 // to keep in the diff
261 for k := range v { 262 for k := range v {
262 keep[k] = true 263 keep[k] = true
264 // this key may have been added by the user to ignore, but
265 // if it's a subkey in a container, we need to un-ignore it
 266 // to keep the complete container.
267 delete(ignorableAttrKeys, k)
263 } 268 }
264 } 269 }
265 } 270 }
@@ -291,10 +296,17 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
291// a group of key-*ResourceAttrDiff pairs from the same flatmapped container 296// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
292type flatAttrDiff map[string]*ResourceAttrDiff 297type flatAttrDiff map[string]*ResourceAttrDiff
293 298
294// we need to keep all keys if any of them have a diff 299// we need to keep all keys if any of them have a diff that's not ignored
295func (f flatAttrDiff) keepDiff() bool { 300func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool {
296 for _, v := range f { 301 for k, v := range f {
297 if !v.Empty() && !v.NewComputed { 302 ignore := false
303 for attr := range ignoreChanges {
304 if strings.HasPrefix(k, attr) {
305 ignore = true
306 }
307 }
308
309 if !v.Empty() && !v.NewComputed && !ignore {
298 return true 310 return true
299 } 311 }
300 } 312 }
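Illustrative sketch of the prefix test used by keepDiff above: a flatmapped key such as "tags.Name" counts as ignored when any entry in the ignorable-key set (for example "tags") is a prefix of it. The names below are illustrative only.

package main

import (
	"fmt"
	"strings"
)

// isIgnored reports whether a flatmapped attribute key falls under any
// ignore_changes entry, using the same prefix rule as keepDiff.
func isIgnored(key string, ignorable map[string]bool) bool {
	for attr := range ignorable {
		if strings.HasPrefix(key, attr) {
			return true
		}
	}
	return false
}

func main() {
	ignorable := map[string]bool{"tags": true}
	for _, k := range []string{"tags.Name", "tags.%", "ami"} {
		fmt.Printf("%-9s ignored=%v\n", k, isIgnored(k, ignorable))
	}
}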
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
index 6825ff5..6a78a6b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -1,18 +1,50 @@
1package terraform 1package terraform
2 2
3import "github.com/hashicorp/terraform/config" 3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7)
4 8
5// EvalInterpolate is an EvalNode implementation that takes a raw 9// EvalInterpolate is an EvalNode implementation that takes a raw
6// configuration and interpolates it. 10// configuration and interpolates it.
7type EvalInterpolate struct { 11type EvalInterpolate struct {
8 Config *config.RawConfig 12 Config *config.RawConfig
9 Resource *Resource 13 Resource *Resource
10 Output **ResourceConfig 14 Output **ResourceConfig
15 ContinueOnErr bool
11} 16}
12 17
13func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { 18func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
14 rc, err := ctx.Interpolate(n.Config, n.Resource) 19 rc, err := ctx.Interpolate(n.Config, n.Resource)
15 if err != nil { 20 if err != nil {
21 if n.ContinueOnErr {
22 log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err)
23 return nil, EvalEarlyExitError{}
24 }
25 return nil, err
26 }
27
28 if n.Output != nil {
29 *n.Output = rc
30 }
31
32 return nil, nil
33}
34
35// EvalInterpolateProvider is an EvalNode implementation that takes a
36// ProviderConfig and interpolates it. Provider configurations are the only
37// "inherited" type of configuration we have, and the original raw config may
38// have a different interpolation scope.
39type EvalInterpolateProvider struct {
40 Config *config.ProviderConfig
41 Resource *Resource
42 Output **ResourceConfig
43}
44
45func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) {
46 rc, err := ctx.InterpolateProvider(n.Config, n.Resource)
47 if err != nil {
16 return nil, err 48 return nil, err
17 } 49 }
18 50
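Illustrative sketch with simplified stand-in types (not the real terraform package): ContinueOnErr above converts an interpolation failure into an early-exit sentinel, so the walker stops evaluating that node's tree without treating the failure as fatal.

package main

import (
	"errors"
	"fmt"
)

// earlyExitError plays the role of EvalEarlyExitError: a distinguished type
// that means "stop evaluating this node, but don't fail the walk".
type earlyExitError struct{}

func (earlyExitError) Error() string { return "early exit" }

func evalInterpolate(continueOnErr bool) error {
	err := errors.New("interpolation failed")
	if continueOnErr {
		fmt.Println("[WARN] continuing despite:", err)
		return earlyExitError{}
	}
	return err
}

func main() {
	err := evalInterpolate(true)
	if _, ok := err.(earlyExitError); ok {
		fmt.Println("node skipped, walk continues")
	}
}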
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
new file mode 100644
index 0000000..a4b2a50
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
@@ -0,0 +1,86 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalLocal is an EvalNode implementation that evaluates the
10// expression for a local value and writes it into a transient part of
11// the state.
12type EvalLocal struct {
13 Name string
14 Value *config.RawConfig
15}
16
17func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
18 cfg, err := ctx.Interpolate(n.Value, nil)
19 if err != nil {
20 return nil, fmt.Errorf("local.%s: %s", n.Name, err)
21 }
22
23 state, lock := ctx.State()
24 if state == nil {
25 return nil, fmt.Errorf("cannot write local value to nil state")
26 }
27
28 // Get a write lock so we can access the state
29 lock.Lock()
30 defer lock.Unlock()
31
32 // Look for the module state. If we don't have one, create it.
33 mod := state.ModuleByPath(ctx.Path())
34 if mod == nil {
35 mod = state.AddModule(ctx.Path())
36 }
37
38 // Get the value from the config
39 var valueRaw interface{} = config.UnknownVariableValue
40 if cfg != nil {
41 var ok bool
42 valueRaw, ok = cfg.Get("value")
43 if !ok {
44 valueRaw = ""
45 }
46 if cfg.IsComputed("value") {
47 valueRaw = config.UnknownVariableValue
48 }
49 }
50
51 if mod.Locals == nil {
52 // initialize
53 mod.Locals = map[string]interface{}{}
54 }
55 mod.Locals[n.Name] = valueRaw
56
57 return nil, nil
58}
59
60// EvalDeleteLocal is an EvalNode implementation that deletes a Local value
 61// from the state. Locals aren't persisted, and we don't need to evaluate them
62// during destroy.
63type EvalDeleteLocal struct {
64 Name string
65}
66
67func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) {
68 state, lock := ctx.State()
69 if state == nil {
70 return nil, nil
71 }
72
73 // Get a write lock so we can access this instance
74 lock.Lock()
75 defer lock.Unlock()
76
77 // Look for the module state. If we don't have one, create it.
78 mod := state.ModuleByPath(ctx.Path())
79 if mod == nil {
80 return nil, nil
81 }
82
83 delete(mod.Locals, n.Name)
84
85 return nil, nil
86}
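Illustrative sketch of the value selection in EvalLocal above: a still-computed expression is stored as the unknown placeholder, a missing "value" attribute falls back to the empty string, and anything else is stored as interpolated. The constant below is a stand-in for config.UnknownVariableValue.

package main

import "fmt"

// unknownValue is a stand-in for config.UnknownVariableValue.
const unknownValue = "<computed>"

func localValue(cfg map[string]interface{}, computed bool) interface{} {
	if computed {
		return unknownValue
	}
	v, ok := cfg["value"]
	if !ok {
		return ""
	}
	return v
}

func main() {
	locals := map[string]interface{}{}
	locals["region"] = localValue(map[string]interface{}{"value": "us-east-1"}, false)
	locals["instance_id"] = localValue(map[string]interface{}{}, true)
	fmt.Println(locals) // map[instance_id:<computed> region:us-east-1]
}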
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
index cf61781..a834627 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -41,15 +41,16 @@ type EvalWriteOutput struct {
41 Name string 41 Name string
42 Sensitive bool 42 Sensitive bool
43 Value *config.RawConfig 43 Value *config.RawConfig
44 // ContinueOnErr allows interpolation to fail during Input
45 ContinueOnErr bool
44} 46}
45 47
46// TODO: test 48// TODO: test
47func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { 49func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
50 // This has to run before we have a state lock, since interpolation also
51 // reads the state
48 cfg, err := ctx.Interpolate(n.Value, nil) 52 cfg, err := ctx.Interpolate(n.Value, nil)
49 if err != nil { 53 // handle the error after we have the module from the state
50 // Log error but continue anyway
51 log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
52 }
53 54
54 state, lock := ctx.State() 55 state, lock := ctx.State()
55 if state == nil { 56 if state == nil {
@@ -59,13 +60,27 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
59 // Get a write lock so we can access this instance 60 // Get a write lock so we can access this instance
60 lock.Lock() 61 lock.Lock()
61 defer lock.Unlock() 62 defer lock.Unlock()
62
63 // Look for the module state. If we don't have one, create it. 63 // Look for the module state. If we don't have one, create it.
64 mod := state.ModuleByPath(ctx.Path()) 64 mod := state.ModuleByPath(ctx.Path())
65 if mod == nil { 65 if mod == nil {
66 mod = state.AddModule(ctx.Path()) 66 mod = state.AddModule(ctx.Path())
67 } 67 }
68 68
69 // handling the interpolation error
70 if err != nil {
71 if n.ContinueOnErr || flagWarnOutputErrors {
72 log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err)
73 // if we're continuing, make sure the output is included, and
74 // marked as unknown
75 mod.Outputs[n.Name] = &OutputState{
76 Type: "string",
77 Value: config.UnknownVariableValue,
78 }
79 return nil, EvalEarlyExitError{}
80 }
81 return nil, err
82 }
83
69 // Get the value from the config 84 // Get the value from the config
70 var valueRaw interface{} = config.UnknownVariableValue 85 var valueRaw interface{} = config.UnknownVariableValue
71 if cfg != nil { 86 if cfg != nil {
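Illustrative sketch, not part of the diff: the rewritten EvalWriteOutput interpolates before taking the state lock (interpolation itself reads state) and only decides what to do with an interpolation error once the module state is available. continueOnErr below stands in for either the node's ContinueOnErr field or the TF_WARN_OUTPUT_ERRORS flag.

package main

import (
	"errors"
	"fmt"
)

const unknownValue = "<computed>" // stand-in for config.UnknownVariableValue

// handleOutputErr mirrors the decision above: either record the output as
// unknown and keep going, or propagate the interpolation error.
func handleOutputErr(outputs map[string]string, name string, err error, continueOnErr bool) error {
	if err == nil {
		return nil
	}
	if continueOnErr {
		fmt.Printf("[ERROR] Output interpolation %q failed: %s\n", name, err)
		outputs[name] = unknownValue
		return nil // the real code returns an early-exit sentinel here
	}
	return err
}

func main() {
	outputs := map[string]string{}
	err := handleOutputErr(outputs, "address", errors.New("unresolved reference"), true)
	fmt.Println(err, outputs)
}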
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
index 092fd18..61f6ff9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -6,17 +6,6 @@ import (
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/config"
7) 7)
8 8
9// EvalSetProviderConfig sets the parent configuration for a provider
10// without configuring that provider, validating it, etc.
11type EvalSetProviderConfig struct {
12 Provider string
13 Config **ResourceConfig
14}
15
16func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
17 return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
18}
19
20// EvalBuildProviderConfig outputs a *ResourceConfig that is properly 9// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
21// merged with parents and inputs on top of what is configured in the file. 10// merged with parents and inputs on top of what is configured in the file.
22type EvalBuildProviderConfig struct { 11type EvalBuildProviderConfig struct {
@@ -28,7 +17,7 @@ type EvalBuildProviderConfig struct {
28func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { 17func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
29 cfg := *n.Config 18 cfg := *n.Config
30 19
31 // If we have a configuration set, then merge that in 20 // If we have an Input configuration set, then merge that in
32 if input := ctx.ProviderInput(n.Provider); input != nil { 21 if input := ctx.ProviderInput(n.Provider); input != nil {
33 // "input" is a map of the subset of config values that were known 22 // "input" is a map of the subset of config values that were known
34 // during the input walk, set by EvalInputProvider. Note that 23 // during the input walk, set by EvalInputProvider. Note that
@@ -40,13 +29,7 @@ func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
40 return nil, err 29 return nil, err
41 } 30 }
42 31
43 merged := cfg.raw.Merge(rc) 32 merged := rc.Merge(cfg.raw)
44 cfg = NewResourceConfig(merged)
45 }
46
47 // Get the parent configuration if there is one
48 if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
49 merged := cfg.raw.Merge(parent.raw)
50 cfg = NewResourceConfig(merged) 33 cfg = NewResourceConfig(merged)
51 } 34 }
52 35
@@ -69,11 +52,12 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
69// and returns nothing. The provider can be retrieved again with the 52// and returns nothing. The provider can be retrieved again with the
70// EvalGetProvider node. 53// EvalGetProvider node.
71type EvalInitProvider struct { 54type EvalInitProvider struct {
72 Name string 55 TypeName string
56 Name string
73} 57}
74 58
75func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { 59func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
76 return ctx.InitProvider(n.Name) 60 return ctx.InitProvider(n.TypeName, n.Name)
77} 61}
78 62
79// EvalCloseProvider is an EvalNode implementation that closes provider 63// EvalCloseProvider is an EvalNode implementation that closes provider
@@ -116,12 +100,8 @@ type EvalInputProvider struct {
116} 100}
117 101
118func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { 102func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
119 // If we already configured this provider, then don't do this again
120 if v := ctx.ProviderInput(n.Name); v != nil {
121 return nil, nil
122 }
123
124 rc := *n.Config 103 rc := *n.Config
104 orig := rc.DeepCopy()
125 105
126 // Wrap the input into a namespace 106 // Wrap the input into a namespace
127 input := &PrefixUIInput{ 107 input := &PrefixUIInput{
@@ -138,27 +118,20 @@ func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
138 "Error configuring %s: %s", n.Name, err) 118 "Error configuring %s: %s", n.Name, err)
139 } 119 }
140 120
141 // Set the input that we received so that child modules don't attempt 121 // We only store values that have changed through Input.
142 // to ask for input again. 122 // The goal is to cache input responses, not to provide a complete
123 // config for other providers.
124 confMap := make(map[string]interface{})
143 if config != nil && len(config.Config) > 0 { 125 if config != nil && len(config.Config) > 0 {
144 // This repository of provider input results on the context doesn't 126 // any values that weren't in the original ResourcConfig will be cached
145 // retain config.ComputedKeys, so we need to filter those out here 127 for k, v := range config.Config {
146 // in order that later users of this data won't try to use the unknown 128 if _, ok := orig.Config[k]; !ok {
147 // value placeholder as if it were a literal value. This map is just 129 confMap[k] = v
148 // of known values we've been able to complete so far; dynamic stuff
149 // will be merged in by EvalBuildProviderConfig on subsequent
150 // (post-input) walks.
151 confMap := config.Config
152 if config.ComputedKeys != nil {
153 for _, key := range config.ComputedKeys {
154 delete(confMap, key)
155 } 130 }
156 } 131 }
157
158 ctx.SetProviderInput(n.Name, confMap)
159 } else {
160 ctx.SetProviderInput(n.Name, map[string]interface{}{})
161 } 132 }
162 133
134 ctx.SetProviderInput(n.Name, confMap)
135
163 return nil, nil 136 return nil, nil
164} 137}
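Illustrative sketch of the "cache only what Input added" rule in EvalInputProvider above: values already present in the written configuration are not stored, so cached input answers never override explicit config on later walks. The helper name below is illustrative only.

package main

import "fmt"

// newInputValues keeps only the keys that appeared through user input and
// were not in the original provider configuration.
func newInputValues(original, afterInput map[string]interface{}) map[string]interface{} {
	added := make(map[string]interface{})
	for k, v := range afterInput {
		if _, ok := original[k]; !ok {
			added[k] = v
		}
	}
	return added
}

func main() {
	original := map[string]interface{}{"region": "us-east-1"}
	afterInput := map[string]interface{}{"region": "us-east-1", "access_key": "from-prompt"}
	fmt.Println(newInputValues(original, afterInput)) // map[access_key:from-prompt]
}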
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
index 126a0e6..1182690 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -1,6 +1,8 @@
1package terraform 1package terraform
2 2
3import "fmt" 3import (
4 "fmt"
5)
4 6
5// EvalReadState is an EvalNode implementation that reads the 7// EvalReadState is an EvalNode implementation that reads the
6// primary InstanceState for a specific resource out of the state. 8// primary InstanceState for a specific resource out of the state.
@@ -212,37 +214,6 @@ func writeInstanceToState(
212 return nil, nil 214 return nil, nil
213} 215}
214 216
215// EvalClearPrimaryState is an EvalNode implementation that clears the primary
216// instance from a resource state.
217type EvalClearPrimaryState struct {
218 Name string
219}
220
221func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
222 state, lock := ctx.State()
223
224 // Get a read lock so we can access this instance
225 lock.RLock()
226 defer lock.RUnlock()
227
228 // Look for the module state. If we don't have one, then it doesn't matter.
229 mod := state.ModuleByPath(ctx.Path())
230 if mod == nil {
231 return nil, nil
232 }
233
234 // Look for the resource state. If we don't have one, then it is okay.
235 rs := mod.Resources[n.Name]
236 if rs == nil {
237 return nil, nil
238 }
239
240 // Clear primary from the resource state
241 rs.Primary = nil
242
243 return nil, nil
244}
245
246// EvalDeposeState is an EvalNode implementation that takes the primary 217// EvalDeposeState is an EvalNode implementation that takes the primary
247// out of a state and makes it Deposed. This is done at the beginning of 218// out of a state and makes it Deposed. This is done at the beginning of
248// create-before-destroy calls so that the create can create while preserving 219// create-before-destroy calls so that the create can create while preserving
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
index 478aa64..3e5a84c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -144,16 +144,20 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
144 144
145 // For type=ssh only (enforced in ssh communicator) 145 // For type=ssh only (enforced in ssh communicator)
146 PrivateKey interface{} `mapstructure:"private_key"` 146 PrivateKey interface{} `mapstructure:"private_key"`
147 HostKey interface{} `mapstructure:"host_key"`
147 Agent interface{} `mapstructure:"agent"` 148 Agent interface{} `mapstructure:"agent"`
148 BastionHost interface{} `mapstructure:"bastion_host"` 149 BastionHost interface{} `mapstructure:"bastion_host"`
150 BastionHostKey interface{} `mapstructure:"bastion_host_key"`
149 BastionPort interface{} `mapstructure:"bastion_port"` 151 BastionPort interface{} `mapstructure:"bastion_port"`
150 BastionUser interface{} `mapstructure:"bastion_user"` 152 BastionUser interface{} `mapstructure:"bastion_user"`
151 BastionPassword interface{} `mapstructure:"bastion_password"` 153 BastionPassword interface{} `mapstructure:"bastion_password"`
152 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` 154 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
155 AgentIdentity interface{} `mapstructure:"agent_identity"`
153 156
154 // For type=winrm only (enforced in winrm communicator) 157 // For type=winrm only (enforced in winrm communicator)
155 HTTPS interface{} `mapstructure:"https"` 158 HTTPS interface{} `mapstructure:"https"`
156 Insecure interface{} `mapstructure:"insecure"` 159 Insecure interface{} `mapstructure:"insecure"`
160 NTLM interface{} `mapstructure:"use_ntlm"`
157 CACert interface{} `mapstructure:"cacert"` 161 CACert interface{} `mapstructure:"cacert"`
158 } 162 }
159 163
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
index 00392ef..0c3da48 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -1,17 +1,24 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "strings"
5
4 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/config"
5) 7)
6 8
7// ProviderEvalTree returns the evaluation tree for initializing and 9// ProviderEvalTree returns the evaluation tree for initializing and
8// configuring providers. 10// configuring providers.
9func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { 11func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode {
10 var provider ResourceProvider 12 var provider ResourceProvider
11 var resourceConfig *ResourceConfig 13 var resourceConfig *ResourceConfig
12 14
15 typeName := strings.SplitN(n.NameValue, ".", 2)[0]
16
13 seq := make([]EvalNode, 0, 5) 17 seq := make([]EvalNode, 0, 5)
14 seq = append(seq, &EvalInitProvider{Name: n}) 18 seq = append(seq, &EvalInitProvider{
19 TypeName: typeName,
20 Name: n.Name(),
21 })
15 22
16 // Input stuff 23 // Input stuff
17 seq = append(seq, &EvalOpFilter{ 24 seq = append(seq, &EvalOpFilter{
@@ -19,20 +26,20 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
19 Node: &EvalSequence{ 26 Node: &EvalSequence{
20 Nodes: []EvalNode{ 27 Nodes: []EvalNode{
21 &EvalGetProvider{ 28 &EvalGetProvider{
22 Name: n, 29 Name: n.Name(),
23 Output: &provider, 30 Output: &provider,
24 }, 31 },
25 &EvalInterpolate{ 32 &EvalInterpolateProvider{
26 Config: config, 33 Config: config,
27 Output: &resourceConfig, 34 Output: &resourceConfig,
28 }, 35 },
29 &EvalBuildProviderConfig{ 36 &EvalBuildProviderConfig{
30 Provider: n, 37 Provider: n.NameValue,
31 Config: &resourceConfig, 38 Config: &resourceConfig,
32 Output: &resourceConfig, 39 Output: &resourceConfig,
33 }, 40 },
34 &EvalInputProvider{ 41 &EvalInputProvider{
35 Name: n, 42 Name: n.NameValue,
36 Provider: &provider, 43 Provider: &provider,
37 Config: &resourceConfig, 44 Config: &resourceConfig,
38 }, 45 },
@@ -45,15 +52,15 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
45 Node: &EvalSequence{ 52 Node: &EvalSequence{
46 Nodes: []EvalNode{ 53 Nodes: []EvalNode{
47 &EvalGetProvider{ 54 &EvalGetProvider{
48 Name: n, 55 Name: n.Name(),
49 Output: &provider, 56 Output: &provider,
50 }, 57 },
51 &EvalInterpolate{ 58 &EvalInterpolateProvider{
52 Config: config, 59 Config: config,
53 Output: &resourceConfig, 60 Output: &resourceConfig,
54 }, 61 },
55 &EvalBuildProviderConfig{ 62 &EvalBuildProviderConfig{
56 Provider: n, 63 Provider: n.NameValue,
57 Config: &resourceConfig, 64 Config: &resourceConfig,
58 Output: &resourceConfig, 65 Output: &resourceConfig,
59 }, 66 },
@@ -61,10 +68,6 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
61 Provider: &provider, 68 Provider: &provider,
62 Config: &resourceConfig, 69 Config: &resourceConfig,
63 }, 70 },
64 &EvalSetProviderConfig{
65 Provider: n,
66 Config: &resourceConfig,
67 },
68 }, 71 },
69 }, 72 },
70 }) 73 })
@@ -75,22 +78,18 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
75 Node: &EvalSequence{ 78 Node: &EvalSequence{
76 Nodes: []EvalNode{ 79 Nodes: []EvalNode{
77 &EvalGetProvider{ 80 &EvalGetProvider{
78 Name: n, 81 Name: n.Name(),
79 Output: &provider, 82 Output: &provider,
80 }, 83 },
81 &EvalInterpolate{ 84 &EvalInterpolateProvider{
82 Config: config, 85 Config: config,
83 Output: &resourceConfig, 86 Output: &resourceConfig,
84 }, 87 },
85 &EvalBuildProviderConfig{ 88 &EvalBuildProviderConfig{
86 Provider: n, 89 Provider: n.NameValue,
87 Config: &resourceConfig, 90 Config: &resourceConfig,
88 Output: &resourceConfig, 91 Output: &resourceConfig,
89 }, 92 },
90 &EvalSetProviderConfig{
91 Provider: n,
92 Config: &resourceConfig,
93 },
94 }, 93 },
95 }, 94 },
96 }) 95 })
@@ -102,7 +101,7 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
102 Node: &EvalSequence{ 101 Node: &EvalSequence{
103 Nodes: []EvalNode{ 102 Nodes: []EvalNode{
104 &EvalConfigProvider{ 103 &EvalConfigProvider{
105 Provider: n, 104 Provider: n.Name(),
106 Config: &resourceConfig, 105 Config: &resourceConfig,
107 }, 106 },
108 }, 107 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go
new file mode 100644
index 0000000..97c77bd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/features.go
@@ -0,0 +1,7 @@
1package terraform
2
3import "os"
4
5// This file holds feature flags for the next release
6
7var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != ""
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
index 48ce6a3..735ec4e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -70,7 +70,7 @@ func (g *Graph) walk(walker GraphWalker) error {
70 // Walk the graph. 70 // Walk the graph.
71 var walkFn dag.WalkFunc 71 var walkFn dag.WalkFunc
72 walkFn = func(v dag.Vertex) (rerr error) { 72 walkFn = func(v dag.Vertex) (rerr error) {
73 log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v)) 73 log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v))
74 g.DebugVisitInfo(v, g.debugName) 74 g.DebugVisitInfo(v, g.debugName)
75 75
76 // If we have a panic wrap GraphWalker and a panic occurs, recover 76 // If we have a panic wrap GraphWalker and a panic occurs, recover
@@ -118,7 +118,7 @@ func (g *Graph) walk(walker GraphWalker) error {
118 118
119 // Allow the walker to change our tree if needed. Eval, 119 // Allow the walker to change our tree if needed. Eval,
120 // then callback with the output. 120 // then callback with the output.
121 log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v)) 121 log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v))
122 122
123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) 123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
124 124
@@ -132,7 +132,7 @@ func (g *Graph) walk(walker GraphWalker) error {
132 // If the node is dynamically expanded, then expand it 132 // If the node is dynamically expanded, then expand it
133 if ev, ok := v.(GraphNodeDynamicExpandable); ok { 133 if ev, ok := v.(GraphNodeDynamicExpandable); ok {
134 log.Printf( 134 log.Printf(
135 "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph", 135 "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph",
136 path, 136 path,
137 dag.VertexName(v)) 137 dag.VertexName(v))
138 138
@@ -154,7 +154,7 @@ func (g *Graph) walk(walker GraphWalker) error {
154 // If the node has a subgraph, then walk the subgraph 154 // If the node has a subgraph, then walk the subgraph
155 if sn, ok := v.(GraphNodeSubgraph); ok { 155 if sn, ok := v.(GraphNodeSubgraph); ok {
156 log.Printf( 156 log.Printf(
157 "[DEBUG] vertex '%s.%s': walking subgraph", 157 "[TRACE] vertex '%s.%s': walking subgraph",
158 path, 158 path,
159 dag.VertexName(v)) 159 dag.VertexName(v))
160 160
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
index 38a90f2..0c2b233 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -87,12 +87,8 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
87 // Attach the state 87 // Attach the state
88 &AttachStateTransformer{State: b.State}, 88 &AttachStateTransformer{State: b.State},
89 89
90 // Create all the providers 90 // add providers
91 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, 91 TransformProviders(b.Providers, concreteProvider, b.Module),
92 &ProviderTransformer{},
93 &DisableProviderTransformer{},
94 &ParentProviderTransformer{},
95 &AttachProviderConfigTransformer{Module: b.Module},
96 92
97 // Destruction ordering 93 // Destruction ordering
98 &DestroyEdgeTransformer{Module: b.Module, State: b.State}, 94 &DestroyEdgeTransformer{Module: b.Module, State: b.State},
@@ -108,15 +104,36 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
108 // Add root variables 104 // Add root variables
109 &RootVariableTransformer{Module: b.Module}, 105 &RootVariableTransformer{Module: b.Module},
110 106
107 // Add the local values
108 &LocalTransformer{Module: b.Module},
109
111 // Add the outputs 110 // Add the outputs
112 &OutputTransformer{Module: b.Module}, 111 &OutputTransformer{Module: b.Module},
113 112
114 // Add module variables 113 // Add module variables
115 &ModuleVariableTransformer{Module: b.Module}, 114 &ModuleVariableTransformer{Module: b.Module},
116 115
116 // Remove modules no longer present in the config
117 &RemovedModuleTransformer{Module: b.Module, State: b.State},
118
117 // Connect references so ordering is correct 119 // Connect references so ordering is correct
118 &ReferenceTransformer{}, 120 &ReferenceTransformer{},
119 121
122 // Handle destroy time transformations for output and local values.
123 // Reverse the edges from outputs and locals, so that
124 // interpolations don't fail during destroy.
125 // Create a destroy node for outputs to remove them from the state.
126 // Prune unreferenced values, which may have interpolations that can't
127 // be resolved.
128 GraphTransformIf(
129 func() bool { return b.Destroy },
130 GraphTransformMulti(
131 &DestroyValueReferenceTransformer{},
132 &DestroyOutputTransformer{},
133 &PruneUnusedValuesTransformer{},
134 ),
135 ),
136
120 // Add the node to fix the state count boundaries 137 // Add the node to fix the state count boundaries
121 &CountBoundaryTransformer{}, 138 &CountBoundaryTransformer{},
122 139
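Illustrative sketch with generic stand-in types (not the real terraform graph package): the GraphTransformIf / GraphTransformMulti pair above groups the destroy-only transforms behind a predicate, so they run only when the builder is in destroy mode.

package main

import "fmt"

type transformer interface {
	Transform(g *[]string) error
}

type step string

func (s step) Transform(g *[]string) error {
	*g = append(*g, string(s))
	return nil
}

// conditional runs its wrapped transformer only when cond returns true,
// mirroring GraphTransformIf.
type conditional struct {
	cond func() bool
	then transformer
}

func (c conditional) Transform(g *[]string) error {
	if !c.cond() {
		return nil
	}
	return c.then.Transform(g)
}

// multi runs several transformers in order, mirroring GraphTransformMulti.
type multi []transformer

func (m multi) Transform(g *[]string) error {
	for _, t := range m {
		if err := t.Transform(g); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	destroy := true
	steps := []transformer{
		step("ReferenceTransformer"),
		conditional{
			cond: func() bool { return destroy },
			then: multi{step("DestroyValueReferenceTransformer"), step("DestroyOutputTransformer")},
		},
	}

	var applied []string
	for _, s := range steps {
		_ = s.Transform(&applied)
	}
	fmt.Println(applied)
}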
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
index 7070c59..07a1eaf 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -52,12 +52,7 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer {
52 // Add the import steps 52 // Add the import steps
53 &ImportStateTransformer{Targets: b.ImportTargets}, 53 &ImportStateTransformer{Targets: b.ImportTargets},
54 54
55 // Provider-related transformations 55 TransformProviders(b.Providers, concreteProvider, mod),
56 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
57 &ProviderTransformer{},
58 &DisableProviderTransformer{},
59 &ParentProviderTransformer{},
60 &AttachProviderConfigTransformer{Module: mod},
61 56
62 // This validates that the providers only depend on variables 57 // This validates that the providers only depend on variables
63 &ImportProviderValidateTransformer{}, 58 &ImportProviderValidateTransformer{},
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
index 4b29bbb..f8dd0fc 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -71,6 +71,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
71 Module: b.Module, 71 Module: b.Module,
72 }, 72 },
73 73
74 // Add the local values
75 &LocalTransformer{Module: b.Module},
76
74 // Add the outputs 77 // Add the outputs
75 &OutputTransformer{Module: b.Module}, 78 &OutputTransformer{Module: b.Module},
76 79
@@ -81,6 +84,12 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
81 Module: b.Module, 84 Module: b.Module,
82 }, 85 },
83 86
87 // Create orphan output nodes
88 &OrphanOutputTransformer{
89 Module: b.Module,
90 State: b.State,
91 },
92
84 // Attach the configuration to any resources 93 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module}, 94 &AttachResourceConfigTransformer{Module: b.Module},
86 95
@@ -90,12 +99,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
90 // Add root variables 99 // Add root variables
91 &RootVariableTransformer{Module: b.Module}, 100 &RootVariableTransformer{Module: b.Module},
92 101
93 // Create all the providers 102 TransformProviders(b.Providers, b.ConcreteProvider, b.Module),
94 &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
95 &ProviderTransformer{},
96 &DisableProviderTransformer{},
97 &ParentProviderTransformer{},
98 &AttachProviderConfigTransformer{Module: b.Module},
99 103
100 // Provisioner-related transformations. Only add these if requested. 104 // Provisioner-related transformations. Only add these if requested.
101 GraphTransformIf( 105 GraphTransformIf(
@@ -107,7 +111,12 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
107 ), 111 ),
108 112
109 // Add module variables 113 // Add module variables
110 &ModuleVariableTransformer{Module: b.Module}, 114 &ModuleVariableTransformer{
115 Module: b.Module,
116 },
117
118 // Remove modules no longer present in the config
119 &RemovedModuleTransformer{Module: b.Module, State: b.State},
111 120
112 // Connect so that the references are ready for targeting. We'll 121 // Connect so that the references are ready for targeting. We'll
113 // have to connect again later for providers and so on. 122 // have to connect again later for providers and so on.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
index 3d3e968..9638d4c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -126,12 +126,10 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
126 // Add root variables 126 // Add root variables
127 &RootVariableTransformer{Module: b.Module}, 127 &RootVariableTransformer{Module: b.Module},
128 128
129 // Create all the providers 129 TransformProviders(b.Providers, concreteProvider, b.Module),
130 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, 130
131 &ProviderTransformer{}, 131 // Add the local values
132 &DisableProviderTransformer{}, 132 &LocalTransformer{Module: b.Module},
133 &ParentProviderTransformer{},
134 &AttachProviderConfigTransformer{Module: b.Module},
135 133
136 // Add the outputs 134 // Add the outputs
137 &OutputTransformer{Module: b.Module}, 135 &OutputTransformer{Module: b.Module},
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
index e63b460..89f376e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -32,7 +32,6 @@ type ContextGraphWalker struct {
32 interpolaterVars map[string]map[string]interface{} 32 interpolaterVars map[string]map[string]interface{}
33 interpolaterVarLock sync.Mutex 33 interpolaterVarLock sync.Mutex
34 providerCache map[string]ResourceProvider 34 providerCache map[string]ResourceProvider
35 providerConfigCache map[string]*ResourceConfig
36 providerLock sync.Mutex 35 providerLock sync.Mutex
37 provisionerCache map[string]ResourceProvisioner 36 provisionerCache map[string]ResourceProvisioner
38 provisionerLock sync.Mutex 37 provisionerLock sync.Mutex
@@ -73,7 +72,6 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
73 InputValue: w.Context.uiInput, 72 InputValue: w.Context.uiInput,
74 Components: w.Context.components, 73 Components: w.Context.components,
75 ProviderCache: w.providerCache, 74 ProviderCache: w.providerCache,
76 ProviderConfigCache: w.providerConfigCache,
77 ProviderInputConfig: w.Context.providerInputConfig, 75 ProviderInputConfig: w.Context.providerInputConfig,
78 ProviderLock: &w.providerLock, 76 ProviderLock: &w.providerLock,
79 ProvisionerCache: w.provisionerCache, 77 ProvisionerCache: w.provisionerCache,
@@ -151,7 +149,6 @@ func (w *ContextGraphWalker) ExitEvalTree(
151func (w *ContextGraphWalker) init() { 149func (w *ContextGraphWalker) init() {
152 w.contexts = make(map[string]*BuiltinEvalContext, 5) 150 w.contexts = make(map[string]*BuiltinEvalContext, 5)
153 w.providerCache = make(map[string]ResourceProvider, 5) 151 w.providerCache = make(map[string]ResourceProvider, 5)
154 w.providerConfigCache = make(map[string]*ResourceConfig, 5)
155 w.provisionerCache = make(map[string]ResourceProvisioner, 5) 152 w.provisionerCache = make(map[string]ResourceProvisioner, 5)
156 w.interpolaterVars = make(map[string]map[string]interface{}, 5) 153 w.interpolaterVars = make(map[string]map[string]interface{}, 5)
157} 154}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
index e97b485..95ef4e9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -2,7 +2,7 @@
2 2
3package terraform 3package terraform
4 4
5import "fmt" 5import "strconv"
6 6
7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" 7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
8 8
@@ -10,7 +10,7 @@ var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
10 10
11func (i GraphType) String() string { 11func (i GraphType) String() string {
12 if i >= GraphType(len(_GraphType_index)-1) { 12 if i >= GraphType(len(_GraphType_index)-1) {
13 return fmt.Sprintf("GraphType(%d)", i) 13 return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")"
14 } 14 }
15 return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] 15 return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
16} 16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
index f69267c..b8e7d1f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -2,7 +2,7 @@
2 2
3package terraform 3package terraform
4 4
5import "fmt" 5import "strconv"
6 6
7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" 7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
8 8
@@ -10,7 +10,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
10 10
11func (i InstanceType) String() string { 11func (i InstanceType) String() string {
12 if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { 12 if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
13 return fmt.Sprintf("InstanceType(%d)", i) 13 return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")"
14 } 14 }
15 return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] 15 return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
16} 16}
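
Both generated stringers above make the same substitution: the out-of-range fallback now builds the string with strconv instead of fmt.Sprintf, so the generated files no longer need to import fmt. A minimal standalone sketch of that fallback pattern (enumFallback is an illustrative name, not part of the vendored code):

package main

import (
	"fmt"
	"strconv"
)

// enumFallback builds the same "Name(N)" form the generated String methods
// return for out-of-range values, using strconv concatenation rather than
// fmt.Sprintf so generated code only depends on strconv.
func enumFallback(name string, v int64) string {
	return name + "(" + strconv.FormatInt(v, 10) + ")"
}

func main() {
	fmt.Println(enumFallback("GraphType", 42))    // GraphType(42)
	fmt.Println(enumFallback("InstanceType", -1)) // InstanceType(-1)
}
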
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
index 22ddce6..4f4e178 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -90,6 +90,8 @@ func (i *Interpolater) Values(
90 err = i.valueSimpleVar(scope, n, v, result) 90 err = i.valueSimpleVar(scope, n, v, result)
91 case *config.TerraformVariable: 91 case *config.TerraformVariable:
92 err = i.valueTerraformVar(scope, n, v, result) 92 err = i.valueTerraformVar(scope, n, v, result)
93 case *config.LocalVariable:
94 err = i.valueLocalVar(scope, n, v, result)
93 case *config.UserVariable: 95 case *config.UserVariable:
94 err = i.valueUserVar(scope, n, v, result) 96 err = i.valueUserVar(scope, n, v, result)
95 default: 97 default:
@@ -140,7 +142,6 @@ func (i *Interpolater) valueModuleVar(
140 n string, 142 n string,
141 v *config.ModuleVariable, 143 v *config.ModuleVariable,
142 result map[string]ast.Variable) error { 144 result map[string]ast.Variable) error {
143
144 // Build the path to the child module we want 145 // Build the path to the child module we want
145 path := make([]string, len(scope.Path), len(scope.Path)+1) 146 path := make([]string, len(scope.Path), len(scope.Path)+1)
146 copy(path, scope.Path) 147 copy(path, scope.Path)
@@ -317,7 +318,6 @@ func (i *Interpolater) valueTerraformVar(
317 n string, 318 n string,
318 v *config.TerraformVariable, 319 v *config.TerraformVariable,
319 result map[string]ast.Variable) error { 320 result map[string]ast.Variable) error {
320
321 // "env" is supported for backward compatibility, but it's deprecated and 321 // "env" is supported for backward compatibility, but it's deprecated and
322 // so we won't advertise it as being allowed in the error message. It will 322 // so we won't advertise it as being allowed in the error message. It will
323 // be removed in a future version of Terraform. 323 // be removed in a future version of Terraform.
@@ -335,6 +335,59 @@ func (i *Interpolater) valueTerraformVar(
335 return nil 335 return nil
336} 336}
337 337
338func (i *Interpolater) valueLocalVar(
339 scope *InterpolationScope,
340 n string,
341 v *config.LocalVariable,
342 result map[string]ast.Variable,
343) error {
344 i.StateLock.RLock()
345 defer i.StateLock.RUnlock()
346
347 modTree := i.Module
348 if len(scope.Path) > 1 {
349 modTree = i.Module.Child(scope.Path[1:])
350 }
351
 352 // Get the local value's declaration from the configuration so we can
 353 // verify that it has actually been declared and so we can access its
 354 // definition if we need to.
355 var cl *config.Local
356 for _, l := range modTree.Config().Locals {
357 if l.Name == v.Name {
358 cl = l
359 break
360 }
361 }
362
363 if cl == nil {
364 return fmt.Errorf("%s: no local value of this name has been declared", n)
365 }
366
367 // Get the relevant module
368 module := i.State.ModuleByPath(scope.Path)
369 if module == nil {
370 result[n] = unknownVariable()
371 return nil
372 }
373
374 rawV, exists := module.Locals[v.Name]
375 if !exists {
376 result[n] = unknownVariable()
377 return nil
378 }
379
380 varV, err := hil.InterfaceToVariable(rawV)
381 if err != nil {
382 // Should never happen, since interpolation should always produce
383 // something we can feed back in to interpolation.
384 return fmt.Errorf("%s: %s", n, err)
385 }
386
387 result[n] = varV
388 return nil
389}
390
338func (i *Interpolater) valueUserVar( 391func (i *Interpolater) valueUserVar(
339 scope *InterpolationScope, 392 scope *InterpolationScope,
340 n string, 393 n string,
@@ -465,6 +518,16 @@ func (i *Interpolater) computeResourceVariable(
465 return &v, err 518 return &v, err
466 } 519 }
467 520
521 // special case for the "id" field which is usually also an attribute
522 if v.Field == "id" && r.Primary.ID != "" {
523 // This is usually pulled from the attributes, but is sometimes missing
524 // during destroy. We can return the ID field in this case.
525 // FIXME: there should only be one ID to rule them all.
526 log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId())
527 v, err := hil.InterfaceToVariable(r.Primary.ID)
528 return &v, err
529 }
530
468 // computed list or map attribute 531 // computed list or map attribute
469 _, isList = r.Primary.Attributes[v.Field+".#"] 532 _, isList = r.Primary.Attributes[v.Field+".#"]
470 _, isMap = r.Primary.Attributes[v.Field+".%"] 533 _, isMap = r.Primary.Attributes[v.Field+".%"]
@@ -602,6 +665,11 @@ func (i *Interpolater) computeResourceMultiVariable(
602 continue 665 continue
603 } 666 }
604 667
668 if v.Field == "id" && r.Primary.ID != "" {
669 log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId())
670 values = append(values, r.Primary.ID)
671 }
672
605 // computed list or map attribute 673 // computed list or map attribute
606 _, isList := r.Primary.Attributes[v.Field+".#"] 674 _, isList := r.Primary.Attributes[v.Field+".#"]
607 _, isMap := r.Primary.Attributes[v.Field+".%"] 675 _, isMap := r.Primary.Attributes[v.Field+".%"]
@@ -646,7 +714,6 @@ func (i *Interpolater) computeResourceMultiVariable(
646func (i *Interpolater) interpolateComplexTypeAttribute( 714func (i *Interpolater) interpolateComplexTypeAttribute(
647 resourceID string, 715 resourceID string,
648 attributes map[string]string) (ast.Variable, error) { 716 attributes map[string]string) (ast.Variable, error) {
649
650 // We can now distinguish between lists and maps in state by the count field: 717 // We can now distinguish between lists and maps in state by the count field:
651 // - lists (and by extension, sets) use the traditional .# notation 718 // - lists (and by extension, sets) use the traditional .# notation
652 // - maps use the newer .% notation 719 // - maps use the newer .% notation
@@ -722,7 +789,8 @@ func (i *Interpolater) resourceCountMax(
722 // If we're NOT applying, then we assume we can read the count 789 // If we're NOT applying, then we assume we can read the count
723 // from the state. Plan and so on may not have any state yet so 790 // from the state. Plan and so on may not have any state yet so
724 // we do a full interpolation. 791 // we do a full interpolation.
725 if i.Operation != walkApply { 792 // Don't forget walkDestroy, which is a special case of walkApply
793 if !(i.Operation == walkApply || i.Operation == walkDestroy) {
726 if cr == nil { 794 if cr == nil {
727 return 0, nil 795 return 0, nil
728 } 796 }
@@ -753,7 +821,13 @@ func (i *Interpolater) resourceCountMax(
753 // use "cr.Count()" but that doesn't work if the count is interpolated 821 // use "cr.Count()" but that doesn't work if the count is interpolated
754 // and we can't guarantee that so we instead depend on the state. 822 // and we can't guarantee that so we instead depend on the state.
755 max := -1 823 max := -1
756 for k, _ := range ms.Resources { 824 for k, s := range ms.Resources {
825 // This resource may have been just removed, in which case the Primary
826 // may be nil, or just empty.
827 if s == nil || s.Primary == nil || len(s.Primary.Attributes) == 0 {
828 continue
829 }
830
757 // Get the index number for this resource 831 // Get the index number for this resource
758 index := "" 832 index := ""
759 if k == id { 833 if k == id {
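
The headline addition in interpolate.go is the *config.LocalVariable case and its valueLocalVar helper: the local must be declared in the module's configuration, while a missing entry in the module state simply yields an unknown value rather than an error. A simplified standalone sketch of that lookup order, using stand-in Local and ModuleState types instead of the real config and state structs:

package main

import "fmt"

type Local struct{ Name string }

type ModuleState struct{ Locals map[string]interface{} }

// lookupLocal mirrors the shape of valueLocalVar: an undeclared local is an
// error, while a declared local that has not been evaluated yet is "unknown".
func lookupLocal(declared []*Local, state *ModuleState, name string) (interface{}, error) {
	var found *Local
	for _, l := range declared {
		if l.Name == name {
			found = l
			break
		}
	}
	if found == nil {
		return nil, fmt.Errorf("local.%s: no local value of this name has been declared", name)
	}
	if state == nil {
		return "<unknown>", nil
	}
	v, ok := state.Locals[name]
	if !ok {
		return "<unknown>", nil
	}
	return v, nil
}

func main() {
	declared := []*Local{{Name: "region"}}
	state := &ModuleState{Locals: map[string]interface{}{"region": "us-east-1"}}

	v, _ := lookupLocal(declared, state, "region")
	fmt.Println(v) // us-east-1

	_, err := lookupLocal(declared, state, "missing")
	fmt.Println(err) // local.missing: no local value of this name has been declared
}
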
diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
index b9f44a0..4594cb6 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
@@ -17,7 +17,6 @@ import (
17// present in the configuration. This is guaranteed not to happen for any 17// present in the configuration. This is guaranteed not to happen for any
18// configuration that has passed a call to Config.Validate(). 18// configuration that has passed a call to Config.Validate().
19func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module { 19func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module {
20
21 // First we walk the configuration tree to build the overall structure 20 // First we walk the configuration tree to build the overall structure
22 // and capture the explicit/implicit/inherited provider dependencies. 21 // and capture the explicit/implicit/inherited provider dependencies.
23 deps := moduleTreeConfigDependencies(root, nil) 22 deps := moduleTreeConfigDependencies(root, nil)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
index 45129b3..d5ca641 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -27,6 +27,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 27 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
28 // Add the config and state since we don't do that via transforms 28 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config 29 a.Config = n.Config
30 a.ResolvedProvider = n.ResolvedProvider
30 31
31 return &NodeRefreshableDataResourceInstance{ 32 return &NodeRefreshableDataResourceInstance{
32 NodeAbstractResource: a, 33 NodeAbstractResource: a,
@@ -107,7 +108,9 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
107 // Get the state if we have it, if not we build it 108 // Get the state if we have it, if not we build it
108 rs := n.ResourceState 109 rs := n.ResourceState
109 if rs == nil { 110 if rs == nil {
110 rs = &ResourceState{} 111 rs = &ResourceState{
112 Provider: n.ResolvedProvider,
113 }
111 } 114 }
112 115
113 // If the config isn't empty we update the state 116 // If the config isn't empty we update the state
@@ -145,7 +148,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
145 &EvalWriteState{ 148 &EvalWriteState{
146 Name: stateId, 149 Name: stateId,
147 ResourceType: rs.Type, 150 ResourceType: rs.Type,
148 Provider: rs.Provider, 151 Provider: n.ResolvedProvider,
149 Dependencies: rs.Dependencies, 152 Dependencies: rs.Dependencies,
150 State: &state, // state is nil here 153 State: &state, // state is nil here
151 }, 154 },
@@ -185,7 +188,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
185 // provider configurations that need this data during 188 // provider configurations that need this data during
186 // refresh/plan. 189 // refresh/plan.
187 &EvalGetProvider{ 190 &EvalGetProvider{
188 Name: n.ProvidedBy()[0], 191 Name: n.ResolvedProvider,
189 Output: &provider, 192 Output: &provider,
190 }, 193 },
191 194
@@ -207,7 +210,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
207 &EvalWriteState{ 210 &EvalWriteState{
208 Name: stateId, 211 Name: stateId,
209 ResourceType: rs.Type, 212 ResourceType: rs.Type,
210 Provider: rs.Provider, 213 Provider: n.ResolvedProvider,
211 Dependencies: rs.Dependencies, 214 Dependencies: rs.Dependencies,
212 State: &state, 215 State: &state,
213 }, 216 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
new file mode 100644
index 0000000..d387222
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
@@ -0,0 +1,66 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10// NodeLocal represents a named local value in a particular module.
11//
12// Local value nodes only have one operation, common to all walk types:
13// evaluate the result and place it in state.
14type NodeLocal struct {
15 PathValue []string
16 Config *config.Local
17}
18
19func (n *NodeLocal) Name() string {
20 result := fmt.Sprintf("local.%s", n.Config.Name)
21 if len(n.PathValue) > 1 {
22 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
23 }
24
25 return result
26}
27
28// GraphNodeSubPath
29func (n *NodeLocal) Path() []string {
30 return n.PathValue
31}
32
33// RemovableIfNotTargeted
34func (n *NodeLocal) RemoveIfNotTargeted() bool {
35 return true
36}
37
38// GraphNodeReferenceable
39func (n *NodeLocal) ReferenceableName() []string {
40 name := fmt.Sprintf("local.%s", n.Config.Name)
41 return []string{name}
42}
43
44// GraphNodeReferencer
45func (n *NodeLocal) References() []string {
46 var result []string
47 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
48 for _, v := range result {
49 split := strings.Split(v, "/")
50 for i, s := range split {
51 split[i] = s + ".destroy"
52 }
53
54 result = append(result, strings.Join(split, "/"))
55 }
56
57 return result
58}
59
60// GraphNodeEvalable
61func (n *NodeLocal) EvalTree() EvalNode {
62 return &EvalLocal{
63 Name: n.Config.Name,
64 Value: n.Config.RawConfig,
65 }
66}
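
NodeLocal names itself local.<name>, with a module prefix when it lives below the root. A small sketch of that naming rule; modulePrefix below is an illustrative stand-in for the package's modulePrefixStr helper, which contributes nothing for the root module:

package main

import (
	"fmt"
	"strings"
)

// modulePrefix is a stand-in: the root module adds no prefix, and each child
// module in the path adds a "module.<name>" segment.
func modulePrefix(path []string) string {
	if len(path) <= 1 {
		return ""
	}
	parts := make([]string, 0, len(path)-1)
	for _, p := range path[1:] {
		parts = append(parts, "module."+p)
	}
	return strings.Join(parts, ".")
}

// localNodeName follows the same shape as NodeLocal.Name.
func localNodeName(path []string, name string) string {
	result := "local." + name
	if prefix := modulePrefix(path); prefix != "" {
		result = prefix + "." + result
	}
	return result
}

func main() {
	fmt.Println(localNodeName([]string{"root"}, "region"))        // local.region
	fmt.Println(localNodeName([]string{"root", "net"}, "region")) // module.net.local.region
}
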
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
deleted file mode 100644
index 319df1e..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
+++ /dev/null
@@ -1,29 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeDestroyableModule represents a module destruction.
8type NodeDestroyableModuleVariable struct {
9 PathValue []string
10}
11
12func (n *NodeDestroyableModuleVariable) Name() string {
13 result := "plan-destroy"
14 if len(n.PathValue) > 1 {
15 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
16 }
17
18 return result
19}
20
21// GraphNodeSubPath
22func (n *NodeDestroyableModuleVariable) Path() []string {
23 return n.PathValue
24}
25
26// GraphNodeEvalable
27func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
28 return &EvalDiffDestroyModule{Path: n.PathValue}
29}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
new file mode 100644
index 0000000..bb3e5ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "reflect"
7)
8
9// NodeModuleRemoved represents a module that is no longer in the
10// config.
11type NodeModuleRemoved struct {
12 PathValue []string
13}
14
15func (n *NodeModuleRemoved) Name() string {
16 return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue))
17}
18
19// GraphNodeSubPath
20func (n *NodeModuleRemoved) Path() []string {
21 return n.PathValue
22}
23
24// GraphNodeEvalable
25func (n *NodeModuleRemoved) EvalTree() EvalNode {
26 return &EvalOpFilter{
27 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
28 Node: &EvalDeleteModule{
29 PathValue: n.PathValue,
30 },
31 }
32}
33
34func (n *NodeModuleRemoved) ReferenceGlobal() bool {
35 return true
36}
37
38func (n *NodeModuleRemoved) References() []string {
39 return []string{modulePrefixStr(n.PathValue)}
40}
41
42// EvalDeleteModule is an EvalNode implementation that removes an empty module
43// entry from the state.
44type EvalDeleteModule struct {
45 PathValue []string
46}
47
48func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) {
49 state, lock := ctx.State()
50 if state == nil {
51 return nil, nil
52 }
53
54 // Get a write lock so we can access this instance
55 lock.Lock()
56 defer lock.Unlock()
57
58 // Make sure we have a clean state
59 // Destroyed resources aren't deleted, they're written with an ID of "".
60 state.prune()
61
62 // find the module and delete it
63 for i, m := range state.Modules {
64 if reflect.DeepEqual(m.Path, n.PathValue) {
65 if !m.Empty() {
66 // a targeted apply may leave module resources even without a config,
67 // so just log this and return.
68 log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue))
69 break
70 }
71 state.Modules = append(state.Modules[:i], state.Modules[i+1:]...)
72 break
73 }
74 }
75
76 return nil, nil
77}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
index 13fe8fc..66ff7d5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -92,11 +92,24 @@ func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
92 // within the variables mapping. 92 // within the variables mapping.
93 var config *ResourceConfig 93 var config *ResourceConfig
94 variables := make(map[string]interface{}) 94 variables := make(map[string]interface{})
95
95 return &EvalSequence{ 96 return &EvalSequence{
96 Nodes: []EvalNode{ 97 Nodes: []EvalNode{
97 &EvalInterpolate{ 98 &EvalOpFilter{
98 Config: n.Value, 99 Ops: []walkOperation{walkInput},
99 Output: &config, 100 Node: &EvalInterpolate{
101 Config: n.Value,
102 Output: &config,
103 ContinueOnErr: true,
104 },
105 },
106 &EvalOpFilter{
107 Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
108 walkDestroy, walkValidate},
109 Node: &EvalInterpolate{
110 Config: n.Value,
111 Output: &config,
112 },
100 }, 113 },
101 114
102 &EvalVariableBlock{ 115 &EvalVariableBlock{
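
The change above splits interpolation of the module variable into two EvalOpFilter branches: during walkInput interpolation errors are tolerated (ContinueOnErr), while the remaining walks keep strict behaviour. A standalone sketch of that filter-by-operation idea, with simplified stand-in types rather than the real EvalNode machinery:

package main

import "fmt"

type walkOperation int

const (
	walkInput walkOperation = iota
	walkRefresh
	walkPlan
	walkApply
)

// opFilter mirrors the EvalOpFilter idea: the wrapped step runs only for the
// listed walk operations and is skipped otherwise.
type opFilter struct {
	ops  []walkOperation
	step func(op walkOperation)
}

func (f *opFilter) run(current walkOperation) {
	for _, op := range f.ops {
		if op == current {
			f.step(current)
			return
		}
	}
}

func main() {
	lenient := &opFilter{
		ops:  []walkOperation{walkInput},
		step: func(op walkOperation) { fmt.Println("interpolate (errors tolerated)") },
	}
	strict := &opFilter{
		ops:  []walkOperation{walkRefresh, walkPlan, walkApply},
		step: func(op walkOperation) { fmt.Println("interpolate (errors fatal)") },
	}

	for _, op := range []walkOperation{walkInput, walkPlan} {
		lenient.run(op)
		strict.run(op)
	}
}
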
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
index 9017a63..83e9925 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -69,12 +69,22 @@ func (n *NodeApplyableOutput) References() []string {
69 69
70// GraphNodeEvalable 70// GraphNodeEvalable
71func (n *NodeApplyableOutput) EvalTree() EvalNode { 71func (n *NodeApplyableOutput) EvalTree() EvalNode {
72 return &EvalOpFilter{ 72 return &EvalSequence{
73 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, 73 Nodes: []EvalNode{
74 walkDestroy, walkInput, walkValidate}, 74 &EvalOpFilter{
75 Node: &EvalSequence{ 75 // Don't let interpolation errors stop Input, since it happens
76 Nodes: []EvalNode{ 76 // before Refresh.
77 &EvalWriteOutput{ 77 Ops: []walkOperation{walkInput},
78 Node: &EvalWriteOutput{
79 Name: n.Config.Name,
80 Sensitive: n.Config.Sensitive,
81 Value: n.Config.RawConfig,
82 ContinueOnErr: true,
83 },
84 },
85 &EvalOpFilter{
86 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
87 Node: &EvalWriteOutput{
78 Name: n.Config.Name, 88 Name: n.Config.Name,
79 Sensitive: n.Config.Sensitive, 89 Sensitive: n.Config.Sensitive,
80 Value: n.Config.RawConfig, 90 Value: n.Config.RawConfig,
@@ -83,3 +93,61 @@ func (n *NodeApplyableOutput) EvalTree() EvalNode {
83 }, 93 },
84 } 94 }
85} 95}
96
 97// NodeDestroyableOutput represents an output that is "destroyable":
98// its application will remove the output from the state.
99type NodeDestroyableOutput struct {
100 PathValue []string
101 Config *config.Output // Config is the output in the config
102}
103
104func (n *NodeDestroyableOutput) Name() string {
105 result := fmt.Sprintf("output.%s (destroy)", n.Config.Name)
106 if len(n.PathValue) > 1 {
107 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
108 }
109
110 return result
111}
112
113// GraphNodeSubPath
114func (n *NodeDestroyableOutput) Path() []string {
115 return n.PathValue
116}
117
118// RemovableIfNotTargeted
119func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool {
120 // We need to add this so that this node will be removed if
121 // it isn't targeted or a dependency of a target.
122 return true
123}
124
125// This will keep the destroy node in the graph if its corresponding output
126// node is also in the destroy graph.
127func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool {
128 return true
129}
130
131// GraphNodeReferencer
132func (n *NodeDestroyableOutput) References() []string {
133 var result []string
134 result = append(result, n.Config.DependsOn...)
135 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
136 for _, v := range result {
137 split := strings.Split(v, "/")
138 for i, s := range split {
139 split[i] = s + ".destroy"
140 }
141
142 result = append(result, strings.Join(split, "/"))
143 }
144
145 return result
146}
147
148// GraphNodeEvalable
149func (n *NodeDestroyableOutput) EvalTree() EvalNode {
150 return &EvalDeleteOutput{
151 Name: n.Config.Name,
152 }
153}
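
NodeDestroyableOutput.References (like the equivalent method on NodeLocal earlier in this diff) doubles each reference with a ".destroy"-suffixed variant, so the destroy node also depends on the destroy-phase versions of whatever the output reads. A small sketch of that expansion on plain strings:

package main

import (
	"fmt"
	"strings"
)

// withDestroyVariants mirrors the loop in References: every reference is kept,
// and a copy with ".destroy" appended to each "/"-separated segment is added.
func withDestroyVariants(refs []string) []string {
	result := append([]string{}, refs...)
	for _, v := range refs {
		split := strings.Split(v, "/")
		for i, s := range split {
			split[i] = s + ".destroy"
		}
		result = append(result, strings.Join(split, "/"))
	}
	return result
}

func main() {
	fmt.Println(withDestroyVariants([]string{"aws_instance.web"}))
	// [aws_instance.web aws_instance.web.destroy]
}
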
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
index 636a15d..0fd1554 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -19,6 +19,11 @@ func (n *NodeOutputOrphan) Name() string {
19 return result 19 return result
20} 20}
21 21
22// GraphNodeReferenceable
23func (n *NodeOutputOrphan) ReferenceableName() []string {
24 return []string{"output." + n.OutputName}
25}
26
22// GraphNodeSubPath 27// GraphNodeSubPath
23func (n *NodeOutputOrphan) Path() []string { 28func (n *NodeOutputOrphan) Path() []string {
24 return n.PathValue 29 return n.PathValue
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
index 8e2c176..2071ab1 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -7,5 +7,5 @@ type NodeApplyableProvider struct {
7 7
8// GraphNodeEvalable 8// GraphNodeEvalable
9func (n *NodeApplyableProvider) EvalTree() EvalNode { 9func (n *NodeApplyableProvider) EvalTree() EvalNode {
10 return ProviderEvalTree(n.NameValue, n.ProviderConfig()) 10 return ProviderEvalTree(n, n.ProviderConfig())
11} 11}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
index 6cc8365..9e490f7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -2,6 +2,7 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
@@ -24,13 +25,22 @@ type NodeAbstractProvider struct {
24 Config *config.ProviderConfig 25 Config *config.ProviderConfig
25} 26}
26 27
27func (n *NodeAbstractProvider) Name() string { 28func ResolveProviderName(name string, path []string) string {
28 result := fmt.Sprintf("provider.%s", n.NameValue) 29 if strings.Contains(name, "provider.") {
29 if len(n.PathValue) > 1 { 30 // already resolved
30 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 31 return name
32 }
33
34 name = fmt.Sprintf("provider.%s", name)
35 if len(path) >= 1 {
36 name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name)
31 } 37 }
32 38
33 return result 39 return name
40}
41
42func (n *NodeAbstractProvider) Name() string {
43 return ResolveProviderName(n.NameValue, n.PathValue)
34} 44}
35 45
36// GraphNodeSubPath 46// GraphNodeSubPath
@@ -60,12 +70,12 @@ func (n *NodeAbstractProvider) ProviderName() string {
60} 70}
61 71
62// GraphNodeProvider 72// GraphNodeProvider
63func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig { 73func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig {
64 if n.Config == nil { 74 if n.Config == nil {
65 return nil 75 return nil
66 } 76 }
67 77
68 return n.Config.RawConfig 78 return n.Config
69} 79}
70 80
71// GraphNodeAttachProvider 81// GraphNodeAttachProvider
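
ResolveProviderName is the new canonical way to turn a bare provider name into a graph-node address: a name that already contains "provider." is passed through unchanged, otherwise it gains the "provider." prefix and, below the root module, the module path prefix. A hedged sketch of that behaviour; modulePrefix stands in for modulePrefixStr, and the sketch guards on an empty prefix where the vendored code checks the path length:

package main

import (
	"fmt"
	"strings"
)

// modulePrefix is an illustrative stand-in for modulePrefixStr: no prefix at
// the root, "module.<name>" segments below it.
func modulePrefix(path []string) string {
	if len(path) <= 1 {
		return ""
	}
	parts := make([]string, 0, len(path)-1)
	for _, p := range path[1:] {
		parts = append(parts, "module."+p)
	}
	return strings.Join(parts, ".")
}

// resolveProviderName follows the same shape as ResolveProviderName.
func resolveProviderName(name string, path []string) string {
	if strings.Contains(name, "provider.") {
		return name // already resolved
	}
	name = "provider." + name
	if prefix := modulePrefix(path); prefix != "" {
		name = prefix + "." + name
	}
	return name
}

func main() {
	fmt.Println(resolveProviderName("aws", []string{"root"}))          // provider.aws
	fmt.Println(resolveProviderName("aws", []string{"root", "net"}))   // module.net.provider.aws
	fmt.Println(resolveProviderName("provider.aws", []string{"root"})) // provider.aws (unchanged)
}
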
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
index 25e7e62..a00bc46 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -20,7 +20,7 @@ func (n *NodeDisabledProvider) EvalTree() EvalNode {
20 var resourceConfig *ResourceConfig 20 var resourceConfig *ResourceConfig
21 return &EvalSequence{ 21 return &EvalSequence{
22 Nodes: []EvalNode{ 22 Nodes: []EvalNode{
23 &EvalInterpolate{ 23 &EvalInterpolateProvider{
24 Config: n.ProviderConfig(), 24 Config: n.ProviderConfig(),
25 Output: &resourceConfig, 25 Output: &resourceConfig,
26 }, 26 },
@@ -29,10 +29,6 @@ func (n *NodeDisabledProvider) EvalTree() EvalNode {
29 Config: &resourceConfig, 29 Config: &resourceConfig,
30 Output: &resourceConfig, 30 Output: &resourceConfig,
31 }, 31 },
32 &EvalSetProviderConfig{
33 Provider: n.ProviderName(),
34 Config: &resourceConfig,
35 },
36 }, 32 },
37 } 33 }
38} 34}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
index 50bb707..73509c8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -33,6 +33,9 @@ type NodeAbstractResource struct {
33 ResourceState *ResourceState // ResourceState is the ResourceState for this 33 ResourceState *ResourceState // ResourceState is the ResourceState for this
34 34
35 Targets []ResourceAddress // Set from GraphNodeTargetable 35 Targets []ResourceAddress // Set from GraphNodeTargetable
36
37 // The address of the provider this resource will use
38 ResolvedProvider string
36} 39}
37 40
38func (n *NodeAbstractResource) Name() string { 41func (n *NodeAbstractResource) Name() string {
@@ -170,20 +173,24 @@ func (n *NodeAbstractResource) StateReferences() []string {
170 return deps 173 return deps
171} 174}
172 175
176func (n *NodeAbstractResource) SetProvider(p string) {
177 n.ResolvedProvider = p
178}
179
173// GraphNodeProviderConsumer 180// GraphNodeProviderConsumer
174func (n *NodeAbstractResource) ProvidedBy() []string { 181func (n *NodeAbstractResource) ProvidedBy() string {
175 // If we have a config we prefer that above all else 182 // If we have a config we prefer that above all else
176 if n.Config != nil { 183 if n.Config != nil {
177 return []string{resourceProvider(n.Config.Type, n.Config.Provider)} 184 return resourceProvider(n.Config.Type, n.Config.Provider)
178 } 185 }
179 186
180 // If we have state, then we will use the provider from there 187 // If we have state, then we will use the provider from there
181 if n.ResourceState != nil && n.ResourceState.Provider != "" { 188 if n.ResourceState != nil && n.ResourceState.Provider != "" {
182 return []string{n.ResourceState.Provider} 189 return n.ResourceState.Provider
183 } 190 }
184 191
185 // Use our type 192 // Use our type
186 return []string{resourceProvider(n.Addr.Type, "")} 193 return resourceProvider(n.Addr.Type, "")
187} 194}
188 195
189// GraphNodeProvisionerConsumer 196// GraphNodeProvisionerConsumer
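
ProvidedBy on the abstract resource node now returns a single provider name rather than a one-element slice, and the new SetProvider stores the address the graph's provider resolution settles on in ResolvedProvider, which the eval trees above then pass to EvalGetProvider and EvalWriteState. The lookup order is unchanged: explicit config first, then the provider recorded in state, then a name derived from the resource type. A sketch of that precedence; deriveFromType is an assumption about resourceProvider's usual convention (type prefix before the first underscore), not code shown in this diff:

package main

import (
	"fmt"
	"strings"
)

// deriveFromType is an assumed stand-in for resourceProvider's fallback:
// conventionally the provider name is the resource type's prefix before the
// first underscore (e.g. "aws_instance" -> "aws").
func deriveFromType(resourceType string) string {
	if idx := strings.Index(resourceType, "_"); idx != -1 {
		return resourceType[:idx]
	}
	return resourceType
}

// providedBy mirrors NodeAbstractResource.ProvidedBy: a config block wins
// outright (falling back to a name derived from its type), then the provider
// recorded in state, then a name derived from the address type.
func providedBy(hasConfig bool, configType, configProvider, stateProvider, addrType string) string {
	if hasConfig {
		if configProvider != "" {
			return configProvider
		}
		return deriveFromType(configType)
	}
	if stateProvider != "" {
		return stateProvider
	}
	return deriveFromType(addrType)
}

func main() {
	fmt.Println(providedBy(true, "aws_instance", "aws.west", "", ""))      // aws.west
	fmt.Println(providedBy(false, "", "", "provider.aws", "aws_instance")) // provider.aws
	fmt.Println(providedBy(false, "", "", "", "aws_instance"))             // aws
}
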
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
index 3599782..40ee1cf 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -124,6 +124,27 @@ func (n *NodeApplyableResource) evalTreeDataResource(
124 Then: EvalNoop{}, 124 Then: EvalNoop{},
125 }, 125 },
126 126
127 // Normally we interpolate count as a preparation step before
128 // a DynamicExpand, but an apply graph has pre-expanded nodes
129 // and so the count would otherwise never be interpolated.
130 //
131 // This is redundant when there are multiple instances created
132 // from the same config (count > 1) but harmless since the
133 // underlying structures have mutexes to make this concurrency-safe.
134 //
135 // In most cases this isn't actually needed because we dealt with
136 // all of the counts during the plan walk, but we do it here
137 // for completeness because other code assumes that the
138 // final count is always available during interpolation.
139 //
140 // Here we are just populating the interpolated value in-place
141 // inside this RawConfig object, like we would in
142 // NodeAbstractCountResource.
143 &EvalInterpolate{
144 Config: n.Config.RawCount,
145 ContinueOnErr: true,
146 },
147
127 // We need to re-interpolate the config here, rather than 148 // We need to re-interpolate the config here, rather than
128 // just using the diff's values directly, because we've 149 // just using the diff's values directly, because we've
129 // potentially learned more variable values during the 150 // potentially learned more variable values during the
@@ -135,7 +156,7 @@ func (n *NodeApplyableResource) evalTreeDataResource(
135 }, 156 },
136 157
137 &EvalGetProvider{ 158 &EvalGetProvider{
138 Name: n.ProvidedBy()[0], 159 Name: n.ResolvedProvider,
139 Output: &provider, 160 Output: &provider,
140 }, 161 },
141 162
@@ -158,7 +179,7 @@ func (n *NodeApplyableResource) evalTreeDataResource(
158 &EvalWriteState{ 179 &EvalWriteState{
159 Name: stateId, 180 Name: stateId,
160 ResourceType: n.Config.Type, 181 ResourceType: n.Config.Type,
161 Provider: n.Config.Provider, 182 Provider: n.ResolvedProvider,
162 Dependencies: stateDeps, 183 Dependencies: stateDeps,
163 State: &state, 184 State: &state,
164 }, 185 },
@@ -236,13 +257,35 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
236 }, 257 },
237 }, 258 },
238 259
260 // Normally we interpolate count as a preparation step before
261 // a DynamicExpand, but an apply graph has pre-expanded nodes
262 // and so the count would otherwise never be interpolated.
263 //
264 // This is redundant when there are multiple instances created
265 // from the same config (count > 1) but harmless since the
266 // underlying structures have mutexes to make this concurrency-safe.
267 //
268 // In most cases this isn't actually needed because we dealt with
269 // all of the counts during the plan walk, but we need to do this
270 // in order to support interpolation of resource counts from
271 // apply-time-interpolated expressions, such as those in
272 // "provisioner" blocks.
273 //
274 // Here we are just populating the interpolated value in-place
275 // inside this RawConfig object, like we would in
276 // NodeAbstractCountResource.
277 &EvalInterpolate{
278 Config: n.Config.RawCount,
279 ContinueOnErr: true,
280 },
281
239 &EvalInterpolate{ 282 &EvalInterpolate{
240 Config: n.Config.RawConfig.Copy(), 283 Config: n.Config.RawConfig.Copy(),
241 Resource: resource, 284 Resource: resource,
242 Output: &resourceConfig, 285 Output: &resourceConfig,
243 }, 286 },
244 &EvalGetProvider{ 287 &EvalGetProvider{
245 Name: n.ProvidedBy()[0], 288 Name: n.ResolvedProvider,
246 Output: &provider, 289 Output: &provider,
247 }, 290 },
248 &EvalReadState{ 291 &EvalReadState{
@@ -283,7 +326,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
283 }, 326 },
284 327
285 &EvalGetProvider{ 328 &EvalGetProvider{
286 Name: n.ProvidedBy()[0], 329 Name: n.ResolvedProvider,
287 Output: &provider, 330 Output: &provider,
288 }, 331 },
289 &EvalReadState{ 332 &EvalReadState{
@@ -308,7 +351,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
308 &EvalWriteState{ 351 &EvalWriteState{
309 Name: stateId, 352 Name: stateId,
310 ResourceType: n.Config.Type, 353 ResourceType: n.Config.Type,
311 Provider: n.Config.Provider, 354 Provider: n.ResolvedProvider,
312 Dependencies: stateDeps, 355 Dependencies: stateDeps,
313 State: &state, 356 State: &state,
314 }, 357 },
@@ -332,7 +375,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
332 Else: &EvalWriteState{ 375 Else: &EvalWriteState{
333 Name: stateId, 376 Name: stateId,
334 ResourceType: n.Config.Type, 377 ResourceType: n.Config.Type,
335 Provider: n.Config.Provider, 378 Provider: n.ResolvedProvider,
336 Dependencies: stateDeps, 379 Dependencies: stateDeps,
337 State: &state, 380 State: &state,
338 }, 381 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
index c2efd2c..657bbee 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -102,8 +102,9 @@ func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
102 102
103 // We want deposed resources in the state to be destroyed 103 // We want deposed resources in the state to be destroyed
104 steps = append(steps, &DeposedTransformer{ 104 steps = append(steps, &DeposedTransformer{
105 State: state, 105 State: state,
106 View: n.Addr.stateId(), 106 View: n.Addr.stateId(),
107 ResolvedProvider: n.ResolvedProvider,
107 }) 108 })
108 109
109 // Target 110 // Target
@@ -148,7 +149,9 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
148 // Get our state 149 // Get our state
149 rs := n.ResourceState 150 rs := n.ResourceState
150 if rs == nil { 151 if rs == nil {
151 rs = &ResourceState{} 152 rs = &ResourceState{
153 Provider: n.ResolvedProvider,
154 }
152 } 155 }
153 156
154 var diffApply *InstanceDiff 157 var diffApply *InstanceDiff
@@ -188,7 +191,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
188 &EvalInstanceInfo{Info: info}, 191 &EvalInstanceInfo{Info: info},
189 192
190 &EvalGetProvider{ 193 &EvalGetProvider{
191 Name: n.ProvidedBy()[0], 194 Name: n.ResolvedProvider,
192 Output: &provider, 195 Output: &provider,
193 }, 196 },
194 &EvalReadState{ 197 &EvalReadState{
@@ -272,7 +275,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
272 &EvalWriteState{ 275 &EvalWriteState{
273 Name: stateId, 276 Name: stateId,
274 ResourceType: n.Addr.Type, 277 ResourceType: n.Addr.Type,
275 Provider: rs.Provider, 278 Provider: n.ResolvedProvider,
276 Dependencies: rs.Dependencies, 279 Dependencies: rs.Dependencies,
277 State: &state, 280 State: &state,
278 }, 281 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
index 52bbf88..1afae7a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -27,6 +27,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 27 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
28 // Add the config and state since we don't do that via transforms 28 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config 29 a.Config = n.Config
30 a.ResolvedProvider = n.ResolvedProvider
30 31
31 return &NodePlannableResourceInstance{ 32 return &NodePlannableResourceInstance{
32 NodeAbstractResource: a, 33 NodeAbstractResource: a,
@@ -37,6 +38,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
37 concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { 38 concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
38 // Add the config and state since we don't do that via transforms 39 // Add the config and state since we don't do that via transforms
39 a.Config = n.Config 40 a.Config = n.Config
41 a.ResolvedProvider = n.ResolvedProvider
40 42
41 return &NodePlannableResourceOrphan{ 43 return &NodePlannableResourceOrphan{
42 NodeAbstractResource: a, 44 NodeAbstractResource: a,
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
index b529569..7d9fcdd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -97,7 +97,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(
97 }, 97 },
98 98
99 &EvalGetProvider{ 99 &EvalGetProvider{
100 Name: n.ProvidedBy()[0], 100 Name: n.ResolvedProvider,
101 Output: &provider, 101 Output: &provider,
102 }, 102 },
103 103
@@ -112,7 +112,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(
112 &EvalWriteState{ 112 &EvalWriteState{
113 Name: stateId, 113 Name: stateId,
114 ResourceType: n.Config.Type, 114 ResourceType: n.Config.Type,
115 Provider: n.Config.Provider, 115 Provider: n.ResolvedProvider,
116 Dependencies: stateDeps, 116 Dependencies: stateDeps,
117 State: &state, 117 State: &state,
118 }, 118 },
@@ -143,7 +143,7 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource(
143 Output: &resourceConfig, 143 Output: &resourceConfig,
144 }, 144 },
145 &EvalGetProvider{ 145 &EvalGetProvider{
146 Name: n.ProvidedBy()[0], 146 Name: n.ResolvedProvider,
147 Output: &provider, 147 Output: &provider,
148 }, 148 },
149 // Re-run validation to catch any errors we missed, e.g. type 149 // Re-run validation to catch any errors we missed, e.g. type
@@ -177,7 +177,7 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource(
177 &EvalWriteState{ 177 &EvalWriteState{
178 Name: stateId, 178 Name: stateId,
179 ResourceType: n.Config.Type, 179 ResourceType: n.Config.Type,
180 Provider: n.Config.Provider, 180 Provider: n.ResolvedProvider,
181 Dependencies: stateDeps, 181 Dependencies: stateDeps,
182 State: &state, 182 State: &state,
183 }, 183 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
index cd4fe92..697bd49 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -30,6 +30,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
30 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 30 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
31 // Add the config and state since we don't do that via transforms 31 // Add the config and state since we don't do that via transforms
32 a.Config = n.Config 32 a.Config = n.Config
33 a.ResolvedProvider = n.ResolvedProvider
33 34
34 return &NodeRefreshableManagedResourceInstance{ 35 return &NodeRefreshableManagedResourceInstance{
35 NodeAbstractResource: a, 36 NodeAbstractResource: a,
@@ -149,7 +150,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
149 return &EvalSequence{ 150 return &EvalSequence{
150 Nodes: []EvalNode{ 151 Nodes: []EvalNode{
151 &EvalGetProvider{ 152 &EvalGetProvider{
152 Name: n.ProvidedBy()[0], 153 Name: n.ResolvedProvider,
153 Output: &provider, 154 Output: &provider,
154 }, 155 },
155 &EvalReadState{ 156 &EvalReadState{
@@ -165,7 +166,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
165 &EvalWriteState{ 166 &EvalWriteState{
166 Name: stateId, 167 Name: stateId,
167 ResourceType: n.ResourceState.Type, 168 ResourceType: n.ResourceState.Type,
168 Provider: n.ResourceState.Provider, 169 Provider: n.ResolvedProvider,
169 Dependencies: n.ResourceState.Dependencies, 170 Dependencies: n.ResourceState.Dependencies,
170 State: &state, 171 State: &state,
171 }, 172 },
@@ -212,15 +213,21 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState(
212 // Determine the dependencies for the state. 213 // Determine the dependencies for the state.
213 stateDeps := n.StateReferences() 214 stateDeps := n.StateReferences()
214 215
216 // n.Config can be nil if the config and state don't match
217 var raw *config.RawConfig
218 if n.Config != nil {
219 raw = n.Config.RawConfig.Copy()
220 }
221
215 return &EvalSequence{ 222 return &EvalSequence{
216 Nodes: []EvalNode{ 223 Nodes: []EvalNode{
217 &EvalInterpolate{ 224 &EvalInterpolate{
218 Config: n.Config.RawConfig.Copy(), 225 Config: raw,
219 Resource: resource, 226 Resource: resource,
220 Output: &resourceConfig, 227 Output: &resourceConfig,
221 }, 228 },
222 &EvalGetProvider{ 229 &EvalGetProvider{
223 Name: n.ProvidedBy()[0], 230 Name: n.ResolvedProvider,
224 Output: &provider, 231 Output: &provider,
225 }, 232 },
226 // Re-run validation to catch any errors we missed, e.g. type 233 // Re-run validation to catch any errors we missed, e.g. type
@@ -250,7 +257,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState(
250 &EvalWriteState{ 257 &EvalWriteState{
251 Name: stateID, 258 Name: stateID,
252 ResourceType: n.Config.Type, 259 ResourceType: n.Config.Type,
253 Provider: n.Config.Provider, 260 Provider: n.ResolvedProvider,
254 Dependencies: stateDeps, 261 Dependencies: stateDeps,
255 State: &state, 262 State: &state,
256 }, 263 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
index f528f24..0df223d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -39,6 +39,7 @@ func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error)
39 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 39 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
40 // Add the config and state since we don't do that via transforms 40 // Add the config and state since we don't do that via transforms
41 a.Config = n.Config 41 a.Config = n.Config
42 a.ResolvedProvider = n.ResolvedProvider
42 43
43 return &NodeValidatableResourceInstance{ 44 return &NodeValidatableResourceInstance{
44 NodeAbstractResource: a, 45 NodeAbstractResource: a,
@@ -108,7 +109,7 @@ func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
108 Config: &n.Config.RawConfig, 109 Config: &n.Config.RawConfig,
109 }, 110 },
110 &EvalGetProvider{ 111 &EvalGetProvider{
111 Name: n.ProvidedBy()[0], 112 Name: n.ResolvedProvider,
112 Output: &provider, 113 Output: &provider,
113 }, 114 },
114 &EvalInterpolate{ 115 &EvalInterpolate{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
index ca99685..51dd412 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/path.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -1,24 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "crypto/md5" 4 "strings"
5 "encoding/hex"
6) 5)
7 6
8// PathCacheKey returns a cache key for a module path. 7// PathCacheKey returns a cache key for a module path.
9//
10// TODO: test
11func PathCacheKey(path []string) string { 8func PathCacheKey(path []string) string {
12 // There is probably a better way to do this, but this is working for now. 9 return strings.Join(path, "|")
13 // We just create an MD5 hash of all the MD5 hashes of all the path
14 // elements. This gets us the property that it is unique per ordering.
15 hash := md5.New()
16 for _, p := range path {
17 single := md5.Sum([]byte(p))
18 if _, err := hash.Write(single[:]); err != nil {
19 panic(err)
20 }
21 }
22
23 return hex.EncodeToString(hash.Sum(nil))
24} 10}
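
PathCacheKey drops the nested MD5 hashing in favour of joining the path with "|", which is still unique per ordering (the allowed module-name characters do not include "|"), far cheaper, and readable when the key shows up in logs or debug output. For reference:

package main

import (
	"fmt"
	"strings"
)

func main() {
	path := []string{"root", "network", "subnets"}
	// Same behaviour as the new PathCacheKey.
	fmt.Println(strings.Join(path, "|")) // root|network|subnets
}
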
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
index 51d6652..30db195 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -10,6 +10,7 @@ import (
10 "sync" 10 "sync"
11 11
12 "github.com/hashicorp/terraform/config/module" 12 "github.com/hashicorp/terraform/config/module"
13 "github.com/hashicorp/terraform/version"
13) 14)
14 15
15func init() { 16func init() {
@@ -26,18 +27,54 @@ func init() {
26// necessary to make a change: the state, diff, config, backend config, etc. 27// necessary to make a change: the state, diff, config, backend config, etc.
27// This is so that it can run alone without any other data. 28// This is so that it can run alone without any other data.
28type Plan struct { 29type Plan struct {
29 Diff *Diff 30 // Diff describes the resource actions that must be taken when this
30 Module *module.Tree 31 // plan is applied.
31 State *State 32 Diff *Diff
32 Vars map[string]interface{} 33
34 // Module represents the entire configuration that was present when this
35 // plan was created.
36 Module *module.Tree
37
38 // State is the Terraform state that was current when this plan was
39 // created.
40 //
41 // It is not allowed to apply a plan that has a stale state, since its
42 // diff could be outdated.
43 State *State
44
45 // Vars retains the variables that were set when creating the plan, so
46 // that the same variables can be applied during apply.
47 Vars map[string]interface{}
48
49 // Targets, if non-empty, contains a set of resource address strings that
50 // identify graph nodes that were selected as targets for plan.
51 //
52 // When targets are set, any graph node that is not directly targeted or
53 // indirectly targeted via dependencies is excluded from the graph.
33 Targets []string 54 Targets []string
34 55
56 // TerraformVersion is the version of Terraform that was used to create
57 // this plan.
58 //
59 // It is not allowed to apply a plan created with a different version of
60 // Terraform, since the other fields of this structure may be interpreted
61 // in different ways between versions.
35 TerraformVersion string 62 TerraformVersion string
36 ProviderSHA256s map[string][]byte 63
64 // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries
65 // used as plugins for each provider during plan.
66 //
67 // These must match between plan and apply to ensure that the diff is
68 // correctly interpreted, since different provider versions may have
69 // different attributes or attribute value constraints.
70 ProviderSHA256s map[string][]byte
37 71
38 // Backend is the backend that this plan should use and store data with. 72 // Backend is the backend that this plan should use and store data with.
39 Backend *BackendState 73 Backend *BackendState
40 74
75 // Destroy indicates that this plan was created for a full destroy operation
76 Destroy bool
77
41 once sync.Once 78 once sync.Once
42} 79}
43 80
@@ -67,6 +104,7 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
67 opts.Module = p.Module 104 opts.Module = p.Module
68 opts.Targets = p.Targets 105 opts.Targets = p.Targets
69 opts.ProviderSHA256s = p.ProviderSHA256s 106 opts.ProviderSHA256s = p.ProviderSHA256s
107 opts.Destroy = p.Destroy
70 108
71 if opts.State == nil { 109 if opts.State == nil {
72 opts.State = p.State 110 opts.State = p.State
@@ -79,10 +117,10 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
79 // the state, there is little chance that these aren't actually equal. 117 // the state, there is little chance that these aren't actually equal.
80 // Log the error condition for reference, but continue with the state 118 // Log the error condition for reference, but continue with the state
81 // we have. 119 // we have.
82 log.Println("[WARNING] Plan state and ContextOpts state are not equal") 120 log.Println("[WARN] Plan state and ContextOpts state are not equal")
83 } 121 }
84 122
85 thisVersion := VersionString() 123 thisVersion := version.String()
86 if p.TerraformVersion != "" && p.TerraformVersion != thisVersion { 124 if p.TerraformVersion != "" && p.TerraformVersion != thisVersion {
87 return nil, fmt.Errorf( 125 return nil, fmt.Errorf(
88 "plan was created with a different version of Terraform (created with %s, but running %s)", 126 "plan was created with a different version of Terraform (created with %s, but running %s)",
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
index 0acf0be..2f5ebb5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -88,6 +88,46 @@ func (i *InstanceInfo) HumanId() string {
88 i.Id) 88 i.Id)
89} 89}
90 90
91// ResourceAddress returns the address of the resource that the receiver is describing.
92func (i *InstanceInfo) ResourceAddress() *ResourceAddress {
93 // GROSS: for tainted and deposed instances, their status gets appended
94 // to i.Id to create a unique id for the graph node. Historically these
95 // ids were displayed to the user, so it's designed to be human-readable:
96 // "aws_instance.bar.0 (deposed #0)"
97 //
98 // So here we detect such suffixes and try to interpret them back to
99 // their original meaning so we can then produce a ResourceAddress
100 // with a suitable InstanceType.
101 id := i.Id
102 instanceType := TypeInvalid
103 if idx := strings.Index(id, " ("); idx != -1 {
104 remain := id[idx:]
105 id = id[:idx]
106
107 switch {
108 case strings.Contains(remain, "tainted"):
109 instanceType = TypeTainted
110 case strings.Contains(remain, "deposed"):
111 instanceType = TypeDeposed
112 }
113 }
114
115 addr, err := parseResourceAddressInternal(id)
116 if err != nil {
117 // should never happen, since that would indicate a bug in the
118 // code that constructed this InstanceInfo.
119 panic(fmt.Errorf("InstanceInfo has invalid Id %s", id))
120 }
121 if len(i.ModulePath) > 1 {
122 addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied
123 }
124 if instanceType != TypeInvalid {
125 addr.InstanceTypeSet = true
126 addr.InstanceType = instanceType
127 }
128 return addr
129}
130
91func (i *InstanceInfo) uniqueId() string { 131func (i *InstanceInfo) uniqueId() string {
92 prefix := i.HumanId() 132 prefix := i.HumanId()
93 if v := i.uniqueExtra; v != "" { 133 if v := i.uniqueExtra; v != "" {
@@ -306,7 +346,7 @@ func (c *ResourceConfig) get(
306 if err != nil { 346 if err != nil {
307 return nil, false 347 return nil, false
308 } 348 }
309 if i >= int64(cv.Len()) { 349 if int(i) < 0 || int(i) >= cv.Len() {
310 return nil, false 350 return nil, false
311 } 351 }
312 current = cv.Index(int(i)).Interface() 352 current = cv.Index(int(i)).Interface()
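
InstanceInfo.ResourceAddress has to undo the human-readable status suffix that gets appended to instance ids ("aws_instance.bar.0 (deposed #0)") before the id can be parsed as an address. A small sketch of that splitting step (splitStatusSuffix is an illustrative helper, not the vendored function):

package main

import (
	"fmt"
	"strings"
)

// splitStatusSuffix separates the parseable address from the " (tainted)" or
// " (deposed #N)" style suffix, the same way ResourceAddress trims i.Id before
// handing it to parseResourceAddressInternal.
func splitStatusSuffix(id string) (addr, status string) {
	if idx := strings.Index(id, " ("); idx != -1 {
		return id[:idx], strings.Trim(id[idx:], " ()")
	}
	return id, ""
}

func main() {
	addr, status := splitStatusSuffix("aws_instance.bar.0 (deposed #0)")
	fmt.Println(addr)   // aws_instance.bar.0
	fmt.Println(status) // deposed #0
}
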
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
index 8badca8..a64f5d8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -42,9 +42,9 @@ func (r *ResourceAddress) Copy() *ResourceAddress {
42 Type: r.Type, 42 Type: r.Type,
43 Mode: r.Mode, 43 Mode: r.Mode,
44 } 44 }
45 for _, p := range r.Path { 45
46 n.Path = append(n.Path, p) 46 n.Path = append(n.Path, r.Path...)
47 } 47
48 return n 48 return n
49} 49}
50 50
@@ -362,40 +362,41 @@ func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
362 362
363 switch { 363 switch {
364 364
365 case len(addr.Path) < len(other.Path): 365 case len(addr.Path) != len(other.Path):
366 return true 366 return len(addr.Path) < len(other.Path)
367 367
368 case !reflect.DeepEqual(addr.Path, other.Path): 368 case !reflect.DeepEqual(addr.Path, other.Path):
369 // If the two paths are the same length but don't match, we'll just 369 // If the two paths are the same length but don't match, we'll just
370 // cheat and compare the string forms since it's easier than 370 // cheat and compare the string forms since it's easier than
371 // comparing all of the path segments in turn. 371 // comparing all of the path segments in turn, and lexicographic
372 // comparison is correct for the module path portion.
372 addrStr := addr.String() 373 addrStr := addr.String()
373 otherStr := other.String() 374 otherStr := other.String()
374 return addrStr < otherStr 375 return addrStr < otherStr
375 376
376 case addr.Mode == config.DataResourceMode && other.Mode != config.DataResourceMode: 377 case addr.Mode != other.Mode:
377 return true 378 return addr.Mode == config.DataResourceMode
378 379
379 case addr.Type < other.Type: 380 case addr.Type != other.Type:
380 return true 381 return addr.Type < other.Type
381 382
382 case addr.Name < other.Name: 383 case addr.Name != other.Name:
383 return true 384 return addr.Name < other.Name
384 385
385 case addr.Index < other.Index: 386 case addr.Index != other.Index:
386 // Since "Index" is -1 for an un-indexed address, this also conveniently 387 // Since "Index" is -1 for an un-indexed address, this also conveniently
387 // sorts unindexed addresses before indexed ones, should they both 388 // sorts unindexed addresses before indexed ones, should they both
388 // appear for some reason. 389 // appear for some reason.
389 return true 390 return addr.Index < other.Index
390 391
391 case other.InstanceTypeSet && !addr.InstanceTypeSet: 392 case addr.InstanceTypeSet != other.InstanceTypeSet:
392 return true 393 return !addr.InstanceTypeSet
393 394
394 case addr.InstanceType < other.InstanceType: 395 case addr.InstanceType != other.InstanceType:
395 // InstanceType is actually an enum, so this is just an arbitrary 396 // InstanceType is actually an enum, so this is just an arbitrary
396 // sort based on the enum numeric values, and thus not particularly 397 // sort based on the enum numeric values, and thus not particularly
397 // meaningful. 398 // meaningful.
398 return true 399 return addr.InstanceType < other.InstanceType
399 400
400 default: 401 default:
401 return false 402 return false
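
The rewritten Less fixes the usual multi-key comparison pitfall: the old chain of "case a.f < b.f: return true" arms could report a < b even when an earlier, higher-priority field already compared greater, giving an inconsistent ordering. The new shape only consults a field when it differs. The same pattern on a two-field key:

package main

import "fmt"

type key struct{ typ, name string }

// less compares typ first, then name, the way the new ResourceAddress.Less
// walks its fields: a later field is only consulted when the earlier ones tie.
func less(a, b key) bool {
	switch {
	case a.typ != b.typ:
		return a.typ < b.typ
	case a.name != b.name:
		return a.name < b.name
	default:
		return false
	}
}

func main() {
	a := key{typ: "aws_instance", name: "z"}
	b := key{typ: "aws_vpc", name: "a"}
	fmt.Println(less(a, b), less(b, a)) // true false: typ decides, name never reverses it
}
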
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
index 7d78f67..93fd14f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -21,6 +21,15 @@ type ResourceProvider interface {
21 * Functions related to the provider 21 * Functions related to the provider
22 *********************************************************************/ 22 *********************************************************************/
23 23
24 // ProviderSchema returns the config schema for the main provider
25 // configuration, as would appear in a "provider" block in the
26 // configuration files.
27 //
28 // Currently not all providers support schema. Callers must therefore
29 // first call Resources and DataSources and ensure that at least one
30 // resource or data source has the SchemaAvailable flag set.
31 GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
32
24 // Input is called to ask the provider to ask the user for input 33 // Input is called to ask the provider to ask the user for input
 25 // for completing the configuration if necessary. 34 // for completing the configuration if necessary.
26 // 35 //
@@ -183,11 +192,25 @@ type ResourceProviderCloser interface {
183type ResourceType struct { 192type ResourceType struct {
184 Name string // Name of the resource, example "instance" (no provider prefix) 193 Name string // Name of the resource, example "instance" (no provider prefix)
185 Importable bool // Whether this resource supports importing 194 Importable bool // Whether this resource supports importing
195
196 // SchemaAvailable is set if the provider supports the ProviderSchema,
197 // ResourceTypeSchema and DataSourceSchema methods. Although it is
198 // included on each resource type, it's actually a provider-wide setting
199 // that's smuggled here only because that avoids a breaking change to
200 // the plugin protocol.
201 SchemaAvailable bool
186} 202}
187 203
188// DataSource is a data source that a resource provider implements. 204// DataSource is a data source that a resource provider implements.
189type DataSource struct { 205type DataSource struct {
190 Name string 206 Name string
207
208 // SchemaAvailable is set if the provider supports the ProviderSchema,
209 // ResourceTypeSchema and DataSourceSchema methods. Although it is
210 // included on each resource type, it's actually a provider-wide setting
211 // that's smuggled here only because that avoids a breaking change to
212 // the plugin protocol.
213 SchemaAvailable bool
191} 214}
192 215
193// ResourceProviderResolver is an interface implemented by objects that are 216// ResourceProviderResolver is an interface implemented by objects that are
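A minimal sketch of the probing that the GetSchema comment above asks for, assuming it lives in package terraform; schemaIfAvailable is a hypothetical helper name, not part of the vendored source.

func schemaIfAvailable(p ResourceProvider, req *ProviderSchemaRequest) (*ProviderSchema, error) {
	// SchemaAvailable is provider-wide but smuggled onto every resource
	// type and data source, so finding it set anywhere is enough.
	available := false
	for _, r := range p.Resources() {
		if r.SchemaAvailable {
			available = true
			break
		}
	}
	if !available {
		for _, d := range p.DataSources() {
			if d.SchemaAvailable {
				available = true
				break
			}
		}
	}
	if !available {
		return nil, nil // older plugin: no schema support
	}
	return p.GetSchema(req)
}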
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
index f531533..4000e3d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -1,6 +1,8 @@
1package terraform 1package terraform
2 2
3import "sync" 3import (
4 "sync"
5)
4 6
5// MockResourceProvider implements ResourceProvider but mocks out all the 7// MockResourceProvider implements ResourceProvider but mocks out all the
6// calls for testing purposes. 8// calls for testing purposes.
@@ -12,6 +14,10 @@ type MockResourceProvider struct {
12 14
13 CloseCalled bool 15 CloseCalled bool
14 CloseError error 16 CloseError error
17 GetSchemaCalled bool
18 GetSchemaRequest *ProviderSchemaRequest
19 GetSchemaReturn *ProviderSchema
20 GetSchemaReturnError error
15 InputCalled bool 21 InputCalled bool
16 InputInput UIInput 22 InputInput UIInput
17 InputConfig *ResourceConfig 23 InputConfig *ResourceConfig
@@ -92,8 +98,19 @@ func (p *MockResourceProvider) Close() error {
92 return p.CloseError 98 return p.CloseError
93} 99}
94 100
101func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) {
102 p.Lock()
103 defer p.Unlock()
104
105 p.GetSchemaCalled = true
106 p.GetSchemaRequest = req
107 return p.GetSchemaReturn, p.GetSchemaReturnError
108}
109
95func (p *MockResourceProvider) Input( 110func (p *MockResourceProvider) Input(
96 input UIInput, c *ResourceConfig) (*ResourceConfig, error) { 111 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
112 p.Lock()
113 defer p.Unlock()
97 p.InputCalled = true 114 p.InputCalled = true
98 p.InputInput = input 115 p.InputInput = input
99 p.InputConfig = c 116 p.InputConfig = c
@@ -186,6 +203,7 @@ func (p *MockResourceProvider) Diff(
186 p.DiffInfo = info 203 p.DiffInfo = info
187 p.DiffState = state 204 p.DiffState = state
188 p.DiffDesired = desired 205 p.DiffDesired = desired
206
189 if p.DiffFn != nil { 207 if p.DiffFn != nil {
190 return p.DiffFn(info, state, desired) 208 return p.DiffFn(info, state, desired)
191 } 209 }
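A test-style sketch of how the new mock fields are meant to be used, assuming package terraform and the configschema import; the resource name "null_resource" is illustrative only.

func TestMockResourceProvider_getSchema(t *testing.T) {
	p := new(MockResourceProvider)
	p.GetSchemaReturn = &ProviderSchema{
		ResourceTypes: map[string]*configschema.Block{
			"null_resource": {},
		},
	}

	req := &ProviderSchemaRequest{ResourceTypes: []string{"null_resource"}}
	schema, err := p.GetSchema(req)
	if err != nil {
		t.Fatal(err)
	}
	if schema != p.GetSchemaReturn {
		t.Fatalf("wrong schema: %#v", schema)
	}
	if !p.GetSchemaCalled || p.GetSchemaRequest != req {
		t.Fatal("call was not recorded on the mock")
	}
}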
diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
new file mode 100644
index 0000000..ec46efc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
@@ -0,0 +1,34 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/configschema"
5)
6
7type Schemas struct {
8 Providers ProviderSchemas
9}
10
11// ProviderSchemas is a map from provider names to provider schemas.
12//
13// The names in this map are the direct plugin name (e.g. "aws") rather than
14// any alias name (e.g. "aws.foo").
15type ProviderSchemas map[string]*ProviderSchema
16
17// ProviderSchema represents the schema for a provider's own configuration
18// and the configuration for some or all of its resources and data sources.
19//
20// The completeness of this structure depends on how it was constructed.
21// When constructed for a configuration, it will generally include only
22// resource types and data sources used by that configuration.
23type ProviderSchema struct {
24 Provider *configschema.Block
25 ResourceTypes map[string]*configschema.Block
26 DataSources map[string]*configschema.Block
27}
28
29// ProviderSchemaRequest is used to describe to a ResourceProvider which
30// aspects of schema are required, when calling the GetSchema method.
31type ProviderSchemaRequest struct {
32 ResourceTypes []string
33 DataSources []string
34}
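A small sketch of how these new types compose, assuming a ResourceProvider p obtained elsewhere; loadAWSSchema and the "aws" names are hypothetical.

func loadAWSSchema(p ResourceProvider) (*ProviderSchema, error) {
	// Ask only for what we need; unlisted resource types and data
	// sources may be omitted from the result.
	req := &ProviderSchemaRequest{
		ResourceTypes: []string{"aws_instance"},
		DataSources:   []string{"aws_ami"},
	}
	schema, err := p.GetSchema(req)
	if err != nil {
		return nil, err
	}

	// The map is keyed by the plain plugin name, never an alias.
	all := Schemas{Providers: ProviderSchemas{"aws": schema}}
	_ = all.Providers["aws"].ResourceTypes["aws_instance"] // *configschema.Block
	return schema, nil
}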
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
deleted file mode 100644
index 4632559..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow.go
+++ /dev/null
@@ -1,28 +0,0 @@
1package terraform
2
3// Shadow is the interface that any "shadow" structures must implement.
4//
5// A shadow structure is an interface implementation (typically) that
6// shadows a real implementation and verifies that the same behavior occurs
7// on both. The semantics of this behavior are up to the interface itself.
8//
9// A shadow NEVER modifies real values or state. It must always be safe to use.
10//
11// For example, a ResourceProvider shadow ensures that the same operations
12// are done on the same resources with the same configurations.
13//
14// The typical usage of a shadow following this interface is to complete
15// the real operations, then call CloseShadow which tells the shadow that
16// the real side is done. Then, once the shadow is also complete, call
17// ShadowError to find any errors that may have been caught.
18type Shadow interface {
19 // CloseShadow tells the shadow that the REAL implementation is
20 // complete. Therefore, any calls that would block should now return
21 // immediately since no more changes will happen to the real side.
22 CloseShadow() error
23
24 // ShadowError returns the errors that the shadow has found.
25 // This should be called AFTER CloseShadow and AFTER the shadow is
26 // known to be complete (no more calls to it).
27 ShadowError() error
28}
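A hedged sketch of the shutdown ordering the interface above describes: real work first, then CloseShadow to unblock the shadow, then ShadowError once the shadow side is done. drainShadow is a hypothetical caller.

func drainShadow(s Shadow) error {
	// The real-side walk has already completed at this point.
	if err := s.CloseShadow(); err != nil {
		return err
	}
	// ... the shadow side now runs to completion on its own ...
	return s.ShadowError()
}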
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
deleted file mode 100644
index 116cf84..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
+++ /dev/null
@@ -1,273 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/helper/shadow"
9)
10
11// newShadowComponentFactory creates a shadowed contextComponentFactory
12// so that requests to create new components result in both a real and
13// shadow side.
14func newShadowComponentFactory(
15 f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
16 // Create the shared data
17 shared := &shadowComponentFactoryShared{contextComponentFactory: f}
18
19 // Create the real side
20 real := &shadowComponentFactory{
21 shadowComponentFactoryShared: shared,
22 }
23
24 // Create the shadow
25 shadow := &shadowComponentFactory{
26 shadowComponentFactoryShared: shared,
27 Shadow: true,
28 }
29
30 return real, shadow
31}
32
33// shadowComponentFactory is the shadow side. Any components created
34// with this factory are fake and will not cause real work to happen.
35//
36// Unlike other shadowers, the shadow component factory will allow the
37// shadow to create _any_ component even if it is never requested on the
38// real side. This is because errors will happen later downstream as function
39// calls are made to the shadows that are never matched on the real side.
40type shadowComponentFactory struct {
41 *shadowComponentFactoryShared
42
43 Shadow bool // True if this should return the shadow
44 lock sync.Mutex
45}
46
47func (f *shadowComponentFactory) ResourceProvider(
48 n, uid string) (ResourceProvider, error) {
49 f.lock.Lock()
50 defer f.lock.Unlock()
51
52 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
53 var result ResourceProvider = real
54 if f.Shadow {
55 result = shadow
56 }
57
58 return result, err
59}
60
61func (f *shadowComponentFactory) ResourceProvisioner(
62 n, uid string) (ResourceProvisioner, error) {
63 f.lock.Lock()
64 defer f.lock.Unlock()
65
66 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
67 var result ResourceProvisioner = real
68 if f.Shadow {
69 result = shadow
70 }
71
72 return result, err
73}
74
75// CloseShadow is called when the _real_ side is complete. This will cause
76// all future blocking operations to return immediately on the shadow to
77// ensure the shadow also completes.
78func (f *shadowComponentFactory) CloseShadow() error {
79 // If we aren't the shadow, just return
80 if !f.Shadow {
81 return nil
82 }
83
84 // Lock ourselves so we don't modify state
85 f.lock.Lock()
86 defer f.lock.Unlock()
87
88 // Grab our shared state
89 shared := f.shadowComponentFactoryShared
90
91	// If we're already closed, it's an error
92 if shared.closed {
93 return fmt.Errorf("component factory shadow already closed")
94 }
95
96 // Close all the providers and provisioners and return the error
97 var result error
98 for _, n := range shared.providerKeys {
99 _, shadow, err := shared.ResourceProvider(n, n)
100 if err == nil && shadow != nil {
101 if err := shadow.CloseShadow(); err != nil {
102 result = multierror.Append(result, err)
103 }
104 }
105 }
106
107 for _, n := range shared.provisionerKeys {
108 _, shadow, err := shared.ResourceProvisioner(n, n)
109 if err == nil && shadow != nil {
110 if err := shadow.CloseShadow(); err != nil {
111 result = multierror.Append(result, err)
112 }
113 }
114 }
115
116 // Mark ourselves as closed
117 shared.closed = true
118
119 return result
120}
121
122func (f *shadowComponentFactory) ShadowError() error {
123 // If we aren't the shadow, just return
124 if !f.Shadow {
125 return nil
126 }
127
128 // Lock ourselves so we don't modify state
129 f.lock.Lock()
130 defer f.lock.Unlock()
131
132 // Grab our shared state
133 shared := f.shadowComponentFactoryShared
134
135	// If we're not closed, it's an error
136 if !shared.closed {
137 return fmt.Errorf("component factory must be closed to retrieve errors")
138 }
139
140 // Close all the providers and provisioners and return the error
141 var result error
142 for _, n := range shared.providerKeys {
143 _, shadow, err := shared.ResourceProvider(n, n)
144 if err == nil && shadow != nil {
145 if err := shadow.ShadowError(); err != nil {
146 result = multierror.Append(result, err)
147 }
148 }
149 }
150
151 for _, n := range shared.provisionerKeys {
152 _, shadow, err := shared.ResourceProvisioner(n, n)
153 if err == nil && shadow != nil {
154 if err := shadow.ShadowError(); err != nil {
155 result = multierror.Append(result, err)
156 }
157 }
158 }
159
160 return result
161}
162
163// shadowComponentFactoryShared is shared data between the two factories.
164//
165// It is NOT SAFE to run any function on this struct in parallel. Lock
166// access to this struct.
167type shadowComponentFactoryShared struct {
168 contextComponentFactory
169
170 closed bool
171 providers shadow.KeyedValue
172 providerKeys []string
173 provisioners shadow.KeyedValue
174 provisionerKeys []string
175}
176
177// shadowComponentFactoryProviderEntry is the entry that is stored in
178// the Shadows key/value for a provider.
179type shadowComponentFactoryProviderEntry struct {
180 Real ResourceProvider
181 Shadow shadowResourceProvider
182 Err error
183}
184
185type shadowComponentFactoryProvisionerEntry struct {
186 Real ResourceProvisioner
187 Shadow shadowResourceProvisioner
188 Err error
189}
190
191func (f *shadowComponentFactoryShared) ResourceProvider(
192 n, uid string) (ResourceProvider, shadowResourceProvider, error) {
193 // Determine if we already have a value
194 raw, ok := f.providers.ValueOk(uid)
195 if !ok {
196 // Build the entry
197 var entry shadowComponentFactoryProviderEntry
198
199 // No value, initialize. Create the original
200 p, err := f.contextComponentFactory.ResourceProvider(n, uid)
201 if err != nil {
202 entry.Err = err
203 p = nil // Just to be sure
204 }
205
206 if p != nil {
207 // Create the shadow
208 real, shadow := newShadowResourceProvider(p)
209 entry.Real = real
210 entry.Shadow = shadow
211
212 if f.closed {
213 shadow.CloseShadow()
214 }
215 }
216
217 // Store the value
218 f.providers.SetValue(uid, &entry)
219 f.providerKeys = append(f.providerKeys, uid)
220 raw = &entry
221 }
222
223 // Read the entry
224 entry, ok := raw.(*shadowComponentFactoryProviderEntry)
225 if !ok {
226 return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
227 }
228
229 // Return
230 return entry.Real, entry.Shadow, entry.Err
231}
232
233func (f *shadowComponentFactoryShared) ResourceProvisioner(
234 n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
235 // Determine if we already have a value
236 raw, ok := f.provisioners.ValueOk(uid)
237 if !ok {
238 // Build the entry
239 var entry shadowComponentFactoryProvisionerEntry
240
241 // No value, initialize. Create the original
242 p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
243 if err != nil {
244 entry.Err = err
245 p = nil // Just to be sure
246 }
247
248 if p != nil {
249 // For now, just create a mock since we don't support provisioners yet
250 real, shadow := newShadowResourceProvisioner(p)
251 entry.Real = real
252 entry.Shadow = shadow
253
254 if f.closed {
255 shadow.CloseShadow()
256 }
257 }
258
259 // Store the value
260 f.provisioners.SetValue(uid, &entry)
261 f.provisionerKeys = append(f.provisionerKeys, uid)
262 raw = &entry
263 }
264
265 // Read the entry
266 entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
267 if !ok {
268 return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
269 }
270
271 // Return
272 return entry.Real, entry.Shadow, entry.Err
273}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
deleted file mode 100644
index 5588af2..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
+++ /dev/null
@@ -1,158 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/mitchellh/copystructure"
9)
10
11// newShadowContext creates a new context that will shadow the given context
12// when walking the graph. The resulting context should be used _only once_
13// for a graph walk.
14//
15// The returned Shadow should be closed after the graph walk with the
16// real context is complete. Errors from the shadow can be retrieved there.
17//
18// Most importantly, any operations done on the shadow context (the returned
19// context) will NEVER affect the real context. All structures are deep
20// copied, no real providers or resources are used, etc.
21func newShadowContext(c *Context) (*Context, *Context, Shadow) {
22 // Copy the targets
23 targetRaw, err := copystructure.Copy(c.targets)
24 if err != nil {
25 panic(err)
26 }
27
28 // Copy the variables
29 varRaw, err := copystructure.Copy(c.variables)
30 if err != nil {
31 panic(err)
32 }
33
34 // Copy the provider inputs
35 providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
36 if err != nil {
37 panic(err)
38 }
39
40 // The factories
41 componentsReal, componentsShadow := newShadowComponentFactory(c.components)
42
43 // Create the shadow
44 shadow := &Context{
45 components: componentsShadow,
46 destroy: c.destroy,
47 diff: c.diff.DeepCopy(),
48 hooks: nil,
49 meta: c.meta,
50 module: c.module,
51 state: c.state.DeepCopy(),
52 targets: targetRaw.([]string),
53 variables: varRaw.(map[string]interface{}),
54
55 // NOTE(mitchellh): This is not going to work for shadows that are
56 // testing that input results in the proper end state. At the time
57 // of writing, input is not used in any state-changing graph
58 // walks anyways, so this checks nothing. We set it to this to avoid
59 // any panics but even a "nil" value worked here.
60 uiInput: new(MockUIInput),
61
62 // Hardcoded to 4 since parallelism in the shadow doesn't matter
63 // a ton since we're doing far less compared to the real side
64 // and our operations are MUCH faster.
65 parallelSem: NewSemaphore(4),
66 providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
67 }
68
69 // Create the real context. This is effectively just a copy of
70 // the context given except we need to modify some of the values
71 // to point to the real side of a shadow so the shadow can compare values.
72 real := &Context{
73 // The fields below are changed.
74 components: componentsReal,
75
76 // The fields below are direct copies
77 destroy: c.destroy,
78 diff: c.diff,
79 // diffLock - no copy
80 hooks: c.hooks,
81 meta: c.meta,
82 module: c.module,
83 sh: c.sh,
84 state: c.state,
85 // stateLock - no copy
86 targets: c.targets,
87 uiInput: c.uiInput,
88 variables: c.variables,
89
90 // l - no copy
91 parallelSem: c.parallelSem,
92 providerInputConfig: c.providerInputConfig,
93 runContext: c.runContext,
94 runContextCancel: c.runContextCancel,
95 shadowErr: c.shadowErr,
96 }
97
98 return real, shadow, &shadowContextCloser{
99 Components: componentsShadow,
100 }
101}
102
103// shadowContextVerify takes the real and shadow context and verifies they
104// have equal diffs and states.
105func shadowContextVerify(real, shadow *Context) error {
106 var result error
107
108 // The states compared must be pruned so they're minimal/clean
109 real.state.prune()
110 shadow.state.prune()
111
112 // Compare the states
113 if !real.state.Equal(shadow.state) {
114 result = multierror.Append(result, fmt.Errorf(
115 "Real and shadow states do not match! "+
116 "Real state:\n\n%s\n\n"+
117 "Shadow state:\n\n%s\n\n",
118 real.state, shadow.state))
119 }
120
121 // Compare the diffs
122 if !real.diff.Equal(shadow.diff) {
123 result = multierror.Append(result, fmt.Errorf(
124 "Real and shadow diffs do not match! "+
125 "Real diff:\n\n%s\n\n"+
126 "Shadow diff:\n\n%s\n\n",
127 real.diff, shadow.diff))
128 }
129
130 return result
131}
132
133// shadowContextCloser is the io.Closer returned by newShadowContext that
134// closes all the shadows and returns the results.
135type shadowContextCloser struct {
136 Components *shadowComponentFactory
137}
138
139// Close closes the shadow context.
140func (c *shadowContextCloser) CloseShadow() error {
141 return c.Components.CloseShadow()
142}
143
144func (c *shadowContextCloser) ShadowError() error {
145 err := c.Components.ShadowError()
146 if err == nil {
147 return nil
148 }
149
150 // This is a sad edge case: if the configuration contains uuid() at
151	// any point, we cannot reason about the shadow execution. Tested
152 // with Context2Plan_shadowUuid.
153 if strings.Contains(err.Error(), "uuid()") {
154 err = nil
155 }
156
157 return err
158}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
deleted file mode 100644
index 9741d7e..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
+++ /dev/null
@@ -1,815 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "sync"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/helper/shadow"
10)
11
12// shadowResourceProvider implements ResourceProvider for the shadow
13// eval context defined in eval_context_shadow.go.
14//
15// This is used to verify behavior with a real provider. This shouldn't
16// be used directly.
17type shadowResourceProvider interface {
18 ResourceProvider
19 Shadow
20}
21
22// newShadowResourceProvider creates a new shadowed ResourceProvider.
23//
24// This will assume a well behaved real ResourceProvider. For example,
25// it assumes that the `Resources` call underneath doesn't change values
26// since once it is called on the real provider, it will be cached and
27// returned in the shadow since number of calls to that shouldn't affect
28// actual behavior.
29//
30// However, with calls like Apply, call order is taken into account,
31// parameters are checked for equality, etc.
32func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
33 // Create the shared data
34 shared := shadowResourceProviderShared{}
35
36 // Create the real provider that does actual work
37 real := &shadowResourceProviderReal{
38 ResourceProvider: p,
39 Shared: &shared,
40 }
41
42 // Create the shadow that watches the real value
43 shadow := &shadowResourceProviderShadow{
44 Shared: &shared,
45
46 resources: p.Resources(),
47 dataSources: p.DataSources(),
48 }
49
50 return real, shadow
51}
52
53// shadowResourceProviderReal is the real resource provider. Function calls
54// to this will perform real work. This records the parameters and return
55// values and call order for the shadow to reproduce.
56type shadowResourceProviderReal struct {
57 ResourceProvider
58
59 Shared *shadowResourceProviderShared
60}
61
62func (p *shadowResourceProviderReal) Close() error {
63 var result error
64 if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
65 result = c.Close()
66 }
67
68 p.Shared.CloseErr.SetValue(result)
69 return result
70}
71
72func (p *shadowResourceProviderReal) Input(
73 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
74 cCopy := c.DeepCopy()
75
76 result, err := p.ResourceProvider.Input(input, c)
77 p.Shared.Input.SetValue(&shadowResourceProviderInput{
78 Config: cCopy,
79 Result: result.DeepCopy(),
80 ResultErr: err,
81 })
82
83 return result, err
84}
85
86func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
87 warns, errs := p.ResourceProvider.Validate(c)
88 p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
89 Config: c.DeepCopy(),
90 ResultWarn: warns,
91 ResultErr: errs,
92 })
93
94 return warns, errs
95}
96
97func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
98 cCopy := c.DeepCopy()
99
100 err := p.ResourceProvider.Configure(c)
101 p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
102 Config: cCopy,
103 Result: err,
104 })
105
106 return err
107}
108
109func (p *shadowResourceProviderReal) Stop() error {
110 return p.ResourceProvider.Stop()
111}
112
113func (p *shadowResourceProviderReal) ValidateResource(
114 t string, c *ResourceConfig) ([]string, []error) {
115 key := t
116 configCopy := c.DeepCopy()
117
118 // Real operation
119 warns, errs := p.ResourceProvider.ValidateResource(t, c)
120
121 // Initialize to ensure we always have a wrapper with a lock
122 p.Shared.ValidateResource.Init(
123 key, &shadowResourceProviderValidateResourceWrapper{})
124
125 // Get the result
126 raw := p.Shared.ValidateResource.Value(key)
127 wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
128 if !ok {
129 // If this fails then we just continue with our day... the shadow
130		// will fail too, but there isn't much we can do.
131 log.Printf(
132 "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
133 return warns, errs
134 }
135
136 // Lock the wrapper for writing and record our call
137 wrapper.Lock()
138 defer wrapper.Unlock()
139
140 wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
141 Config: configCopy,
142 Warns: warns,
143 Errors: errs,
144 })
145
146 // With it locked, call SetValue again so that it triggers WaitForChange
147 p.Shared.ValidateResource.SetValue(key, wrapper)
148
149 // Return the result
150 return warns, errs
151}
152
153func (p *shadowResourceProviderReal) Apply(
154 info *InstanceInfo,
155 state *InstanceState,
156 diff *InstanceDiff) (*InstanceState, error) {
157	// These have to be copied before the call since the call can modify
158 stateCopy := state.DeepCopy()
159 diffCopy := diff.DeepCopy()
160
161 result, err := p.ResourceProvider.Apply(info, state, diff)
162 p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
163 State: stateCopy,
164 Diff: diffCopy,
165 Result: result.DeepCopy(),
166 ResultErr: err,
167 })
168
169 return result, err
170}
171
172func (p *shadowResourceProviderReal) Diff(
173 info *InstanceInfo,
174 state *InstanceState,
175 desired *ResourceConfig) (*InstanceDiff, error) {
176	// These have to be copied before the call since the call can modify
177 stateCopy := state.DeepCopy()
178 desiredCopy := desired.DeepCopy()
179
180 result, err := p.ResourceProvider.Diff(info, state, desired)
181 p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
182 State: stateCopy,
183 Desired: desiredCopy,
184 Result: result.DeepCopy(),
185 ResultErr: err,
186 })
187
188 return result, err
189}
190
191func (p *shadowResourceProviderReal) Refresh(
192 info *InstanceInfo,
193 state *InstanceState) (*InstanceState, error) {
194	// These have to be copied before the call since the call can modify
195 stateCopy := state.DeepCopy()
196
197 result, err := p.ResourceProvider.Refresh(info, state)
198 p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
199 State: stateCopy,
200 Result: result.DeepCopy(),
201 ResultErr: err,
202 })
203
204 return result, err
205}
206
207func (p *shadowResourceProviderReal) ValidateDataSource(
208 t string, c *ResourceConfig) ([]string, []error) {
209 key := t
210 configCopy := c.DeepCopy()
211
212 // Real operation
213 warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
214
215 // Initialize
216 p.Shared.ValidateDataSource.Init(
217 key, &shadowResourceProviderValidateDataSourceWrapper{})
218
219 // Get the result
220 raw := p.Shared.ValidateDataSource.Value(key)
221 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
222 if !ok {
223 // If this fails then we just continue with our day... the shadow
224		// will fail too, but there isn't much we can do.
225 log.Printf(
226 "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
227 return warns, errs
228 }
229
230 // Lock the wrapper for writing and record our call
231 wrapper.Lock()
232 defer wrapper.Unlock()
233
234 wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
235 Config: configCopy,
236 Warns: warns,
237 Errors: errs,
238 })
239
240 // Set it
241 p.Shared.ValidateDataSource.SetValue(key, wrapper)
242
243 // Return the result
244 return warns, errs
245}
246
247func (p *shadowResourceProviderReal) ReadDataDiff(
248 info *InstanceInfo,
249 desired *ResourceConfig) (*InstanceDiff, error) {
250 // These have to be copied before the call since call can modify
251 desiredCopy := desired.DeepCopy()
252
253 result, err := p.ResourceProvider.ReadDataDiff(info, desired)
254 p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
255 Desired: desiredCopy,
256 Result: result.DeepCopy(),
257 ResultErr: err,
258 })
259
260 return result, err
261}
262
263func (p *shadowResourceProviderReal) ReadDataApply(
264 info *InstanceInfo,
265 diff *InstanceDiff) (*InstanceState, error) {
266	// These have to be copied before the call since the call can modify
267 diffCopy := diff.DeepCopy()
268
269 result, err := p.ResourceProvider.ReadDataApply(info, diff)
270 p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
271 Diff: diffCopy,
272 Result: result.DeepCopy(),
273 ResultErr: err,
274 })
275
276 return result, err
277}
278
279// shadowResourceProviderShadow is the shadow resource provider. Function
280// calls never affect real resources. This is paired with the "real" side
281// which must be called properly to enable recording.
282type shadowResourceProviderShadow struct {
283 Shared *shadowResourceProviderShared
284
285 // Cached values that are expected to not change
286 resources []ResourceType
287 dataSources []DataSource
288
289 Error error // Error is the list of errors from the shadow
290 ErrorLock sync.Mutex
291}
292
293type shadowResourceProviderShared struct {
294 // NOTE: Anytime a value is added here, be sure to add it to
295 // the Close() method so that it is closed.
296
297 CloseErr shadow.Value
298 Input shadow.Value
299 Validate shadow.Value
300 Configure shadow.Value
301 ValidateResource shadow.KeyedValue
302 Apply shadow.KeyedValue
303 Diff shadow.KeyedValue
304 Refresh shadow.KeyedValue
305 ValidateDataSource shadow.KeyedValue
306 ReadDataDiff shadow.KeyedValue
307 ReadDataApply shadow.KeyedValue
308}
309
310func (p *shadowResourceProviderShared) Close() error {
311 return shadow.Close(p)
312}
313
314func (p *shadowResourceProviderShadow) CloseShadow() error {
315 err := p.Shared.Close()
316 if err != nil {
317 err = fmt.Errorf("close error: %s", err)
318 }
319
320 return err
321}
322
323func (p *shadowResourceProviderShadow) ShadowError() error {
324 return p.Error
325}
326
327func (p *shadowResourceProviderShadow) Resources() []ResourceType {
328 return p.resources
329}
330
331func (p *shadowResourceProviderShadow) DataSources() []DataSource {
332 return p.dataSources
333}
334
335func (p *shadowResourceProviderShadow) Close() error {
336 v := p.Shared.CloseErr.Value()
337 if v == nil {
338 return nil
339 }
340
341 return v.(error)
342}
343
344func (p *shadowResourceProviderShadow) Input(
345 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
346 // Get the result of the input call
347 raw := p.Shared.Input.Value()
348 if raw == nil {
349 return nil, nil
350 }
351
352 result, ok := raw.(*shadowResourceProviderInput)
353 if !ok {
354 p.ErrorLock.Lock()
355 defer p.ErrorLock.Unlock()
356 p.Error = multierror.Append(p.Error, fmt.Errorf(
357 "Unknown 'input' shadow value: %#v", raw))
358 return nil, nil
359 }
360
361 // Compare the parameters, which should be identical
362 if !c.Equal(result.Config) {
363 p.ErrorLock.Lock()
364 p.Error = multierror.Append(p.Error, fmt.Errorf(
365 "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
366 result.Config, c))
367 p.ErrorLock.Unlock()
368 }
369
370 // Return the results
371 return result.Result, result.ResultErr
372}
373
374func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
375 // Get the result of the validate call
376 raw := p.Shared.Validate.Value()
377 if raw == nil {
378 return nil, nil
379 }
380
381 result, ok := raw.(*shadowResourceProviderValidate)
382 if !ok {
383 p.ErrorLock.Lock()
384 defer p.ErrorLock.Unlock()
385 p.Error = multierror.Append(p.Error, fmt.Errorf(
386 "Unknown 'validate' shadow value: %#v", raw))
387 return nil, nil
388 }
389
390 // Compare the parameters, which should be identical
391 if !c.Equal(result.Config) {
392 p.ErrorLock.Lock()
393 p.Error = multierror.Append(p.Error, fmt.Errorf(
394 "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
395 result.Config, c))
396 p.ErrorLock.Unlock()
397 }
398
399 // Return the results
400 return result.ResultWarn, result.ResultErr
401}
402
403func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
404 // Get the result of the call
405 raw := p.Shared.Configure.Value()
406 if raw == nil {
407 return nil
408 }
409
410 result, ok := raw.(*shadowResourceProviderConfigure)
411 if !ok {
412 p.ErrorLock.Lock()
413 defer p.ErrorLock.Unlock()
414 p.Error = multierror.Append(p.Error, fmt.Errorf(
415 "Unknown 'configure' shadow value: %#v", raw))
416 return nil
417 }
418
419 // Compare the parameters, which should be identical
420 if !c.Equal(result.Config) {
421 p.ErrorLock.Lock()
422 p.Error = multierror.Append(p.Error, fmt.Errorf(
423 "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
424 result.Config, c))
425 p.ErrorLock.Unlock()
426 }
427
428 // Return the results
429 return result.Result
430}
431
432// Stop returns immediately.
433func (p *shadowResourceProviderShadow) Stop() error {
434 return nil
435}
436
437func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
438 // Unique key
439 key := t
440
441 // Get the initial value
442 raw := p.Shared.ValidateResource.Value(key)
443
444 // Find a validation with our configuration
445 var result *shadowResourceProviderValidateResource
446 for {
447 // Get the value
448 if raw == nil {
449 p.ErrorLock.Lock()
450 defer p.ErrorLock.Unlock()
451 p.Error = multierror.Append(p.Error, fmt.Errorf(
452 "Unknown 'ValidateResource' call for %q:\n\n%#v",
453 key, c))
454 return nil, nil
455 }
456
457 wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
458 if !ok {
459 p.ErrorLock.Lock()
460 defer p.ErrorLock.Unlock()
461 p.Error = multierror.Append(p.Error, fmt.Errorf(
462 "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
463 return nil, nil
464 }
465
466 // Look for the matching call with our configuration
467 wrapper.RLock()
468 for _, call := range wrapper.Calls {
469 if call.Config.Equal(c) {
470 result = call
471 break
472 }
473 }
474 wrapper.RUnlock()
475
476 // If we found a result, exit
477 if result != nil {
478 break
479 }
480
481 // Wait for a change so we can get the wrapper again
482 raw = p.Shared.ValidateResource.WaitForChange(key)
483 }
484
485 return result.Warns, result.Errors
486}
487
488func (p *shadowResourceProviderShadow) Apply(
489 info *InstanceInfo,
490 state *InstanceState,
491 diff *InstanceDiff) (*InstanceState, error) {
492 // Unique key
493 key := info.uniqueId()
494 raw := p.Shared.Apply.Value(key)
495 if raw == nil {
496 p.ErrorLock.Lock()
497 defer p.ErrorLock.Unlock()
498 p.Error = multierror.Append(p.Error, fmt.Errorf(
499 "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
500 key, state, diff))
501 return nil, nil
502 }
503
504 result, ok := raw.(*shadowResourceProviderApply)
505 if !ok {
506 p.ErrorLock.Lock()
507 defer p.ErrorLock.Unlock()
508 p.Error = multierror.Append(p.Error, fmt.Errorf(
509 "Unknown 'apply' shadow value for %q: %#v", key, raw))
510 return nil, nil
511 }
512
513 // Compare the parameters, which should be identical
514 if !state.Equal(result.State) {
515 p.ErrorLock.Lock()
516 p.Error = multierror.Append(p.Error, fmt.Errorf(
517 "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
518 key, result.State, state))
519 p.ErrorLock.Unlock()
520 }
521
522 if !diff.Equal(result.Diff) {
523 p.ErrorLock.Lock()
524 p.Error = multierror.Append(p.Error, fmt.Errorf(
525 "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
526 key, result.Diff, diff))
527 p.ErrorLock.Unlock()
528 }
529
530 return result.Result, result.ResultErr
531}
532
533func (p *shadowResourceProviderShadow) Diff(
534 info *InstanceInfo,
535 state *InstanceState,
536 desired *ResourceConfig) (*InstanceDiff, error) {
537 // Unique key
538 key := info.uniqueId()
539 raw := p.Shared.Diff.Value(key)
540 if raw == nil {
541 p.ErrorLock.Lock()
542 defer p.ErrorLock.Unlock()
543 p.Error = multierror.Append(p.Error, fmt.Errorf(
544 "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
545 key, state, desired))
546 return nil, nil
547 }
548
549 result, ok := raw.(*shadowResourceProviderDiff)
550 if !ok {
551 p.ErrorLock.Lock()
552 defer p.ErrorLock.Unlock()
553 p.Error = multierror.Append(p.Error, fmt.Errorf(
554 "Unknown 'diff' shadow value for %q: %#v", key, raw))
555 return nil, nil
556 }
557
558 // Compare the parameters, which should be identical
559 if !state.Equal(result.State) {
560 p.ErrorLock.Lock()
561 p.Error = multierror.Append(p.Error, fmt.Errorf(
562 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
563 key, result.State, state))
564 p.ErrorLock.Unlock()
565 }
566 if !desired.Equal(result.Desired) {
567 p.ErrorLock.Lock()
568 p.Error = multierror.Append(p.Error, fmt.Errorf(
569 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
570 key, result.Desired, desired))
571 p.ErrorLock.Unlock()
572 }
573
574 return result.Result, result.ResultErr
575}
576
577func (p *shadowResourceProviderShadow) Refresh(
578 info *InstanceInfo,
579 state *InstanceState) (*InstanceState, error) {
580 // Unique key
581 key := info.uniqueId()
582 raw := p.Shared.Refresh.Value(key)
583 if raw == nil {
584 p.ErrorLock.Lock()
585 defer p.ErrorLock.Unlock()
586 p.Error = multierror.Append(p.Error, fmt.Errorf(
587 "Unknown 'refresh' call for %q:\n\n%#v",
588 key, state))
589 return nil, nil
590 }
591
592 result, ok := raw.(*shadowResourceProviderRefresh)
593 if !ok {
594 p.ErrorLock.Lock()
595 defer p.ErrorLock.Unlock()
596 p.Error = multierror.Append(p.Error, fmt.Errorf(
597 "Unknown 'refresh' shadow value: %#v", raw))
598 return nil, nil
599 }
600
601 // Compare the parameters, which should be identical
602 if !state.Equal(result.State) {
603 p.ErrorLock.Lock()
604 p.Error = multierror.Append(p.Error, fmt.Errorf(
605 "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
606 key, result.State, state))
607 p.ErrorLock.Unlock()
608 }
609
610 return result.Result, result.ResultErr
611}
612
613func (p *shadowResourceProviderShadow) ValidateDataSource(
614 t string, c *ResourceConfig) ([]string, []error) {
615 // Unique key
616 key := t
617
618 // Get the initial value
619 raw := p.Shared.ValidateDataSource.Value(key)
620
621 // Find a validation with our configuration
622 var result *shadowResourceProviderValidateDataSource
623 for {
624 // Get the value
625 if raw == nil {
626 p.ErrorLock.Lock()
627 defer p.ErrorLock.Unlock()
628 p.Error = multierror.Append(p.Error, fmt.Errorf(
629 "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
630 key, c))
631 return nil, nil
632 }
633
634 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
635 if !ok {
636 p.ErrorLock.Lock()
637 defer p.ErrorLock.Unlock()
638 p.Error = multierror.Append(p.Error, fmt.Errorf(
639 "Unknown 'ValidateDataSource' shadow value: %#v", raw))
640 return nil, nil
641 }
642
643 // Look for the matching call with our configuration
644 wrapper.RLock()
645 for _, call := range wrapper.Calls {
646 if call.Config.Equal(c) {
647 result = call
648 break
649 }
650 }
651 wrapper.RUnlock()
652
653 // If we found a result, exit
654 if result != nil {
655 break
656 }
657
658 // Wait for a change so we can get the wrapper again
659 raw = p.Shared.ValidateDataSource.WaitForChange(key)
660 }
661
662 return result.Warns, result.Errors
663}
664
665func (p *shadowResourceProviderShadow) ReadDataDiff(
666 info *InstanceInfo,
667 desired *ResourceConfig) (*InstanceDiff, error) {
668 // Unique key
669 key := info.uniqueId()
670 raw := p.Shared.ReadDataDiff.Value(key)
671 if raw == nil {
672 p.ErrorLock.Lock()
673 defer p.ErrorLock.Unlock()
674 p.Error = multierror.Append(p.Error, fmt.Errorf(
675 "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
676 key, desired))
677 return nil, nil
678 }
679
680 result, ok := raw.(*shadowResourceProviderReadDataDiff)
681 if !ok {
682 p.ErrorLock.Lock()
683 defer p.ErrorLock.Unlock()
684 p.Error = multierror.Append(p.Error, fmt.Errorf(
685 "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
686 return nil, nil
687 }
688
689 // Compare the parameters, which should be identical
690 if !desired.Equal(result.Desired) {
691 p.ErrorLock.Lock()
692 p.Error = multierror.Append(p.Error, fmt.Errorf(
693 "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
694 key, result.Desired, desired))
695 p.ErrorLock.Unlock()
696 }
697
698 return result.Result, result.ResultErr
699}
700
701func (p *shadowResourceProviderShadow) ReadDataApply(
702 info *InstanceInfo,
703 d *InstanceDiff) (*InstanceState, error) {
704 // Unique key
705 key := info.uniqueId()
706 raw := p.Shared.ReadDataApply.Value(key)
707 if raw == nil {
708 p.ErrorLock.Lock()
709 defer p.ErrorLock.Unlock()
710 p.Error = multierror.Append(p.Error, fmt.Errorf(
711 "Unknown 'ReadDataApply' call for %q:\n\n%#v",
712 key, d))
713 return nil, nil
714 }
715
716 result, ok := raw.(*shadowResourceProviderReadDataApply)
717 if !ok {
718 p.ErrorLock.Lock()
719 defer p.ErrorLock.Unlock()
720 p.Error = multierror.Append(p.Error, fmt.Errorf(
721 "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
722 return nil, nil
723 }
724
725 // Compare the parameters, which should be identical
726 if !d.Equal(result.Diff) {
727 p.ErrorLock.Lock()
728 p.Error = multierror.Append(p.Error, fmt.Errorf(
729 "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
730 result.Diff, d))
731 p.ErrorLock.Unlock()
732 }
733
734 return result.Result, result.ResultErr
735}
736
737func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
738 panic("import not supported by shadow graph")
739}
740
741// The structs for the various function calls are put below. These structs
742// are used to carry call information across the real/shadow boundaries.
743
744type shadowResourceProviderInput struct {
745 Config *ResourceConfig
746 Result *ResourceConfig
747 ResultErr error
748}
749
750type shadowResourceProviderValidate struct {
751 Config *ResourceConfig
752 ResultWarn []string
753 ResultErr []error
754}
755
756type shadowResourceProviderConfigure struct {
757 Config *ResourceConfig
758 Result error
759}
760
761type shadowResourceProviderValidateResourceWrapper struct {
762 sync.RWMutex
763
764 Calls []*shadowResourceProviderValidateResource
765}
766
767type shadowResourceProviderValidateResource struct {
768 Config *ResourceConfig
769 Warns []string
770 Errors []error
771}
772
773type shadowResourceProviderApply struct {
774 State *InstanceState
775 Diff *InstanceDiff
776 Result *InstanceState
777 ResultErr error
778}
779
780type shadowResourceProviderDiff struct {
781 State *InstanceState
782 Desired *ResourceConfig
783 Result *InstanceDiff
784 ResultErr error
785}
786
787type shadowResourceProviderRefresh struct {
788 State *InstanceState
789 Result *InstanceState
790 ResultErr error
791}
792
793type shadowResourceProviderValidateDataSourceWrapper struct {
794 sync.RWMutex
795
796 Calls []*shadowResourceProviderValidateDataSource
797}
798
799type shadowResourceProviderValidateDataSource struct {
800 Config *ResourceConfig
801 Warns []string
802 Errors []error
803}
804
805type shadowResourceProviderReadDataDiff struct {
806 Desired *ResourceConfig
807 Result *InstanceDiff
808 ResultErr error
809}
810
811type shadowResourceProviderReadDataApply struct {
812 Diff *InstanceDiff
813 Result *InstanceState
814 ResultErr error
815}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
deleted file mode 100644
index 60a4908..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
+++ /dev/null
@@ -1,282 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5 "io"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/helper/shadow"
11)
12
13// shadowResourceProvisioner implements ResourceProvisioner for the shadow
14// eval context defined in eval_context_shadow.go.
15//
16// This is used to verify behavior with a real provisioner. This shouldn't
17// be used directly.
18type shadowResourceProvisioner interface {
19 ResourceProvisioner
20 Shadow
21}
22
23// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
24func newShadowResourceProvisioner(
25 p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
26 // Create the shared data
27 shared := shadowResourceProvisionerShared{
28 Validate: shadow.ComparedValue{
29 Func: shadowResourceProvisionerValidateCompare,
30 },
31 }
32
33 // Create the real provisioner that does actual work
34 real := &shadowResourceProvisionerReal{
35 ResourceProvisioner: p,
36 Shared: &shared,
37 }
38
39 // Create the shadow that watches the real value
40 shadow := &shadowResourceProvisionerShadow{
41 Shared: &shared,
42 }
43
44 return real, shadow
45}
46
47// shadowResourceProvisionerReal is the real resource provisioner. Function calls
48// to this will perform real work. This records the parameters and return
49// values and call order for the shadow to reproduce.
50type shadowResourceProvisionerReal struct {
51 ResourceProvisioner
52
53 Shared *shadowResourceProvisionerShared
54}
55
56func (p *shadowResourceProvisionerReal) Close() error {
57 var result error
58 if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
59 result = c.Close()
60 }
61
62 p.Shared.CloseErr.SetValue(result)
63 return result
64}
65
66func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
67 warns, errs := p.ResourceProvisioner.Validate(c)
68 p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
69 Config: c,
70 ResultWarn: warns,
71 ResultErr: errs,
72 })
73
74 return warns, errs
75}
76
77func (p *shadowResourceProvisionerReal) Apply(
78 output UIOutput, s *InstanceState, c *ResourceConfig) error {
79 err := p.ResourceProvisioner.Apply(output, s, c)
80
81	// Write the result, grab a lock for writing. This should never
82 // block long since the operations below don't block.
83 p.Shared.ApplyLock.Lock()
84 defer p.Shared.ApplyLock.Unlock()
85
86 key := s.ID
87 raw, ok := p.Shared.Apply.ValueOk(key)
88 if !ok {
89 // Setup a new value
90 raw = &shadow.ComparedValue{
91 Func: shadowResourceProvisionerApplyCompare,
92 }
93
94 // Set it
95 p.Shared.Apply.SetValue(key, raw)
96 }
97
98 compareVal, ok := raw.(*shadow.ComparedValue)
99 if !ok {
100 // Just log and return so that we don't cause the real side
101 // any side effects.
102 log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
103 return err
104 }
105
106 // Write the resulting value
107 compareVal.SetValue(&shadowResourceProvisionerApply{
108 Config: c,
109 ResultErr: err,
110 })
111
112 return err
113}
114
115func (p *shadowResourceProvisionerReal) Stop() error {
116 return p.ResourceProvisioner.Stop()
117}
118
119// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
120// calls never affect real resources. This is paired with the "real" side
121// which must be called properly to enable recording.
122type shadowResourceProvisionerShadow struct {
123 Shared *shadowResourceProvisionerShared
124
125 Error error // Error is the list of errors from the shadow
126 ErrorLock sync.Mutex
127}
128
129type shadowResourceProvisionerShared struct {
130 // NOTE: Anytime a value is added here, be sure to add it to
131 // the Close() method so that it is closed.
132
133 CloseErr shadow.Value
134 Validate shadow.ComparedValue
135 Apply shadow.KeyedValue
136 ApplyLock sync.Mutex // For writing only
137}
138
139func (p *shadowResourceProvisionerShared) Close() error {
140 closers := []io.Closer{
141 &p.CloseErr,
142 }
143
144 for _, c := range closers {
145 // This should never happen, but we don't panic because a panic
146 // could affect the real behavior of Terraform and a shadow should
147 // never be able to do that.
148 if err := c.Close(); err != nil {
149 return err
150 }
151 }
152
153 return nil
154}
155
156func (p *shadowResourceProvisionerShadow) CloseShadow() error {
157 err := p.Shared.Close()
158 if err != nil {
159 err = fmt.Errorf("close error: %s", err)
160 }
161
162 return err
163}
164
165func (p *shadowResourceProvisionerShadow) ShadowError() error {
166 return p.Error
167}
168
169func (p *shadowResourceProvisionerShadow) Close() error {
170 v := p.Shared.CloseErr.Value()
171 if v == nil {
172 return nil
173 }
174
175 return v.(error)
176}
177
178func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
179 // Get the result of the validate call
180 raw := p.Shared.Validate.Value(c)
181 if raw == nil {
182 return nil, nil
183 }
184
185 result, ok := raw.(*shadowResourceProvisionerValidate)
186 if !ok {
187 p.ErrorLock.Lock()
188 defer p.ErrorLock.Unlock()
189 p.Error = multierror.Append(p.Error, fmt.Errorf(
190 "Unknown 'validate' shadow value: %#v", raw))
191 return nil, nil
192 }
193
194 // We don't need to compare configurations because we key on the
195 // configuration so just return right away.
196 return result.ResultWarn, result.ResultErr
197}
198
199func (p *shadowResourceProvisionerShadow) Apply(
200 output UIOutput, s *InstanceState, c *ResourceConfig) error {
201 // Get the value based on the key
202 key := s.ID
203 raw := p.Shared.Apply.Value(key)
204 if raw == nil {
205 return nil
206 }
207
208 compareVal, ok := raw.(*shadow.ComparedValue)
209 if !ok {
210 p.ErrorLock.Lock()
211 defer p.ErrorLock.Unlock()
212 p.Error = multierror.Append(p.Error, fmt.Errorf(
213 "Unknown 'apply' shadow value: %#v", raw))
214 return nil
215 }
216
217 // With the compared value, we compare against our config
218 raw = compareVal.Value(c)
219 if raw == nil {
220 return nil
221 }
222
223 result, ok := raw.(*shadowResourceProvisionerApply)
224 if !ok {
225 p.ErrorLock.Lock()
226 defer p.ErrorLock.Unlock()
227 p.Error = multierror.Append(p.Error, fmt.Errorf(
228 "Unknown 'apply' shadow value: %#v", raw))
229 return nil
230 }
231
232 return result.ResultErr
233}
234
235func (p *shadowResourceProvisionerShadow) Stop() error {
236 // For the shadow, we always just return nil since a Stop indicates
237 // that we were interrupted and shadows are disabled during interrupts
238 // anyways.
239 return nil
240}
241
242// The structs for the various function calls are put below. These structs
243// are used to carry call information across the real/shadow boundaries.
244
245type shadowResourceProvisionerValidate struct {
246 Config *ResourceConfig
247 ResultWarn []string
248 ResultErr []error
249}
250
251type shadowResourceProvisionerApply struct {
252 Config *ResourceConfig
253 ResultErr error
254}
255
256func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
257 c, ok := k.(*ResourceConfig)
258 if !ok {
259 return false
260 }
261
262 result, ok := v.(*shadowResourceProvisionerValidate)
263 if !ok {
264 return false
265 }
266
267 return c.Equal(result.Config)
268}
269
270func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
271 c, ok := k.(*ResourceConfig)
272 if !ok {
273 return false
274 }
275
276 result, ok := v.(*shadowResourceProvisionerApply)
277 if !ok {
278 return false
279 }
280
281 return c.Equal(result.Config)
282}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
index 0c46194..04b14a6 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -9,6 +9,7 @@ import (
9 "io" 9 "io"
10 "io/ioutil" 10 "io/ioutil"
11 "log" 11 "log"
12 "os"
12 "reflect" 13 "reflect"
13 "sort" 14 "sort"
14 "strconv" 15 "strconv"
@@ -16,10 +17,12 @@ import (
16 "sync" 17 "sync"
17 18
18 "github.com/hashicorp/go-multierror" 19 "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/go-uuid"
19 "github.com/hashicorp/go-version" 21 "github.com/hashicorp/go-version"
20 "github.com/hashicorp/terraform/config" 22 "github.com/hashicorp/terraform/config"
21 "github.com/mitchellh/copystructure" 23 "github.com/mitchellh/copystructure"
22 "github.com/satori/go.uuid" 24
25 tfversion "github.com/hashicorp/terraform/version"
23) 26)
24 27
25const ( 28const (
@@ -664,7 +667,7 @@ func (s *State) FromFutureTerraform() bool {
664 } 667 }
665 668
666 v := version.Must(version.NewVersion(s.TFVersion)) 669 v := version.Must(version.NewVersion(s.TFVersion))
667 return SemVersion.LessThan(v) 670 return tfversion.SemVer.LessThan(v)
668} 671}
669 672
670func (s *State) Init() { 673func (s *State) Init() {
@@ -704,7 +707,11 @@ func (s *State) EnsureHasLineage() {
704 707
705func (s *State) ensureHasLineage() { 708func (s *State) ensureHasLineage() {
706 if s.Lineage == "" { 709 if s.Lineage == "" {
707 s.Lineage = uuid.NewV4().String() 710 lineage, err := uuid.GenerateUUID()
711 if err != nil {
712 panic(fmt.Errorf("Failed to generate lineage: %v", err))
713 }
714 s.Lineage = lineage
708 log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) 715 log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
709 } else { 716 } else {
710 log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) 717 log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
@@ -977,6 +984,10 @@ type ModuleState struct {
977	// always disjoint, so the path represents a module tree             984	// always disjoint, so the path represents a module tree
978 Path []string `json:"path"` 985 Path []string `json:"path"`
979 986
987 // Locals are kept only transiently in-memory, because we can always
988 // re-compute them.
989 Locals map[string]interface{} `json:"-"`
990
980 // Outputs declared by the module and maintained for each module 991 // Outputs declared by the module and maintained for each module
981 // even though only the root module technically needs to be kept. 992 // even though only the root module technically needs to be kept.
982 // This allows operators to inspect values at the boundaries. 993 // This allows operators to inspect values at the boundaries.
@@ -1083,7 +1094,7 @@ func (m *ModuleState) Orphans(c *config.Config) []string {
1083 defer m.Unlock() 1094 defer m.Unlock()
1084 1095
1085 keys := make(map[string]struct{}) 1096 keys := make(map[string]struct{})
1086 for k, _ := range m.Resources { 1097 for k := range m.Resources {
1087 keys[k] = struct{}{} 1098 keys[k] = struct{}{}
1088 } 1099 }
1089 1100
@@ -1091,7 +1102,7 @@ func (m *ModuleState) Orphans(c *config.Config) []string {
1091 for _, r := range c.Resources { 1102 for _, r := range c.Resources {
1092 delete(keys, r.Id()) 1103 delete(keys, r.Id())
1093 1104
1094 for k, _ := range keys { 1105 for k := range keys {
1095 if strings.HasPrefix(k, r.Id()+".") { 1106 if strings.HasPrefix(k, r.Id()+".") {
1096 delete(keys, k) 1107 delete(keys, k)
1097 } 1108 }
@@ -1100,7 +1111,32 @@ func (m *ModuleState) Orphans(c *config.Config) []string {
1100 } 1111 }
1101 1112
1102 result := make([]string, 0, len(keys)) 1113 result := make([]string, 0, len(keys))
1103 for k, _ := range keys { 1114 for k := range keys {
1115 result = append(result, k)
1116 }
1117
1118 return result
1119}
1120
1121// RemovedOutputs returns a list of outputs that are in the State but aren't
1122// present in the configuration itself.
1123func (m *ModuleState) RemovedOutputs(c *config.Config) []string {
1124 m.Lock()
1125 defer m.Unlock()
1126
1127 keys := make(map[string]struct{})
1128 for k := range m.Outputs {
1129 keys[k] = struct{}{}
1130 }
1131
1132 if c != nil {
1133 for _, o := range c.Outputs {
1134 delete(keys, o.Name)
1135 }
1136 }
1137
1138 result := make([]string, 0, len(keys))
1139 for k := range keys {
1104 result = append(result, k) 1140 result = append(result, k)
1105 } 1141 }
1106 1142
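A usage sketch of the new RemovedOutputs helper, assuming a *ModuleState m and the matching *config.Config cfg; the pruning function is hypothetical.

func pruneRemovedOutputs(m *ModuleState, cfg *config.Config) {
	for _, name := range m.RemovedOutputs(cfg) {
		delete(m.Outputs, name) // output no longer present in configuration
	}
}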
@@ -1308,6 +1344,10 @@ func (m *ModuleState) String() string {
1308 return buf.String() 1344 return buf.String()
1309} 1345}
1310 1346
1347func (m *ModuleState) Empty() bool {
1348 return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0
1349}
1350
1311// ResourceStateKey is a structured representation of the key used for the 1351// ResourceStateKey is a structured representation of the key used for the
1312// ModuleState.Resources mapping 1352// ModuleState.Resources mapping
1313type ResourceStateKey struct { 1353type ResourceStateKey struct {
@@ -1681,7 +1721,20 @@ func (s *InstanceState) Equal(other *InstanceState) bool {
1681 // We only do the deep check if both are non-nil. If one is nil 1721 // We only do the deep check if both are non-nil. If one is nil
1682 // we treat it as equal since their lengths are both zero (check 1722 // we treat it as equal since their lengths are both zero (check
1683 // above). 1723 // above).
1684 if !reflect.DeepEqual(s.Meta, other.Meta) { 1724 //
1725 // Since this can contain numeric values that may change types during
1726 // serialization, let's compare the serialized values.
1727 sMeta, err := json.Marshal(s.Meta)
1728 if err != nil {
1729 // marshaling primitives shouldn't ever error out
1730 panic(err)
1731 }
1732 otherMeta, err := json.Marshal(other.Meta)
1733 if err != nil {
1734 panic(err)
1735 }
1736
1737 if !bytes.Equal(sMeta, otherMeta) {
1685 return false 1738 return false
1686 } 1739 }
1687 } 1740 }
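A self-contained demonstration of why the comparison above moved from reflect.DeepEqual to serialized values: numeric Meta entries change Go type across a JSON round trip, while their serialized forms stay identical.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	a := map[string]interface{}{"schema_version": 1} // int before serialization
	var b map[string]interface{}
	buf, _ := json.Marshal(a)
	json.Unmarshal(buf, &b) // "schema_version" decodes as float64(1)

	fmt.Println(reflect.DeepEqual(a, b)) // false: int vs float64

	aj, _ := json.Marshal(a)
	bj, _ := json.Marshal(b)
	fmt.Println(bytes.Equal(aj, bj)) // true: the serialized forms agree
}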
@@ -1824,11 +1877,19 @@ var ErrNoState = errors.New("no state")
1824// ReadState reads a state structure out of a reader in the format that 1877// ReadState reads a state structure out of a reader in the format that
1825// was written by WriteState. 1878// was written by WriteState.
1826func ReadState(src io.Reader) (*State, error) { 1879func ReadState(src io.Reader) (*State, error) {
1880 // check for a nil file specifically, since that produces a platform
1881 // specific error if we try to use it in a bufio.Reader.
1882 if f, ok := src.(*os.File); ok && f == nil {
1883 return nil, ErrNoState
1884 }
1885
1827 buf := bufio.NewReader(src) 1886 buf := bufio.NewReader(src)
1887
1828 if _, err := buf.Peek(1); err != nil { 1888 if _, err := buf.Peek(1); err != nil {
1829 // the error is either io.EOF or "invalid argument", and both are from 1889 if err == io.EOF {
1830 // an empty state. 1890 return nil, ErrNoState
1831 return nil, ErrNoState 1891 }
1892 return nil, err
1832 } 1893 }
1833 1894
1834 if err := testForV0State(buf); err != nil { 1895 if err := testForV0State(buf); err != nil {
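A small sketch of the behavior this hunk introduces, assuming package terraform; readStateIfAny is a hypothetical wrapper. A nil *os.File or an empty reader becomes ErrNoState, while other read failures are now returned instead of being masked.

func readStateIfAny(f *os.File) (*State, error) {
	s, err := ReadState(f)
	if err == ErrNoState {
		return nil, nil // no prior state; caller starts fresh
	}
	return s, err
}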
@@ -1891,7 +1952,7 @@ func ReadState(src io.Reader) (*State, error) {
1891 result = v3State 1952 result = v3State
1892 default: 1953 default:
1893 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", 1954 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1894 SemVersion.String(), versionIdentifier.Version) 1955 tfversion.SemVer.String(), versionIdentifier.Version)
1895 } 1956 }
1896 1957
1897 // If we reached this place we must have a result set 1958 // If we reached this place we must have a result set
@@ -1935,7 +1996,7 @@ func ReadStateV2(jsonBytes []byte) (*State, error) {
1935 // version that we don't understand 1996 // version that we don't understand
1936 if state.Version > StateVersion { 1997 if state.Version > StateVersion {
1937 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", 1998 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1938 SemVersion.String(), state.Version) 1999 tfversion.SemVer.String(), state.Version)
1939 } 2000 }
1940 2001
1941 // Make sure the version is semantic 2002 // Make sure the version is semantic
@@ -1970,7 +2031,7 @@ func ReadStateV3(jsonBytes []byte) (*State, error) {
1970 // version that we don't understand 2031 // version that we don't understand
1971 if state.Version > StateVersion { 2032 if state.Version > StateVersion {
1972 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", 2033 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1973 SemVersion.String(), state.Version) 2034 tfversion.SemVer.String(), state.Version)
1974 } 2035 }
1975 2036
1976 // Make sure the version is semantic 2037 // Make sure the version is semantic
@@ -2126,6 +2187,19 @@ func (s moduleStateSort) Swap(i, j int) {
2126 s[i], s[j] = s[j], s[i] 2187 s[i], s[j] = s[j], s[i]
2127} 2188}
2128 2189
 2190// CheckStateVersion returns an error if the state is not compatible with the
2191// current version of terraform.
2192func CheckStateVersion(state *State) error {
2193 if state == nil {
2194 return nil
2195 }
2196
2197 if state.FromFutureTerraform() {
2198 return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion)
2199 }
2200 return nil
2201}
2202
2129const stateValidateErrMultiModule = ` 2203const stateValidateErrMultiModule = `
2130Multiple modules with the same path: %s 2204Multiple modules with the same path: %s
2131 2205
@@ -2134,3 +2208,11 @@ in your state file that point to the same module. This will cause Terraform
2134to behave in unexpected and error prone ways and is invalid. Please back up 2208to behave in unexpected and error prone ways and is invalid. Please back up
2135and modify your state file manually to resolve this. 2209and modify your state file manually to resolve this.
2136` 2210`
2211
2212const stateInvalidTerraformVersionErr = `
2213Terraform doesn't allow running any operations against a state
2214that was written by a future Terraform version. The state is
2215reporting it is written by Terraform '%s'
2216
2217Please run at least that version of Terraform to continue.
2218`
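The state.go hunks above distinguish a genuinely empty state source (ErrNoState) from a real read error, and add CheckStateVersion to refuse states written by a newer Terraform. A minimal sketch of the same read-and-guard pattern, using simplified stand-in types (state, readState, checkStateVersion, errNoState) and a plain string comparison instead of the real State and tfversion packages:

package main

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"strings"
)

var errNoState = errors.New("no state")

// state is a stand-in for terraform's State; only the writer's version matters here.
type state struct {
	TFVersion string
}

// readState mirrors the empty-vs-error distinction above: io.EOF on the
// first peek means an empty state, anything else is a genuine error.
func readState(src io.Reader) (*state, error) {
	buf := bufio.NewReader(src)
	if _, err := buf.Peek(1); err != nil {
		if err == io.EOF {
			return nil, errNoState
		}
		return nil, err
	}
	// real decoding of the versioned state format would happen here
	return &state{TFVersion: "0.11.0"}, nil
}

// checkStateVersion refuses to operate on a state written by a newer
// version; a plain string comparison stands in for real semver handling.
func checkStateVersion(s *state, current string) error {
	if s == nil {
		return nil
	}
	if strings.Compare(s.TFVersion, current) > 0 {
		return fmt.Errorf("state was written by future version %q", s.TFVersion)
	}
	return nil
}

func main() {
	if _, err := readState(strings.NewReader("")); err == errNoState {
		fmt.Println("empty input treated as no state, not as a failure")
	}
	fmt.Println(checkStateVersion(&state{TFVersion: "0.12.0"}, "0.11.0"))
}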
diff --git a/vendor/github.com/hashicorp/terraform/terraform/test_failure b/vendor/github.com/hashicorp/terraform/terraform/test_failure
deleted file mode 100644
index 5d3ad1a..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/test_failure
+++ /dev/null
@@ -1,9 +0,0 @@
1--- FAIL: TestContext2Plan_moduleProviderInherit (0.01s)
2 context_plan_test.go:552: bad: []string{"child"}
3map[string]dag.Vertex{}
4"module.middle.null"
5map[string]dag.Vertex{}
6"module.middle.module.inner.null"
7map[string]dag.Vertex{}
8"aws"
9FAIL
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
index f4a431a..0e47f20 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -1,6 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log"
5
4 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
5) 7)
6 8
@@ -40,6 +42,9 @@ func (t *graphTransformerMulti) Transform(g *Graph) error {
40 if err := t.Transform(g); err != nil { 42 if err := t.Transform(g); err != nil {
41 return err 43 return err
42 } 44 }
45 log.Printf(
46 "[TRACE] Graph after step %T:\n\n%s",
47 t, g.StringWithNodeTypes())
43 } 48 }
44 49
45 return nil 50 return nil
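The only change to transform.go is the trace log emitted after each transformation step. A cut-down stand-in of that pattern (graph, transformer and multi below are simplified names, not the vendored Graph, GraphTransformer and graphTransformerMulti), showing steps applied in order with the graph logged after every one:

package main

import (
	"fmt"
	"log"
)

// graph is a stand-in for terraform's Graph.
type graph struct{ nodes []string }

func (g *graph) String() string { return fmt.Sprintf("%v", g.nodes) }

// transformer mirrors GraphTransformer: each step mutates the graph in place.
type transformer interface {
	Transform(*graph) error
}

type addNode struct{ name string }

func (t addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

// multi runs each step in order and logs the graph after every one,
// which is what the log.Printf added in the hunk above does.
type multi []transformer

func (m multi) Transform(g *graph) error {
	for _, t := range m {
		if err := t.Transform(g); err != nil {
			return err
		}
		log.Printf("[TRACE] Graph after step %T:\n\n%s", t, g)
	}
	return nil
}

func main() {
	g := &graph{}
	if err := (multi{addNode{"provider.aws"}, addNode{"aws_instance.web"}}).Transform(g); err != nil {
		log.Fatal(err)
	}
	fmt.Println(g)
}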
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
index 10506ea..39cf097 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -1,10 +1,7 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config" 4 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8) 5)
9 6
10// GraphNodeAttachProvider is an interface that must be implemented by nodes 7// GraphNodeAttachProvider is an interface that must be implemented by nodes
@@ -19,62 +16,3 @@ type GraphNodeAttachProvider interface {
19 // Sets the configuration 16 // Sets the configuration
20 AttachProvider(*config.ProviderConfig) 17 AttachProvider(*config.ProviderConfig)
21} 18}
22
23// AttachProviderConfigTransformer goes through the graph and attaches
24// provider configuration structures to nodes that implement the interfaces
25// above.
26//
27// The attached configuration structures are directly from the configuration.
28// If they're going to be modified, a copy should be made.
29type AttachProviderConfigTransformer struct {
30 Module *module.Tree // Module is the root module for the config
31}
32
33func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
34 if err := t.attachProviders(g); err != nil {
35 return err
36 }
37
38 return nil
39}
40
41func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
42 // Go through and find GraphNodeAttachProvider
43 for _, v := range g.Vertices() {
44 // Only care about GraphNodeAttachProvider implementations
45 apn, ok := v.(GraphNodeAttachProvider)
46 if !ok {
47 continue
48 }
49
50 // Determine what we're looking for
51 path := normalizeModulePath(apn.Path())
52 path = path[1:]
53 name := apn.ProviderName()
54 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
55
56 // Get the configuration.
57 tree := t.Module.Child(path)
58 if tree == nil {
59 continue
60 }
61
62 // Go through the provider configs to find the matching config
63 for _, p := range tree.Config().ProviderConfigs {
64 // Build the name, which is "name.alias" if an alias exists
65 current := p.Name
66 if p.Alias != "" {
67 current += "." + p.Alias
68 }
69
70 // If the configs match then attach!
71 if current == name {
72 log.Printf("[TRACE] Attaching provider config: %#v", p)
73 apn.AttachProvider(p)
74 break
75 }
76 }
77 }
78
79 return nil
80}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
index 2148cef..87a1f9c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -12,6 +12,9 @@ type DeposedTransformer struct {
12 // View, if non-empty, is the ModuleState.View used around the state 12 // View, if non-empty, is the ModuleState.View used around the state
13 // to find deposed resources. 13 // to find deposed resources.
14 View string 14 View string
15
 16 // The provider used by the resources which were deposed
17 ResolvedProvider string
15} 18}
16 19
17func (t *DeposedTransformer) Transform(g *Graph) error { 20func (t *DeposedTransformer) Transform(g *Graph) error {
@@ -33,14 +36,16 @@ func (t *DeposedTransformer) Transform(g *Graph) error {
33 if len(rs.Deposed) == 0 { 36 if len(rs.Deposed) == 0 {
34 continue 37 continue
35 } 38 }
39
36 deposed := rs.Deposed 40 deposed := rs.Deposed
37 41
38 for i, _ := range deposed { 42 for i, _ := range deposed {
39 g.Add(&graphNodeDeposedResource{ 43 g.Add(&graphNodeDeposedResource{
40 Index: i, 44 Index: i,
41 ResourceName: k, 45 ResourceName: k,
42 ResourceType: rs.Type, 46 ResourceType: rs.Type,
43 Provider: rs.Provider, 47 ProviderName: rs.Provider,
48 ResolvedProvider: t.ResolvedProvider,
44 }) 49 })
45 } 50 }
46 } 51 }
@@ -50,18 +55,23 @@ func (t *DeposedTransformer) Transform(g *Graph) error {
50 55
51// graphNodeDeposedResource is the graph vertex representing a deposed resource. 56// graphNodeDeposedResource is the graph vertex representing a deposed resource.
52type graphNodeDeposedResource struct { 57type graphNodeDeposedResource struct {
53 Index int 58 Index int
54 ResourceName string 59 ResourceName string
55 ResourceType string 60 ResourceType string
56 Provider string 61 ProviderName string
62 ResolvedProvider string
57} 63}
58 64
59func (n *graphNodeDeposedResource) Name() string { 65func (n *graphNodeDeposedResource) Name() string {
60 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) 66 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
61} 67}
62 68
63func (n *graphNodeDeposedResource) ProvidedBy() []string { 69func (n *graphNodeDeposedResource) ProvidedBy() string {
64 return []string{resourceProvider(n.ResourceName, n.Provider)} 70 return resourceProvider(n.ResourceName, n.ProviderName)
71}
72
73func (n *graphNodeDeposedResource) SetProvider(p string) {
74 n.ResolvedProvider = p
65} 75}
66 76
67// GraphNodeEvalable impl. 77// GraphNodeEvalable impl.
@@ -81,7 +91,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode {
81 Node: &EvalSequence{ 91 Node: &EvalSequence{
82 Nodes: []EvalNode{ 92 Nodes: []EvalNode{
83 &EvalGetProvider{ 93 &EvalGetProvider{
84 Name: n.ProvidedBy()[0], 94 Name: n.ResolvedProvider,
85 Output: &provider, 95 Output: &provider,
86 }, 96 },
87 &EvalReadStateDeposed{ 97 &EvalReadStateDeposed{
@@ -98,7 +108,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode {
98 &EvalWriteStateDeposed{ 108 &EvalWriteStateDeposed{
99 Name: n.ResourceName, 109 Name: n.ResourceName,
100 ResourceType: n.ResourceType, 110 ResourceType: n.ResourceType,
101 Provider: n.Provider, 111 Provider: n.ResolvedProvider,
102 State: &state, 112 State: &state,
103 Index: n.Index, 113 Index: n.Index,
104 }, 114 },
@@ -114,7 +124,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode {
114 Node: &EvalSequence{ 124 Node: &EvalSequence{
115 Nodes: []EvalNode{ 125 Nodes: []EvalNode{
116 &EvalGetProvider{ 126 &EvalGetProvider{
117 Name: n.ProvidedBy()[0], 127 Name: n.ResolvedProvider,
118 Output: &provider, 128 Output: &provider,
119 }, 129 },
120 &EvalReadStateDeposed{ 130 &EvalReadStateDeposed{
@@ -147,7 +157,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode {
147 &EvalWriteStateDeposed{ 157 &EvalWriteStateDeposed{
148 Name: n.ResourceName, 158 Name: n.ResourceName,
149 ResourceType: n.ResourceType, 159 ResourceType: n.ResourceType,
150 Provider: n.Provider, 160 Provider: n.ResolvedProvider,
151 State: &state, 161 State: &state,
152 Index: n.Index, 162 Index: n.Index,
153 }, 163 },
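This file adopts the interface change made in transform_provider.go: ProvidedBy now returns a single provider name and consumers gain SetProvider so the transformer can record the resolved provider address on the node. A small sketch of that consumer shape, with deposedResource as a cut-down stand-in for graphNodeDeposedResource:

package main

import "fmt"

// providerConsumer mirrors the updated GraphNodeProviderConsumer: a single
// requested provider plus a setter for the resolved provider address.
type providerConsumer interface {
	ProvidedBy() string
	SetProvider(string)
}

// deposedResource is a cut-down stand-in for graphNodeDeposedResource.
type deposedResource struct {
	ResourceName     string
	ProviderName     string // provider name recorded in state, e.g. "aws"
	ResolvedProvider string // full address filled in by the transformer
}

func (n *deposedResource) ProvidedBy() string   { return n.ProviderName }
func (n *deposedResource) SetProvider(p string) { n.ResolvedProvider = p }

func main() {
	n := &deposedResource{ResourceName: "aws_instance.web", ProviderName: "aws"}
	var c providerConsumer = n

	// a provider transformer resolves the request and records the result,
	// which the eval tree then uses instead of ProvidedBy()[0]
	fmt.Println("requested:", c.ProvidedBy())
	c.SetProvider("provider.aws")
	fmt.Println("resolved: ", n.ResolvedProvider)
}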
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
index 22be1ab..a06ff29 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -119,17 +119,15 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
119 return &NodeApplyableProvider{NodeAbstractProvider: a} 119 return &NodeApplyableProvider{NodeAbstractProvider: a}
120 } 120 }
121 steps := []GraphTransformer{ 121 steps := []GraphTransformer{
122 // Add the local values
123 &LocalTransformer{Module: t.Module},
124
122 // Add outputs and metadata 125 // Add outputs and metadata
123 &OutputTransformer{Module: t.Module}, 126 &OutputTransformer{Module: t.Module},
124 &AttachResourceConfigTransformer{Module: t.Module}, 127 &AttachResourceConfigTransformer{Module: t.Module},
125 &AttachStateTransformer{State: t.State}, 128 &AttachStateTransformer{State: t.State},
126 129
127 // Add providers since they can affect destroy order as well 130 TransformProviders(nil, providerFn, t.Module),
128 &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
129 &ProviderTransformer{},
130 &DisableProviderTransformer{},
131 &ParentProviderTransformer{},
132 &AttachProviderConfigTransformer{Module: t.Module},
133 131
134 // Add all the variables. We can depend on resources through 132 // Add all the variables. We can depend on resources through
135 // variables due to module parameters, and we need to properly 133 // variables due to module parameters, and we need to properly
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
index 081df2f..fcbff65 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -21,9 +21,9 @@ func (t *ImportStateTransformer) Transform(g *Graph) error {
21 } 21 }
22 22
23 nodes = append(nodes, &graphNodeImportState{ 23 nodes = append(nodes, &graphNodeImportState{
24 Addr: addr, 24 Addr: addr,
25 ID: target.ID, 25 ID: target.ID,
26 Provider: target.Provider, 26 ProviderName: target.Provider,
27 }) 27 })
28 } 28 }
29 29
@@ -36,9 +36,10 @@ func (t *ImportStateTransformer) Transform(g *Graph) error {
36} 36}
37 37
38type graphNodeImportState struct { 38type graphNodeImportState struct {
39 Addr *ResourceAddress // Addr is the resource address to import to 39 Addr *ResourceAddress // Addr is the resource address to import to
40 ID string // ID is the ID to import as 40 ID string // ID is the ID to import as
41 Provider string // Provider string 41 ProviderName string // Provider string
42 ResolvedProvider string // provider node address
42 43
43 states []*InstanceState 44 states []*InstanceState
44} 45}
@@ -47,8 +48,12 @@ func (n *graphNodeImportState) Name() string {
47 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) 48 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
48} 49}
49 50
50func (n *graphNodeImportState) ProvidedBy() []string { 51func (n *graphNodeImportState) ProvidedBy() string {
51 return []string{resourceProvider(n.Addr.Type, n.Provider)} 52 return resourceProvider(n.Addr.Type, n.ProviderName)
53}
54
55func (n *graphNodeImportState) SetProvider(p string) {
56 n.ResolvedProvider = p
52} 57}
53 58
54// GraphNodeSubPath 59// GraphNodeSubPath
@@ -72,7 +77,7 @@ func (n *graphNodeImportState) EvalTree() EvalNode {
72 return &EvalSequence{ 77 return &EvalSequence{
73 Nodes: []EvalNode{ 78 Nodes: []EvalNode{
74 &EvalGetProvider{ 79 &EvalGetProvider{
75 Name: n.ProvidedBy()[0], 80 Name: n.ResolvedProvider,
76 Output: &provider, 81 Output: &provider,
77 }, 82 },
78 &EvalImportState{ 83 &EvalImportState{
@@ -149,10 +154,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
149 // is safe. 154 // is safe.
150 for i, state := range n.states { 155 for i, state := range n.states {
151 g.Add(&graphNodeImportStateSub{ 156 g.Add(&graphNodeImportStateSub{
152 Target: addrs[i], 157 Target: addrs[i],
153 Path_: n.Path(), 158 Path_: n.Path(),
154 State: state, 159 State: state,
155 Provider: n.Provider, 160 ProviderName: n.ProviderName,
161 ResolvedProvider: n.ResolvedProvider,
156 }) 162 })
157 } 163 }
158 164
@@ -170,10 +176,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
170// and is part of the subgraph. This node is responsible for refreshing 176// and is part of the subgraph. This node is responsible for refreshing
171// and adding a resource to the state once it is imported. 177// and adding a resource to the state once it is imported.
172type graphNodeImportStateSub struct { 178type graphNodeImportStateSub struct {
173 Target *ResourceAddress 179 Target *ResourceAddress
174 State *InstanceState 180 State *InstanceState
175 Path_ []string 181 Path_ []string
176 Provider string 182 ProviderName string
183 ResolvedProvider string
177} 184}
178 185
179func (n *graphNodeImportStateSub) Name() string { 186func (n *graphNodeImportStateSub) Name() string {
@@ -216,7 +223,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode {
216 return &EvalSequence{ 223 return &EvalSequence{
217 Nodes: []EvalNode{ 224 Nodes: []EvalNode{
218 &EvalGetProvider{ 225 &EvalGetProvider{
219 Name: resourceProvider(info.Type, n.Provider), 226 Name: n.ResolvedProvider,
220 Output: &provider, 227 Output: &provider,
221 }, 228 },
222 &EvalRefresh{ 229 &EvalRefresh{
@@ -233,7 +240,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode {
233 &EvalWriteState{ 240 &EvalWriteState{
234 Name: key.String(), 241 Name: key.String(),
235 ResourceType: info.Type, 242 ResourceType: info.Type,
236 Provider: resourceProvider(info.Type, n.Provider), 243 Provider: n.ResolvedProvider,
237 State: &state, 244 State: &state,
238 }, 245 },
239 }, 246 },
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
new file mode 100644
index 0000000..95ecfc0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
@@ -0,0 +1,40 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// LocalTransformer is a GraphTransformer that adds all the local values
8// from the configuration to the graph.
9type LocalTransformer struct {
10 Module *module.Tree
11}
12
13func (t *LocalTransformer) Transform(g *Graph) error {
14 return t.transformModule(g, t.Module)
15}
16
17func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error {
18 if m == nil {
19 // Can't have any locals if there's no config
20 return nil
21 }
22
23 for _, local := range m.Config().Locals {
24 node := &NodeLocal{
25 PathValue: normalizeModulePath(m.Path()),
26 Config: local,
27 }
28
29 g.Add(node)
30 }
31
32 // Also populate locals for child modules
33 for _, c := range m.Children() {
34 if err := t.transformModule(g, c); err != nil {
35 return err
36 }
37 }
38
39 return nil
40}
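The new LocalTransformer walks the module tree and adds one node per local value, recursing into child modules. A minimal sketch of that walk, with moduleTree standing in for module.Tree and a callback standing in for g.Add:

package main

import "fmt"

// moduleTree is a stand-in for module.Tree: a module path, its local
// values, and its child modules.
type moduleTree struct {
	path     []string
	locals   []string
	children []*moduleTree
}

// collectLocals mirrors LocalTransformer.transformModule: add a node for
// every local in this module, then recurse into the children.
func collectLocals(m *moduleTree, add func(path []string, name string)) {
	if m == nil {
		return // no config means no locals
	}
	for _, l := range m.locals {
		add(m.path, l)
	}
	for _, c := range m.children {
		collectLocals(c, add)
	}
}

func main() {
	root := &moduleTree{
		path:   []string{"root"},
		locals: []string{"region"},
		children: []*moduleTree{
			{path: []string{"root", "vpc"}, locals: []string{"cidr"}},
		},
	}
	collectLocals(root, func(path []string, name string) {
		fmt.Printf("local.%s in %v\n", name, path)
	})
}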
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
index 49568d5..aea2bd0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -21,43 +21,32 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error {
21 return nil 21 return nil
22 } 22 }
23 23
24 return t.transform(g, t.Module) 24 for _, ms := range t.State.Modules {
25} 25 if err := t.transform(g, ms); err != nil {
26 26 return err
27func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
28 // Get our configuration, and recurse into children
29 var c *config.Config
30 if m != nil {
31 c = m.Config()
32 for _, child := range m.Children() {
33 if err := t.transform(g, child); err != nil {
34 return err
35 }
36 } 27 }
37 } 28 }
29 return nil
30}
38 31
39 // Get the state. If there is no state, then we have no orphans! 32func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error {
40 path := normalizeModulePath(m.Path()) 33 if ms == nil {
41 state := t.State.ModuleByPath(path)
42 if state == nil {
43 return nil 34 return nil
44 } 35 }
45 36
46 // Make a map of the valid outputs 37 path := normalizeModulePath(ms.Path)
47 valid := make(map[string]struct{})
48 for _, o := range c.Outputs {
49 valid[o.Name] = struct{}{}
50 }
51 38
52 // Go through the outputs and find the ones that aren't in our config. 39 // Get the config for this path, which is nil if the entire module has been
53 for n, _ := range state.Outputs { 40 // removed.
54 // If it is in the valid map, then ignore 41 var c *config.Config
55 if _, ok := valid[n]; ok { 42 if m := t.Module.Child(path[1:]); m != nil {
56 continue 43 c = m.Config()
57 } 44 }
58 45
59 // Orphan! 46 // add all the orphaned outputs to the graph
47 for _, n := range ms.RemovedOutputs(c) {
60 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) 48 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
49
61 } 50 }
62 51
63 return nil 52 return nil
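The orphan-output transformer now iterates module states directly and leans on ModuleState.RemovedOutputs (added earlier in state.go) to find outputs that exist in state but not in the configuration. The underlying set-difference logic, sketched with plain maps and slices instead of the real state and config types:

package main

import (
	"fmt"
	"sort"
)

// removedOutputs mirrors what ModuleState.RemovedOutputs computes for the
// transformer above: every output in the state that no longer appears in
// the module's configuration is an orphan.
func removedOutputs(stateOutputs map[string]string, configOutputs []string) []string {
	keys := make(map[string]struct{}, len(stateOutputs))
	for k := range stateOutputs {
		keys[k] = struct{}{}
	}
	for _, name := range configOutputs {
		delete(keys, name)
	}
	result := make([]string, 0, len(keys))
	for k := range keys {
		result = append(result, k)
	}
	sort.Strings(result) // deterministic order for the example
	return result
}

func main() {
	state := map[string]string{"ip": "10.0.0.1", "dns": "example.internal"}
	config := []string{"ip"} // "dns" was removed from the configuration
	fmt.Println(removedOutputs(state, config)) // [dns]
}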
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
index b260f4c..faa25e4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -1,7 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log"
5
4 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
5) 8)
6 9
7// OutputTransformer is a GraphTransformer that adds all the outputs 10// OutputTransformer is a GraphTransformer that adds all the outputs
@@ -41,11 +44,6 @@ func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
41 44
42 // Add all outputs here 45 // Add all outputs here
43 for _, o := range os { 46 for _, o := range os {
44 // Build the node.
45 //
46 // NOTE: For now this is just an "applyable" output. As we build
47 // new graph builders for the other operations I suspect we'll
48 // find a way to parameterize this, require new transforms, etc.
49 node := &NodeApplyableOutput{ 47 node := &NodeApplyableOutput{
50 PathValue: normalizeModulePath(m.Path()), 48 PathValue: normalizeModulePath(m.Path()),
51 Config: o, 49 Config: o,
@@ -57,3 +55,41 @@ func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
57 55
58 return nil 56 return nil
59} 57}
58
59// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete
60// outputs during destroy. We need to do this to ensure that no stale outputs
61// are ever left in the state.
62type DestroyOutputTransformer struct {
63}
64
65func (t *DestroyOutputTransformer) Transform(g *Graph) error {
66 for _, v := range g.Vertices() {
67 output, ok := v.(*NodeApplyableOutput)
68 if !ok {
69 continue
70 }
71
72 // create the destroy node for this output
73 node := &NodeDestroyableOutput{
74 PathValue: output.PathValue,
75 Config: output.Config,
76 }
77
78 log.Printf("[TRACE] creating %s", node.Name())
79 g.Add(node)
80
81 deps, err := g.Descendents(v)
82 if err != nil {
83 return err
84 }
85
86 // the destroy node must depend on the eval node
87 deps.Add(v)
88
89 for _, d := range deps.List() {
90 log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d))
91 g.Connect(dag.BasicEdge(node, d))
92 }
93 }
94 return nil
95}
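DestroyOutputTransformer adds one destroy node per applyable output and connects it to the output plus everything the output depends on, so the destroy node can only be evaluated last. A toy adjacency-map sketch of that descendant walk and the added edges (edges and descendents below are simplified stand-ins for the dag package):

package main

import "fmt"

// edges maps a node to the nodes it depends on.
type edges map[string][]string

// descendents walks everything a node depends on, directly or
// transitively, mirroring g.Descendents(v) in the transformer above.
func descendents(g edges, from string) []string {
	seen := map[string]bool{}
	var out []string
	var walk func(string)
	walk = func(n string) {
		for _, d := range g[n] {
			if !seen[d] {
				seen[d] = true
				out = append(out, d)
				walk(d)
			}
		}
	}
	walk(from)
	return out
}

func main() {
	g := edges{
		"output.ip":        {"aws_instance.web"},
		"aws_instance.web": {"provider.aws"},
	}
	// the destroy node depends on the output node itself plus all of its
	// descendents, so it can only run after every one of them.
	deps := append([]string{"output.ip"}, descendents(g, "output.ip")...)
	for _, d := range deps {
		g["output.ip (destroy)"] = append(g["output.ip (destroy)"], d)
	}
	fmt.Println(g["output.ip (destroy)"])
}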
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
index b9695d5..c4772b4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -1,19 +1,46 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors"
4 "fmt" 5 "fmt"
5 "log" 6 "log"
6 "strings" 7 "strings"
7 8
8 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module"
9 "github.com/hashicorp/terraform/dag" 12 "github.com/hashicorp/terraform/dag"
10) 13)
11 14
15func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer {
16 return GraphTransformMulti(
17 // Add providers from the config
18 &ProviderConfigTransformer{
19 Module: mod,
20 Providers: providers,
21 Concrete: concrete,
22 },
23 // Add any remaining missing providers
24 &MissingProviderTransformer{
25 Providers: providers,
26 Concrete: concrete,
27 },
28 // Connect the providers
29 &ProviderTransformer{},
30 // Remove unused providers and proxies
31 &PruneProviderTransformer{},
32 // Connect provider to their parent provider nodes
33 &ParentProviderTransformer{},
34 )
35}
36
12// GraphNodeProvider is an interface that nodes that can be a provider 37// GraphNodeProvider is an interface that nodes that can be a provider
13// must implement. The ProviderName returned is the name of the provider 38// must implement.
14// they satisfy. 39// ProviderName returns the name of the provider this satisfies.
40// Name returns the full name of the provider in the config.
15type GraphNodeProvider interface { 41type GraphNodeProvider interface {
16 ProviderName() string 42 ProviderName() string
43 Name() string
17} 44}
18 45
19// GraphNodeCloseProvider is an interface that nodes that can be a close 46// GraphNodeCloseProvider is an interface that nodes that can be a close
@@ -25,9 +52,12 @@ type GraphNodeCloseProvider interface {
25 52
26// GraphNodeProviderConsumer is an interface that nodes that require 53// GraphNodeProviderConsumer is an interface that nodes that require
27// a provider must implement. ProvidedBy must return the name of the provider 54// a provider must implement. ProvidedBy must return the name of the provider
28// to use. 55// to use. This may be a provider by type, type.alias or a fully resolved
56// provider name
29type GraphNodeProviderConsumer interface { 57type GraphNodeProviderConsumer interface {
30 ProvidedBy() []string 58 ProvidedBy() string
59 // Set the resolved provider address for this resource.
60 SetProvider(string)
31} 61}
32 62
33// ProviderTransformer is a GraphTransformer that maps resources to 63// ProviderTransformer is a GraphTransformer that maps resources to
@@ -41,18 +71,52 @@ func (t *ProviderTransformer) Transform(g *Graph) error {
41 m := providerVertexMap(g) 71 m := providerVertexMap(g)
42 for _, v := range g.Vertices() { 72 for _, v := range g.Vertices() {
43 if pv, ok := v.(GraphNodeProviderConsumer); ok { 73 if pv, ok := v.(GraphNodeProviderConsumer); ok {
44 for _, p := range pv.ProvidedBy() { 74 p := pv.ProvidedBy()
45 target := m[providerMapKey(p, pv)] 75
46 if target == nil { 76 key := providerMapKey(p, pv)
47 println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv))) 77 target := m[key]
48 err = multierror.Append(err, fmt.Errorf( 78
49 "%s: provider %s couldn't be found", 79 sp, ok := pv.(GraphNodeSubPath)
50 dag.VertexName(v), p)) 80 if !ok && target == nil {
51 continue 81 // no target, and no path to walk up
82 err = multierror.Append(err, fmt.Errorf(
83 "%s: provider %s couldn't be found",
84 dag.VertexName(v), p))
85 break
86 }
87
88 // if we don't have a provider at this level, walk up the path looking for one
89 for i := 1; target == nil; i++ {
90 path := normalizeModulePath(sp.Path())
91 if len(path) < i {
92 break
93 }
94
95 key = ResolveProviderName(p, path[:len(path)-i])
96 target = m[key]
97 if target != nil {
98 break
52 } 99 }
100 }
101
102 if target == nil {
103 err = multierror.Append(err, fmt.Errorf(
104 "%s: configuration for %s is not present; a provider configuration block is required for all operations",
105 dag.VertexName(v), p,
106 ))
107 break
108 }
53 109
54 g.Connect(dag.BasicEdge(v, target)) 110 // see if this in an inherited provider
111 if p, ok := target.(*graphNodeProxyProvider); ok {
112 g.Remove(p)
113 target = p.Target()
114 key = target.(GraphNodeProvider).Name()
55 } 115 }
116
117 log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key)
118 pv.SetProvider(key)
119 g.Connect(dag.BasicEdge(v, target))
56 } 120 }
57 } 121 }
58 122
@@ -67,36 +131,32 @@ type CloseProviderTransformer struct{}
67 131
68func (t *CloseProviderTransformer) Transform(g *Graph) error { 132func (t *CloseProviderTransformer) Transform(g *Graph) error {
69 pm := providerVertexMap(g) 133 pm := providerVertexMap(g)
70 cpm := closeProviderVertexMap(g) 134 cpm := make(map[string]*graphNodeCloseProvider)
71 var err error 135 var err error
72 for _, v := range g.Vertices() {
73 if pv, ok := v.(GraphNodeProviderConsumer); ok {
74 for _, p := range pv.ProvidedBy() {
75 key := p
76 source := cpm[key]
77
78 if source == nil {
79 // Create a new graphNodeCloseProvider and add it to the graph
80 source = &graphNodeCloseProvider{ProviderNameValue: p}
81 g.Add(source)
82
83 // Close node needs to depend on provider
84 provider, ok := pm[key]
85 if !ok {
86 err = multierror.Append(err, fmt.Errorf(
87 "%s: provider %s couldn't be found for closing",
88 dag.VertexName(v), p))
89 continue
90 }
91 g.Connect(dag.BasicEdge(source, provider))
92
93 // Make sure we also add the new graphNodeCloseProvider to the map
94 // so we don't create and add any duplicate graphNodeCloseProviders.
95 cpm[key] = source
96 }
97 136
98 // Close node depends on all nodes provided by the provider 137 for _, v := range pm {
99 g.Connect(dag.BasicEdge(source, v)) 138 p := v.(GraphNodeProvider)
139
 140 // get the close provider of this type if we already created it
141 closer := cpm[p.Name()]
142
143 if closer == nil {
144 // create a closer for this provider type
145 closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()}
146 g.Add(closer)
147 cpm[p.Name()] = closer
148 }
149
150 // Close node depends on the provider itself
151 // this is added unconditionally, so it will connect to all instances
152 // of the provider. Extra edges will be removed by transitive
153 // reduction.
154 g.Connect(dag.BasicEdge(closer, p))
155
156 // connect all the provider's resources to the close node
157 for _, s := range g.UpEdges(p).List() {
158 if _, ok := s.(GraphNodeProviderConsumer); ok {
159 g.Connect(dag.BasicEdge(closer, s))
100 } 160 }
101 } 161 }
102 } 162 }
@@ -104,18 +164,14 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
104 return err 164 return err
105} 165}
106 166
107// MissingProviderTransformer is a GraphTransformer that adds nodes 167// MissingProviderTransformer is a GraphTransformer that adds nodes for all
108// for missing providers into the graph. Specifically, it creates provider 168// required providers into the graph. Specifically, it creates provider
109// configuration nodes for all the providers that we support. These are 169// configuration nodes for all the providers that we support. These are pruned
110// pruned later during an optimization pass. 170// later during an optimization pass.
111type MissingProviderTransformer struct { 171type MissingProviderTransformer struct {
112 // Providers is the list of providers we support. 172 // Providers is the list of providers we support.
113 Providers []string 173 Providers []string
114 174
115 // AllowAny will not check that a provider is supported before adding
116 // it to the graph.
117 AllowAny bool
118
119 // Concrete, if set, overrides how the providers are made. 175 // Concrete, if set, overrides how the providers are made.
120 Concrete ConcreteProviderNodeFunc 176 Concrete ConcreteProviderNodeFunc
121} 177}
@@ -128,99 +184,57 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error {
128 } 184 }
129 } 185 }
130 186
131 // Create a set of our supported providers 187 var err error
132 supported := make(map[string]struct{}, len(t.Providers))
133 for _, v := range t.Providers {
134 supported[v] = struct{}{}
135 }
136
137 // Get the map of providers we already have in our graph
138 m := providerVertexMap(g) 188 m := providerVertexMap(g)
139 189 for _, v := range g.Vertices() {
140 // Go through all the provider consumers and make sure we add
141 // that provider if it is missing. We use a for loop here instead
142 // of "range" since we'll modify check as we go to add more to check.
143 check := g.Vertices()
144 for i := 0; i < len(check); i++ {
145 v := check[i]
146
147 pv, ok := v.(GraphNodeProviderConsumer) 190 pv, ok := v.(GraphNodeProviderConsumer)
148 if !ok { 191 if !ok {
149 continue 192 continue
150 } 193 }
151 194
152 // If this node has a subpath, then we use that as a prefix 195 p := pv.ProvidedBy()
153 // into our map to check for an existing provider. 196 // this may be the resolved provider from the state, so we need to get
154 var path []string 197 // the base provider name.
155 if sp, ok := pv.(GraphNodeSubPath); ok { 198 parts := strings.SplitAfter(p, "provider.")
156 raw := normalizeModulePath(sp.Path()) 199 p = parts[len(parts)-1]
157 if len(raw) > len(rootModulePath) {
158 path = raw
159 }
160 }
161 200
162 for _, p := range pv.ProvidedBy() { 201 key := ResolveProviderName(p, nil)
163 key := providerMapKey(p, pv) 202 provider := m[key]
164 if _, ok := m[key]; ok {
165 // This provider already exists as a configure node
166 continue
167 }
168 203
169 // If the provider has an alias in it, we just want the type 204 // we already have it
170 ptype := p 205 if provider != nil {
171 if idx := strings.IndexRune(p, '.'); idx != -1 { 206 continue
172 ptype = p[:idx] 207 }
173 }
174 208
175 if !t.AllowAny { 209 // we don't implicitly create aliased providers
176 if _, ok := supported[ptype]; !ok { 210 if strings.Contains(p, ".") {
177 // If we don't support the provider type, skip it. 211 log.Println("[DEBUG] not adding missing provider alias:", p)
178 // Validation later will catch this as an error. 212 continue
179 continue 213 }
180 }
181 }
182 214
183 // Add the missing provider node to the graph 215 log.Println("[DEBUG] adding missing provider:", p)
184 v := t.Concrete(&NodeAbstractProvider{
185 NameValue: p,
186 PathValue: path,
187 }).(dag.Vertex)
188 if len(path) > 0 {
189 // We'll need the parent provider as well, so let's
190 // add a dummy node to check to make sure that we add
191 // that parent provider.
192 check = append(check, &graphNodeProviderConsumerDummy{
193 ProviderValue: p,
194 PathValue: path[:len(path)-1],
195 })
196 }
197 216
 198 m[key] = g.Add(v) 217 // create the missing top-level provider
199 } 218 provider = t.Concrete(&NodeAbstractProvider{
219 NameValue: p,
220 }).(dag.Vertex)
221
222 m[key] = g.Add(provider)
200 } 223 }
201 224
202 return nil 225 return err
203} 226}
204 227
205// ParentProviderTransformer connects provider nodes to their parents. 228// ParentProviderTransformer connects provider nodes to their parents.
206// 229//
207// This works by finding nodes that are both GraphNodeProviders and 230// This works by finding nodes that are both GraphNodeProviders and
208// GraphNodeSubPath. It then connects the providers to their parent 231// GraphNodeSubPath. It then connects the providers to their parent
209// path. 232// path. The parent provider is always at the root level.
210type ParentProviderTransformer struct{} 233type ParentProviderTransformer struct{}
211 234
212func (t *ParentProviderTransformer) Transform(g *Graph) error { 235func (t *ParentProviderTransformer) Transform(g *Graph) error {
213 // Make a mapping of path to dag.Vertex, where path is: "path.name" 236 pm := providerVertexMap(g)
214 m := make(map[string]dag.Vertex) 237 for _, v := range g.Vertices() {
215
216 // Also create a map that maps a provider to its parent
217 parentMap := make(map[dag.Vertex]string)
218 for _, raw := range g.Vertices() {
219 // If it is the flat version, then make it the non-flat version.
220 // We eventually want to get rid of the flat version entirely so
221 // this is a stop-gap while it still exists.
222 var v dag.Vertex = raw
223
224 // Only care about providers 238 // Only care about providers
225 pn, ok := v.(GraphNodeProvider) 239 pn, ok := v.(GraphNodeProvider)
226 if !ok || pn.ProviderName() == "" { 240 if !ok || pn.ProviderName() == "" {
@@ -228,53 +242,48 @@ func (t *ParentProviderTransformer) Transform(g *Graph) error {
228 } 242 }
229 243
230 // Also require a subpath, if there is no subpath then we 244 // Also require a subpath, if there is no subpath then we
231 // just totally ignore it. The expectation of this transform is 245 // can't have a parent.
232 // that it is used with a graph builder that is already flattened. 246 if pn, ok := v.(GraphNodeSubPath); ok {
233 var path []string 247 if len(normalizeModulePath(pn.Path())) <= 1 {
234 if pn, ok := raw.(GraphNodeSubPath); ok { 248 continue
235 path = pn.Path() 249 }
236 }
237 path = normalizeModulePath(path)
238
239 // Build the key with path.name i.e. "child.subchild.aws"
240 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
241 m[key] = raw
242
243 // Determine the parent if we're non-root. This is length 1 since
244 // the 0 index should be "root" since we normalize above.
245 if len(path) > 1 {
246 path = path[:len(path)-1]
247 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
248 parentMap[raw] = key
249 } 250 }
250 }
251 251
252 // Connect! 252 // this provider may be disabled, but we can only get it's name from
253 for v, key := range parentMap { 253 // the ProviderName string
254 if parent, ok := m[key]; ok { 254 name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil)
255 parent := pm[name]
256 if parent != nil {
255 g.Connect(dag.BasicEdge(v, parent)) 257 g.Connect(dag.BasicEdge(v, parent))
256 } 258 }
257 }
258 259
260 }
259 return nil 261 return nil
260} 262}
261 263
262// PruneProviderTransformer is a GraphTransformer that prunes all the 264// PruneProviderTransformer removes any providers that are not actually used by
263// providers that aren't needed from the graph. A provider is unneeded if 265// anything, and provider proxies. This avoids the provider being initialized
 264 // no resource or module is using that provider. 266 // and configured. This both saves resources and avoids errors since
267// configuration may imply initialization which may require auth.
265type PruneProviderTransformer struct{} 268type PruneProviderTransformer struct{}
266 269
267func (t *PruneProviderTransformer) Transform(g *Graph) error { 270func (t *PruneProviderTransformer) Transform(g *Graph) error {
268 for _, v := range g.Vertices() { 271 for _, v := range g.Vertices() {
269 // We only care about the providers 272 // We only care about providers
270 if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" { 273 pn, ok := v.(GraphNodeProvider)
274 if !ok || pn.ProviderName() == "" {
271 continue 275 continue
272 } 276 }
273 // Does anything depend on this? If not, then prune it. 277
274 if s := g.UpEdges(v); s.Len() == 0 { 278 // ProxyProviders will have up edges, but we're now done with them in the graph
275 if nv, ok := v.(dag.NamedVertex); ok { 279 if _, ok := v.(*graphNodeProxyProvider); ok {
276 log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name()) 280 log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v))
277 } 281 g.Remove(v)
282 }
283
284 // Remove providers with no dependencies.
285 if g.UpEdges(v).Len() == 0 {
286 log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v))
278 g.Remove(v) 287 g.Remove(v)
279 } 288 }
280 } 289 }
@@ -285,23 +294,26 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error {
285// providerMapKey is a helper that gives us the key to use for the 294// providerMapKey is a helper that gives us the key to use for the
286// maps returned by things such as providerVertexMap. 295// maps returned by things such as providerVertexMap.
287func providerMapKey(k string, v dag.Vertex) string { 296func providerMapKey(k string, v dag.Vertex) string {
288 pathPrefix := "" 297 if strings.Contains(k, "provider.") {
289 if sp, ok := v.(GraphNodeSubPath); ok { 298 // this is already resolved
290 raw := normalizeModulePath(sp.Path()) 299 return k
291 if len(raw) > len(rootModulePath) {
292 pathPrefix = modulePrefixStr(raw) + "."
293 }
294 } 300 }
295 301
 296 return pathPrefix + k 302 // otherwise, resolve the key using the consumer's module path
303 var path []string
304 if sp, ok := v.(GraphNodeSubPath); ok {
305 path = normalizeModulePath(sp.Path())
306 }
307 return ResolveProviderName(k, path)
297} 308}
298 309
299func providerVertexMap(g *Graph) map[string]dag.Vertex { 310func providerVertexMap(g *Graph) map[string]dag.Vertex {
300 m := make(map[string]dag.Vertex) 311 m := make(map[string]dag.Vertex)
301 for _, v := range g.Vertices() { 312 for _, v := range g.Vertices() {
302 if pv, ok := v.(GraphNodeProvider); ok { 313 if pv, ok := v.(GraphNodeProvider); ok {
303 key := providerMapKey(pv.ProviderName(), v) 314 // TODO: The Name may have meta info, like " (disabled)"
304 m[key] = v 315 name := strings.SplitN(pv.Name(), " ", 2)[0]
316 m[name] = v
305 } 317 }
306 } 318 }
307 319
@@ -324,7 +336,7 @@ type graphNodeCloseProvider struct {
324} 336}
325 337
326func (n *graphNodeCloseProvider) Name() string { 338func (n *graphNodeCloseProvider) Name() string {
327 return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) 339 return n.ProviderNameValue + " (close)"
328} 340}
329 341
330// GraphNodeEvalable impl. 342// GraphNodeEvalable impl.
@@ -362,19 +374,233 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
362 return true 374 return true
363} 375}
364 376
365// graphNodeProviderConsumerDummy is a struct that never enters the real 377// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to
366// graph (though it could to no ill effect). It implements 378// store the name and value of a provider node for inheritance between modules.
367// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force 379// These nodes are only used to store the data while loading the provider
368// certain transformations. 380// configurations, and are removed after all the resources have been connected
369type graphNodeProviderConsumerDummy struct { 381// to their providers.
370 ProviderValue string 382type graphNodeProxyProvider struct {
371 PathValue []string 383 nameValue string
384 path []string
385 target GraphNodeProvider
386}
387
388func (n *graphNodeProxyProvider) ProviderName() string {
389 return n.Target().ProviderName()
390}
391
392func (n *graphNodeProxyProvider) Name() string {
393 return ResolveProviderName(n.nameValue, n.path)
394}
395
396// find the concrete provider instance
397func (n *graphNodeProxyProvider) Target() GraphNodeProvider {
398 switch t := n.target.(type) {
399 case *graphNodeProxyProvider:
400 return t.Target()
401 default:
402 return n.target
403 }
404}
405
406// ProviderConfigTransformer adds all provider nodes from the configuration and
407// attaches the configs.
408type ProviderConfigTransformer struct {
409 Providers []string
410 Concrete ConcreteProviderNodeFunc
411
412 // each provider node is stored here so that the proxy nodes can look up
413 // their targets by name.
414 providers map[string]GraphNodeProvider
 415 // record providers that can be overridden with a proxy
416 proxiable map[string]bool
417
418 // Module is the module to add resources from.
419 Module *module.Tree
372} 420}
373 421
374func (n *graphNodeProviderConsumerDummy) Path() []string { 422func (t *ProviderConfigTransformer) Transform(g *Graph) error {
375 return n.PathValue 423 // If no module is given, we don't do anything
424 if t.Module == nil {
425 return nil
426 }
427
428 // If the module isn't loaded, that is simply an error
429 if !t.Module.Loaded() {
430 return errors.New("module must be loaded for ProviderConfigTransformer")
431 }
432
433 t.providers = make(map[string]GraphNodeProvider)
434 t.proxiable = make(map[string]bool)
435
436 // Start the transformation process
437 if err := t.transform(g, t.Module); err != nil {
438 return err
439 }
440
441 // finally attach the configs to the new nodes
442 return t.attachProviderConfigs(g)
376} 443}
377 444
378func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { 445func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error {
379 return []string{n.ProviderValue} 446 // If no config, do nothing
447 if m == nil {
448 return nil
449 }
450
451 // Add our resources
452 if err := t.transformSingle(g, m); err != nil {
453 return err
454 }
455
456 // Transform all the children.
457 for _, c := range m.Children() {
458 if err := t.transform(g, c); err != nil {
459 return err
460 }
461 }
462 return nil
463}
464
465func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
466 log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path())
467
468 // Get the configuration for this module
469 conf := m.Config()
470
471 // Build the path we're at
472 path := m.Path()
473 if len(path) > 0 {
474 path = append([]string{RootModuleName}, path...)
475 }
476
477 // add all providers from the configuration
478 for _, p := range conf.ProviderConfigs {
479 name := p.Name
480 if p.Alias != "" {
481 name += "." + p.Alias
482 }
483
484 v := t.Concrete(&NodeAbstractProvider{
485 NameValue: name,
486 PathValue: path,
487 })
488
489 // Add it to the graph
490 g.Add(v)
491 fullName := ResolveProviderName(name, path)
492 t.providers[fullName] = v.(GraphNodeProvider)
493 t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0
494 }
495
496 // Now replace the provider nodes with proxy nodes if a provider was being
497 // passed in, and create implicit proxies if there was no config. Any extra
498 // proxies will be removed in the prune step.
499 return t.addProxyProviders(g, m)
500}
501
502func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error {
503 path := m.Path()
504
505 // can't add proxies at the root
506 if len(path) == 0 {
507 return nil
508 }
509
510 parentPath := path[:len(path)-1]
511 parent := t.Module.Child(parentPath)
512 if parent == nil {
513 return nil
514 }
515
516 var parentCfg *config.Module
517 for _, mod := range parent.Config().Modules {
518 if mod.Name == m.Name() {
519 parentCfg = mod
520 break
521 }
522 }
523
524 if parentCfg == nil {
525 // this can't really happen during normal execution.
526 return fmt.Errorf("parent module config not found for %s", m.Name())
527 }
528
529 // Go through all the providers the parent is passing in, and add proxies to
530 // the parent provider nodes.
531 for name, parentName := range parentCfg.Providers {
532 fullName := ResolveProviderName(name, path)
533 fullParentName := ResolveProviderName(parentName, parentPath)
534
535 parentProvider := t.providers[fullParentName]
536
537 if parentProvider == nil {
538 return fmt.Errorf("missing provider %s", fullParentName)
539 }
540
541 proxy := &graphNodeProxyProvider{
542 nameValue: name,
543 path: path,
544 target: parentProvider,
545 }
546
547 concreteProvider := t.providers[fullName]
548
549 // replace the concrete node with the provider passed in
550 if concreteProvider != nil && t.proxiable[fullName] {
551 g.Replace(concreteProvider, proxy)
552 t.providers[fullName] = proxy
553 continue
554 }
555
556 // aliased providers can't be implicitly passed in
557 if strings.Contains(name, ".") {
558 continue
559 }
560
561 // There was no concrete provider, so add this as an implicit provider.
562 // The extra proxy will be pruned later if it's unused.
563 g.Add(proxy)
564 t.providers[fullName] = proxy
565 }
566 return nil
567}
568
569func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error {
570 for _, v := range g.Vertices() {
571 // Only care about GraphNodeAttachProvider implementations
572 apn, ok := v.(GraphNodeAttachProvider)
573 if !ok {
574 continue
575 }
576
577 // Determine what we're looking for
578 path := normalizeModulePath(apn.Path())[1:]
579 name := apn.ProviderName()
580 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
581
582 // Get the configuration.
583 tree := t.Module.Child(path)
584 if tree == nil {
585 continue
586 }
587
588 // Go through the provider configs to find the matching config
589 for _, p := range tree.Config().ProviderConfigs {
590 // Build the name, which is "name.alias" if an alias exists
591 current := p.Name
592 if p.Alias != "" {
593 current += "." + p.Alias
594 }
595
596 // If the configs match then attach!
597 if current == name {
598 log.Printf("[TRACE] Attaching provider config: %#v", p)
599 apn.AttachProvider(p)
600 break
601 }
602 }
603 }
604
605 return nil
380} 606}
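The core of the new provider handling is name resolution: provider keys are fully qualified by module path, and ProviderTransformer walks up the path until it finds a configured provider for the resource to inherit. A simplified sketch of that lookup (resolveProviderName and lookupProvider below are illustrative stand-ins, not the vendored ResolveProviderName or the real transformer):

package main

import (
	"fmt"
	"strings"
)

// resolveProviderName builds a module-qualified provider key such as
// "module.child.provider.aws"; the root path yields just "provider.aws".
func resolveProviderName(name string, path []string) string {
	parts := []string{}
	for _, p := range path {
		if p == "root" {
			continue
		}
		parts = append(parts, "module", p)
	}
	parts = append(parts, "provider", name)
	return strings.Join(parts, ".")
}

// lookupProvider mirrors the walk-up behaviour added to ProviderTransformer:
// if the current module has no matching provider node, try each parent
// module in turn until one is found or the path is exhausted.
func lookupProvider(providers map[string]bool, name string, path []string) (string, bool) {
	for i := len(path); i >= 0; i-- {
		key := resolveProviderName(name, path[:i])
		if providers[key] {
			return key, true
		}
	}
	return "", false
}

func main() {
	providers := map[string]bool{"provider.aws": true} // only configured at the root
	key, ok := lookupProvider(providers, "aws", []string{"root", "child", "grandchild"})
	fmt.Println(key, ok) // provider.aws true
}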
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
deleted file mode 100644
index d9919f3..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
+++ /dev/null
@@ -1,50 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// DisableProviderTransformer "disables" any providers that are not actually
10// used by anything. This avoids the provider being initialized and configured.
11// This both saves resources but also avoids errors since configuration
12// may imply initialization which may require auth.
13type DisableProviderTransformer struct{}
14
15func (t *DisableProviderTransformer) Transform(g *Graph) error {
16 for _, v := range g.Vertices() {
17 // We only care about providers
18 pn, ok := v.(GraphNodeProvider)
19 if !ok || pn.ProviderName() == "" {
20 continue
21 }
22
23 // If we have dependencies, then don't disable
24 if g.UpEdges(v).Len() > 0 {
25 continue
26 }
27
28 // Get the path
29 var path []string
30 if pn, ok := v.(GraphNodeSubPath); ok {
31 path = pn.Path()
32 }
33
34 // Disable the provider by replacing it with a "disabled" provider
35 disabled := &NodeDisabledProvider{
36 NodeAbstractProvider: &NodeAbstractProvider{
37 NameValue: pn.ProviderName(),
38 PathValue: path,
39 },
40 }
41
42 if !g.Replace(v, disabled) {
43 panic(fmt.Sprintf(
44 "vertex disappeared from under us: %s",
45 dag.VertexName(v)))
46 }
47 }
48
49 return nil
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
index c545235..be8c7f9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -76,6 +76,85 @@ func (t *ReferenceTransformer) Transform(g *Graph) error {
76 return nil 76 return nil
77} 77}
78 78
 79// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges
80// for locals and outputs that depend on other nodes which will be
81// removed during destroy. If a destroy node is evaluated before the local or
82// output value, it will be removed from the state, and the later interpolation
83// will fail.
84type DestroyValueReferenceTransformer struct{}
85
86func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
87 vs := g.Vertices()
88 for _, v := range vs {
89 switch v.(type) {
90 case *NodeApplyableOutput, *NodeLocal:
91 // OK
92 default:
93 continue
94 }
95
96 // reverse any outgoing edges so that the value is evaluated first.
97 for _, e := range g.EdgesFrom(v) {
98 target := e.Target()
99
100 // only destroy nodes will be evaluated in reverse
101 if _, ok := target.(GraphNodeDestroyer); !ok {
102 continue
103 }
104
105 log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
106
107 g.RemoveEdge(e)
108 g.Connect(&DestroyEdge{S: target, T: v})
109 }
110 }
111
112 return nil
113}
114
 115// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
116// output values which are not referenced in the graph. Since outputs and
117// locals always need to be evaluated, if they reference a resource that is not
118// available in the state the interpolation could fail.
119type PruneUnusedValuesTransformer struct{}
120
121func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
122 // this might need multiple runs in order to ensure that pruning a value
 123 // doesn't affect a previously checked value.
124 for removed := 0; ; removed = 0 {
125 for _, v := range g.Vertices() {
126 switch v.(type) {
127 case *NodeApplyableOutput, *NodeLocal:
128 // OK
129 default:
130 continue
131 }
132
133 dependants := g.UpEdges(v)
134
135 switch dependants.Len() {
136 case 0:
137 // nothing at all depends on this
138 g.Remove(v)
139 removed++
140 case 1:
141 // because an output's destroy node always depends on the output,
142 // we need to check for the case of a single destroy node.
143 d := dependants.List()[0]
144 if _, ok := d.(*NodeDestroyableOutput); ok {
145 g.Remove(v)
146 removed++
147 }
148 }
149 }
150 if removed == 0 {
151 break
152 }
153 }
154
155 return nil
156}
157
79// ReferenceMap is a structure that can be used to efficiently check 158// ReferenceMap is a structure that can be used to efficiently check
80// for references on a graph. 159// for references on a graph.
81type ReferenceMap struct { 160type ReferenceMap struct {
@@ -96,6 +175,7 @@ func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
96 var matches []dag.Vertex 175 var matches []dag.Vertex
97 var missing []string 176 var missing []string
98 prefix := m.prefix(v) 177 prefix := m.prefix(v)
178
99 for _, ns := range rn.References() { 179 for _, ns := range rn.References() {
100 found := false 180 found := false
101 for _, n := range strings.Split(ns, "/") { 181 for _, n := range strings.Split(ns, "/") {
@@ -108,19 +188,14 @@ func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
108 // Mark that we found a match 188 // Mark that we found a match
109 found = true 189 found = true
110 190
111 // Make sure this isn't a self reference, which isn't included
112 selfRef := false
113 for _, p := range parents { 191 for _, p := range parents {
192 // don't include self-references
114 if p == v { 193 if p == v {
115 selfRef = true 194 continue
116 break
117 } 195 }
118 } 196 matches = append(matches, p)
119 if selfRef {
120 continue
121 } 197 }
122 198
123 matches = append(matches, parents...)
124 break 199 break
125 } 200 }
126 201
@@ -296,14 +371,21 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
296 return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)} 371 return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
297 case *config.UserVariable: 372 case *config.UserVariable:
298 return []string{fmt.Sprintf("var.%s", v.Name)} 373 return []string{fmt.Sprintf("var.%s", v.Name)}
374 case *config.LocalVariable:
375 return []string{fmt.Sprintf("local.%s", v.Name)}
299 default: 376 default:
300 return nil 377 return nil
301 } 378 }
302} 379}
303 380
304func modulePrefixStr(p []string) string { 381func modulePrefixStr(p []string) string {
382 // strip "root"
383 if len(p) > 0 && p[0] == rootModulePath[0] {
384 p = p[1:]
385 }
386
305 parts := make([]string, 0, len(p)*2) 387 parts := make([]string, 0, len(p)*2)
306 for _, p := range p[1:] { 388 for _, p := range p {
307 parts = append(parts, "module", p) 389 parts = append(parts, "module", p)
308 } 390 }
309 391
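PruneUnusedValuesTransformer repeats its pruning pass until a pass removes nothing, because removing one unused value can make another value unused. A minimal sketch of that fixed-point loop over a dependants map, ignoring the special case for a single NodeDestroyableOutput dependant handled in the vendored code:

package main

import "fmt"

// pruneUnusedValues keeps removing value nodes that nothing depends on
// until a pass removes nothing. dependants maps node -> the nodes that
// depend on it; values is the set of prunable locals/outputs.
func pruneUnusedValues(dependants map[string][]string, values map[string]bool) {
	for {
		removed := 0
		for v := range values {
			if len(dependants[v]) > 0 {
				continue
			}
			delete(values, v)
			removed++
			// removing v may leave other values with no dependants
			for k, ds := range dependants {
				keep := ds[:0]
				for _, d := range ds {
					if d != v {
						keep = append(keep, d)
					}
				}
				dependants[k] = keep
			}
		}
		if removed == 0 {
			return
		}
	}
}

func main() {
	// nothing depends on local.a, and local.b is only used by local.a, so
	// pruning local.a makes local.b prunable on a later check as well.
	dependants := map[string][]string{
		"local.a": {},
		"local.b": {"local.a"},
	}
	values := map[string]bool{"local.a": true, "local.b": true}
	pruneUnusedValues(dependants, values)
	fmt.Println(len(values)) // 0
}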
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
new file mode 100644
index 0000000..2e05edb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
@@ -0,0 +1,32 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config/module"
7)
8
 9// RemovedModuleTransformer implements GraphTransformer to add nodes indicating
10// when a module was removed from the configuration.
11type RemovedModuleTransformer struct {
12 Module *module.Tree // root module
13 State *State
14}
15
16func (t *RemovedModuleTransformer) Transform(g *Graph) error {
17 // nothing to remove if there's no state!
18 if t.State == nil {
19 return nil
20 }
21
22 for _, m := range t.State.Modules {
23 c := t.Module.Child(m.Path[1:])
24 if c != nil {
25 continue
26 }
27
28 log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path))
29 g.Add(&NodeModuleRemoved{PathValue: m.Path})
30 }
31 return nil
32}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
index cda35cb..e528b37 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -37,7 +37,9 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error {
37 addr.Index = index 37 addr.Index = index
38 38
39 // Build the abstract node and the concrete one 39 // Build the abstract node and the concrete one
40 abstract := &NodeAbstractResource{Addr: addr} 40 abstract := &NodeAbstractResource{
41 Addr: addr,
42 }
41 var node dag.Vertex = abstract 43 var node dag.Vertex = abstract
42 if f := t.Concrete; f != nil { 44 if f := t.Concrete; f != nil {
43 node = f(abstract) 45 node = f(abstract)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
index 4f117b4..af6defe 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -73,9 +73,11 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
73 if _, ok := v.(GraphNodeResource); ok { 73 if _, ok := v.(GraphNodeResource); ok {
74 removable = true 74 removable = true
75 } 75 }
76
76 if vr, ok := v.(RemovableIfNotTargeted); ok { 77 if vr, ok := v.(RemovableIfNotTargeted); ok {
77 removable = vr.RemoveIfNotTargeted() 78 removable = vr.RemoveIfNotTargeted()
78 } 79 }
80
79 if removable && !targetedNodes.Include(v) { 81 if removable && !targetedNodes.Include(v) {
80 log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) 82 log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
81 g.Remove(v) 83 g.Remove(v)
@@ -135,7 +137,10 @@ func (t *TargetsTransformer) selectTargetedNodes(
135 } 137 }
136 } 138 }
137 } 139 }
140 return t.addDependencies(targetedNodes, g)
141}
138 142
143func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) {
139 // Handle nodes that need to be included if their dependencies are included. 144 // Handle nodes that need to be included if their dependencies are included.
140 // This requires multiple passes since we need to catch transitive 145 // This requires multiple passes since we need to catch transitive
141 // dependencies if and only if they are via other nodes that also 146 // dependencies if and only if they are via other nodes that also
@@ -157,11 +162,6 @@ func (t *TargetsTransformer) selectTargetedNodes(
157 } 162 }
158 163
159 dependers = dependers.Filter(func(dv interface{}) bool { 164 dependers = dependers.Filter(func(dv interface{}) bool {
160 // Can ignore nodes that are already targeted
161 /*if targetedNodes.Include(dv) {
162 return false
163 }*/
164
165 _, ok := dv.(GraphNodeTargetDownstream) 165 _, ok := dv.(GraphNodeTargetDownstream)
166 return ok 166 return ok
167 }) 167 })
@@ -180,6 +180,7 @@ func (t *TargetsTransformer) selectTargetedNodes(
180 // depending on in case that informs its decision about whether 180 // depending on in case that informs its decision about whether
181 // it is safe to be targeted. 181 // it is safe to be targeted.
182 deps := g.DownEdges(v) 182 deps := g.DownEdges(v)
183
183 depsTargeted := deps.Intersection(targetedNodes) 184 depsTargeted := deps.Intersection(targetedNodes)
184 depsUntargeted := deps.Difference(depsTargeted) 185 depsUntargeted := deps.Difference(depsTargeted)
185 186
@@ -193,7 +194,50 @@ func (t *TargetsTransformer) selectTargetedNodes(
193 } 194 }
194 } 195 }
195 196
196 return targetedNodes, nil 197 return targetedNodes.Filter(func(dv interface{}) bool {
198 return filterPartialOutputs(dv, targetedNodes, g)
199 }), nil
200}
201
202// Outputs may have been included transitively, but if any of their
203// dependencies have been pruned they won't be resolvable.
204// If nothing depends on the output, and the output is missing any
205// dependencies, remove it from the graph.
206// This essentially maintains the previous behavior where interpolation in
207// outputs would fail silently, but can now surface errors where the output
208// is required.
209func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool {
210 // should this just be done with TargetDownstream?
211 if _, ok := v.(*NodeApplyableOutput); !ok {
212 return true
213 }
214
215 dependers := g.UpEdges(v)
216 for _, d := range dependers.List() {
217 if _, ok := d.(*NodeCountBoundary); ok {
218 continue
219 }
220
221 if !targetedNodes.Include(d) {
222 // this one is going to be removed, so it doesn't count
223 continue
224 }
225
226 // as soon as we see a real dependency, we mark this as
227 // non-removable
228 return true
229 }
230
231 depends := g.DownEdges(v)
232
233 for _, d := range depends.List() {
234 if !targetedNodes.Include(d) {
235 log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph",
236 dag.VertexName(v), dag.VertexName(d))
237 return false
238 }
239 }
240 return true
197} 241}
198 242
199func (t *TargetsTransformer) nodeIsTarget( 243func (t *TargetsTransformer) nodeIsTarget(
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
index 7852bc4..d828c92 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -1,13 +1,18 @@
1package terraform 1package terraform
2 2
3import "sync"
4
3// MockUIOutput is an implementation of UIOutput that can be used for tests. 5// MockUIOutput is an implementation of UIOutput that can be used for tests.
4type MockUIOutput struct { 6type MockUIOutput struct {
7 sync.Mutex
5 OutputCalled bool 8 OutputCalled bool
6 OutputMessage string 9 OutputMessage string
7 OutputFn func(string) 10 OutputFn func(string)
8} 11}
9 12
10func (o *MockUIOutput) Output(v string) { 13func (o *MockUIOutput) Output(v string) {
14 o.Lock()
15 defer o.Unlock()
11 o.OutputCalled = true 16 o.OutputCalled = true
12 o.OutputMessage = v 17 o.OutputMessage = v
13 if o.OutputFn != nil { 18 if o.OutputFn != nil {
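The MockUIOutput change above adds a sync.Mutex so the mock can be driven from several goroutines without a data race. A minimal standalone sketch of that pattern, using an invented message and a local copy of the struct shape rather than the vendored type:

```go
// Sketch: why the mock now locks in Output. Concurrent callers would
// otherwise race on OutputCalled / OutputMessage.
package main

import (
	"fmt"
	"sync"
)

// mockUIOutput mirrors the shape of the vendored MockUIOutput for this sketch.
type mockUIOutput struct {
	sync.Mutex
	OutputCalled  bool
	OutputMessage string
}

func (o *mockUIOutput) Output(v string) {
	o.Lock()
	defer o.Unlock()
	o.OutputCalled = true
	o.OutputMessage = v
}

func main() {
	var wg sync.WaitGroup
	o := &mockUIOutput{}

	// Concurrent callers are safe because Output takes the mutex.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			o.Output(fmt.Sprintf("provisioner output %d", n))
		}(i)
	}
	wg.Wait()

	fmt.Println(o.OutputCalled, o.OutputMessage)
}
```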
diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go
index 700be2a..a42613e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go
@@ -1,14 +1,13 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/httpclient"
5 "runtime"
6) 5)
7 6
8// The standard Terraform User-Agent format
9const UserAgent = "Terraform %s (%s)"
10
11// Generate a UserAgent string 7// Generate a UserAgent string
8//
9// Deprecated: Use httpclient.UserAgentString if you are setting your
10// own User-Agent header.
12func UserAgentString() string { 11func UserAgentString() string {
13 return fmt.Sprintf(UserAgent, VersionString(), runtime.Version()) 12 return httpclient.UserAgentString()
14} 13}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
index d61b11e..ac73015 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/version.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -1,31 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/version"
5
6 "github.com/hashicorp/go-version"
7) 5)
8 6
9// The main version number that is being run at the moment. 7// TODO: update providers to use the version package directly
10const Version = "0.10.0"
11
12// A pre-release marker for the version. If this is "" (empty string)
13// then it means that it is a final release. Otherwise, this is a pre-release
14// such as "dev" (in development), "beta", "rc1", etc.
15var VersionPrerelease = "dev"
16
17// SemVersion is an instance of version.Version. This has the secondary
18// benefit of verifying during tests and init time that our version is a
19// proper semantic version, which should always be the case.
20var SemVersion = version.Must(version.NewVersion(Version))
21
22// VersionHeader is the header name used to send the current terraform version
23// in http requests.
24const VersionHeader = "Terraform-Version"
25
26func VersionString() string { 8func VersionString() string {
27 if VersionPrerelease != "" { 9 return version.String()
28 return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
29 }
30 return Version
31} 10}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
index 3cbbf56..1f43045 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -6,19 +6,21 @@ import (
6 "github.com/hashicorp/go-version" 6 "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module" 8 "github.com/hashicorp/terraform/config/module"
9
10 tfversion "github.com/hashicorp/terraform/version"
9) 11)
10 12
11// checkRequiredVersion verifies that any version requirements specified by 13// CheckRequiredVersion verifies that any version requirements specified by
12// the configuration are met. 14// the configuration are met.
13// 15//
14// This checks the root module as well as any additional version requirements 16// This checks the root module as well as any additional version requirements
15// from child modules. 17// from child modules.
16// 18//
17// This is tested in context_test.go. 19// This is tested in context_test.go.
18func checkRequiredVersion(m *module.Tree) error { 20func CheckRequiredVersion(m *module.Tree) error {
19 // Check any children 21 // Check any children
20 for _, c := range m.Children() { 22 for _, c := range m.Children() {
21 if err := checkRequiredVersion(c); err != nil { 23 if err := CheckRequiredVersion(c); err != nil {
22 return err 24 return err
23 } 25 }
24 } 26 }
@@ -49,7 +51,7 @@ func checkRequiredVersion(m *module.Tree) error {
49 tf.RequiredVersion, err) 51 tf.RequiredVersion, err)
50 } 52 }
51 53
52 if !cs.Check(SemVersion) { 54 if !cs.Check(tfversion.SemVer) {
53 return fmt.Errorf( 55 return fmt.Errorf(
54 "The currently running version of Terraform doesn't meet the\n"+ 56 "The currently running version of Terraform doesn't meet the\n"+
55 "version requirements explicitly specified by the configuration.\n"+ 57 "version requirements explicitly specified by the configuration.\n"+
@@ -62,7 +64,7 @@ func checkRequiredVersion(m *module.Tree) error {
62 " Current version: %s", 64 " Current version: %s",
63 module, 65 module,
64 tf.RequiredVersion, 66 tf.RequiredVersion,
65 SemVersion) 67 tfversion.SemVer)
66 } 68 }
67 69
68 return nil 70 return nil
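The version_required.go change swaps the package-local SemVersion for tfversion.SemVer, but the constraint check itself is ordinary github.com/hashicorp/go-version usage. A minimal sketch with made-up version and constraint strings:

```go
// Sketch of the cs.Check(...) pattern used above, against an invented
// running version and required_version constraint.
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	running := version.Must(version.NewVersion("0.11.12"))

	cs, err := version.NewConstraint(">= 0.11.0, < 0.12.0")
	if err != nil {
		panic(err)
	}

	if !cs.Check(running) {
		fmt.Println("running version does not satisfy the constraint")
		return
	}
	fmt.Println("constraint satisfied")
}
```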
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
index cbd78dd..4cfc528 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -2,7 +2,7 @@
2 2
3package terraform 3package terraform
4 4
5import "fmt" 5import "strconv"
6 6
7const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" 7const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
8 8
@@ -10,7 +10,7 @@ var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
10 10
11func (i walkOperation) String() string { 11func (i walkOperation) String() string {
12 if i >= walkOperation(len(_walkOperation_index)-1) { 12 if i >= walkOperation(len(_walkOperation_index)-1) {
13 return fmt.Sprintf("walkOperation(%d)", i) 13 return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")"
14 } 14 }
15 return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] 15 return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
16} 16}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
new file mode 100644
index 0000000..2c23f76
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
@@ -0,0 +1,26 @@
1package tfdiags
2
3type Diagnostic interface {
4 Severity() Severity
5 Description() Description
6 Source() Source
7}
8
9type Severity rune
10
11//go:generate stringer -type=Severity
12
13const (
14 Error Severity = 'E'
15 Warning Severity = 'W'
16)
17
18type Description struct {
19 Summary string
20 Detail string
21}
22
23type Source struct {
24 Subject *SourceRange
25 Context *SourceRange
26}
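Any type with these three methods satisfies the Diagnostic interface, so callers can define their own diagnostics. A hedged sketch with an invented deprecationDiag type; the package's own implementations appear in the files below.

```go
// Sketch: a caller-defined warning-level Diagnostic. The type name and
// message are invented for illustration.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

type deprecationDiag struct {
	what string
}

func (d deprecationDiag) Severity() tfdiags.Severity { return tfdiags.Warning }

func (d deprecationDiag) Description() tfdiags.Description {
	return tfdiags.Description{
		Summary: fmt.Sprintf("%s is deprecated", d.what),
	}
}

func (d deprecationDiag) Source() tfdiags.Source {
	// No source location for this synthetic diagnostic.
	return tfdiags.Source{}
}

func main() {
	var d tfdiags.Diagnostic = deprecationDiag{what: "terraform.UserAgentString"}
	fmt.Printf("[%s] %s\n", d.Severity(), d.Description().Summary)
}
```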
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
new file mode 100644
index 0000000..667ba80
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
@@ -0,0 +1,181 @@
1package tfdiags
2
3import (
4 "bytes"
5 "fmt"
6
7 "github.com/hashicorp/errwrap"
8 multierror "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/hcl2/hcl"
10)
11
12// Diagnostics is a list of diagnostics. Diagnostics is intended to be used
13// where a Go "error" might normally be used, allowing richer information
14// to be conveyed (more context, support for warnings).
15//
16// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
17// heap allocation to be avoided in the common case where there are no
18// diagnostics to report at all.
19type Diagnostics []Diagnostic
20
21// Append is the main interface for constructing Diagnostics lists, taking
22// an existing list (which may be nil) and appending the new objects to it
23// after normalizing them to be implementations of Diagnostic.
24//
25// The usual pattern for a function that natively "speaks" diagnostics is:
26//
27// // Create a nil Diagnostics at the start of the function
28// var diags diag.Diagnostics
29//
30// // At later points, build on it if errors / warnings occur:
31// foo, err := DoSomethingRisky()
32// if err != nil {
33// diags = diags.Append(err)
34// }
35//
36// // Eventually return the result and diagnostics in place of error
37// return result, diags
38//
39// Append accepts a variety of different diagnostic-like types, including
40// native Go errors and HCL diagnostics. It also knows how to unwrap
41// a multierror.Error into separate error diagnostics. It can be passed
42// another Diagnostics to concatenate the two lists. If given something
43// it cannot handle, this function will panic.
44func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
45 for _, item := range new {
46 if item == nil {
47 continue
48 }
49
50 switch ti := item.(type) {
51 case Diagnostic:
52 diags = append(diags, ti)
53 case Diagnostics:
54 diags = append(diags, ti...) // flatten
55 case diagnosticsAsError:
56 diags = diags.Append(ti.Diagnostics) // unwrap
57 case hcl.Diagnostics:
58 for _, hclDiag := range ti {
59 diags = append(diags, hclDiagnostic{hclDiag})
60 }
61 case *hcl.Diagnostic:
62 diags = append(diags, hclDiagnostic{ti})
63 case *multierror.Error:
64 for _, err := range ti.Errors {
65 diags = append(diags, nativeError{err})
66 }
67 case error:
68 switch {
69 case errwrap.ContainsType(ti, Diagnostics(nil)):
70 // If we have an errwrap wrapper with a Diagnostics hiding
71 // inside then we'll unpick it here to get access to the
72 // individual diagnostics.
73 diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil)))
74 case errwrap.ContainsType(ti, hcl.Diagnostics(nil)):
75 // Likewise, if we have HCL diagnostics we'll unpick that too.
76 diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil)))
77 default:
78 diags = append(diags, nativeError{ti})
79 }
80 default:
81 panic(fmt.Errorf("can't construct diagnostic(s) from %T", item))
82 }
83 }
84
85 // Given the above, we should never end up with a non-nil empty slice
86 // here, but we'll make sure of that so callers can rely on empty == nil
87 if len(diags) == 0 {
88 return nil
89 }
90
91 return diags
92}
93
94// HasErrors returns true if any of the diagnostics in the list have
95// a severity of Error.
96func (diags Diagnostics) HasErrors() bool {
97 for _, diag := range diags {
98 if diag.Severity() == Error {
99 return true
100 }
101 }
102 return false
103}
104
105// ForRPC returns a version of the receiver that has been simplified so that
106// it is friendly to RPC protocols.
107//
108// Currently this means that it can be serialized with encoding/gob and
109// subsequently re-inflated. It may later grow to include other serialization
110// formats.
111//
112// Note that this loses information about the original objects used to
113// construct the diagnostics, so e.g. the errwrap API will not work as
114// expected on an error-wrapped Diagnostics that came from ForRPC.
115func (diags Diagnostics) ForRPC() Diagnostics {
116 ret := make(Diagnostics, len(diags))
117 for i := range diags {
118 ret[i] = makeRPCFriendlyDiag(diags[i])
119 }
120 return ret
121}
122
123// Err flattens a diagnostics list into a single Go error, or to nil
124// if the diagnostics list does not include any error-level diagnostics.
125//
126// This can be used to smuggle diagnostics through an API that deals in
127// native errors, but unfortunately it will lose naked warnings (warnings
128// that aren't accompanied by at least one error) since such APIs have no
129// mechanism through which to report these.
130//
131// return result, diags.Err()
132func (diags Diagnostics) Err() error {
133 if !diags.HasErrors() {
134 return nil
135 }
136 return diagnosticsAsError{diags}
137}
138
139type diagnosticsAsError struct {
140 Diagnostics
141}
142
143func (dae diagnosticsAsError) Error() string {
144 diags := dae.Diagnostics
145 switch {
146 case len(diags) == 0:
147 // should never happen, since we don't create this wrapper if
148 // there are no diagnostics in the list.
149 return "no errors"
150 case len(diags) == 1:
151 desc := diags[0].Description()
152 if desc.Detail == "" {
153 return desc.Summary
154 }
155 return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail)
156 default:
157 var ret bytes.Buffer
158 fmt.Fprintf(&ret, "%d problems:\n", len(diags))
159 for _, diag := range dae.Diagnostics {
160 desc := diag.Description()
161 if desc.Detail == "" {
162 fmt.Fprintf(&ret, "\n- %s", desc.Summary)
163 } else {
164 fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail)
165 }
166 }
167 return ret.String()
168 }
169}
170
171// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped
172// diagnostics object can be picked apart by errwrap-aware code.
173func (dae diagnosticsAsError) WrappedErrors() []error {
174 var errs []error
175 for _, diag := range dae.Diagnostics {
176 if wrapper, isErr := diag.(nativeError); isErr {
177 errs = append(errs, wrapper.err)
178 }
179 }
180 return errs
181}
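The Append / HasErrors / Err flow documented above, as a small self-contained sketch. DoSomethingRisky and its error text are invented placeholders; the import path matches the vendored location.

```go
// Sketch of the usual Diagnostics pattern: accumulate with Append,
// then inspect with HasErrors and flatten with Err.
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

// DoSomethingRisky is an invented stand-in for any error-returning call.
func DoSomethingRisky() (string, error) {
	return "", errors.New("disk on fire")
}

func run() (string, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	diags = diags.Append(tfdiags.SimpleWarning("this is only a sketch"))

	result, err := DoSomethingRisky()
	if err != nil {
		diags = diags.Append(err)
	}

	return result, diags
}

func main() {
	result, diags := run()
	if diags.HasErrors() {
		// Err flattens the error diagnostics into a single Go error.
		fmt.Println("failed:", diags.Err())
		return
	}
	fmt.Println("ok:", result)
}
```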
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go
new file mode 100644
index 0000000..c427879
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go
@@ -0,0 +1,16 @@
1// Package tfdiags is a utility package for representing errors and
2// warnings in a manner that allows us to produce good messages for the
3// user.
4//
5// "diag" is short for "diagnostics", and is meant as a general word for
6// feedback to a user about potential or actual problems.
7//
8// A design goal for this package is for it to be able to provide rich
9// messaging where possible but to also be pragmatic about dealing with
10// generic errors produced by system components that _can't_ provide
11// such rich messaging. As a consequence, the main types in this package --
12// Diagnostics and Diagnostic -- are designed so that they can be "smuggled"
13// over an error channel and then be unpacked at the other end, so that
14// error diagnostics (at least) can transit through APIs that are not
15// aware of this package.
16package tfdiags
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
new file mode 100644
index 0000000..35edc30
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
@@ -0,0 +1,23 @@
1package tfdiags
2
3// nativeError is a Diagnostic implementation that wraps a normal Go error
4type nativeError struct {
5 err error
6}
7
8var _ Diagnostic = nativeError{}
9
10func (e nativeError) Severity() Severity {
11 return Error
12}
13
14func (e nativeError) Description() Description {
15 return Description{
16 Summary: e.err.Error(),
17 }
18}
19
20func (e nativeError) Source() Source {
21 // No source information available for a native error
22 return Source{}
23}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
new file mode 100644
index 0000000..24851f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
@@ -0,0 +1,77 @@
1package tfdiags
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic
8type hclDiagnostic struct {
9 diag *hcl.Diagnostic
10}
11
12var _ Diagnostic = hclDiagnostic{}
13
14func (d hclDiagnostic) Severity() Severity {
15 switch d.diag.Severity {
16 case hcl.DiagWarning:
17 return Warning
18 default:
19 return Error
20 }
21}
22
23func (d hclDiagnostic) Description() Description {
24 return Description{
25 Summary: d.diag.Summary,
26 Detail: d.diag.Detail,
27 }
28}
29
30func (d hclDiagnostic) Source() Source {
31 var ret Source
32 if d.diag.Subject != nil {
33 rng := SourceRangeFromHCL(*d.diag.Subject)
34 ret.Subject = &rng
35 }
36 if d.diag.Context != nil {
37 rng := SourceRangeFromHCL(*d.diag.Context)
38 ret.Context = &rng
39 }
40 return ret
41}
42
43// SourceRangeFromHCL constructs a SourceRange from the corresponding range
44// type within the HCL package.
45func SourceRangeFromHCL(hclRange hcl.Range) SourceRange {
46 return SourceRange{
47 Filename: hclRange.Filename,
48 Start: SourcePos{
49 Line: hclRange.Start.Line,
50 Column: hclRange.Start.Column,
51 Byte: hclRange.Start.Byte,
52 },
53 End: SourcePos{
54 Line: hclRange.End.Line,
55 Column: hclRange.End.Column,
56 Byte: hclRange.End.Byte,
57 },
58 }
59}
60
61// ToHCL constructs a HCL Range from the receiving SourceRange. This is the
62// opposite of SourceRangeFromHCL.
63func (r SourceRange) ToHCL() hcl.Range {
64 return hcl.Range{
65 Filename: r.Filename,
66 Start: hcl.Pos{
67 Line: r.Start.Line,
68 Column: r.Start.Column,
69 Byte: r.Start.Byte,
70 },
71 End: hcl.Pos{
72 Line: r.End.Line,
73 Column: r.End.Column,
74 Byte: r.End.Byte,
75 },
76 }
77}
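SourceRangeFromHCL and ToHCL are inverse conversions. A quick sketch with an invented filename and positions; StartString comes from source_range.go below.

```go
// Sketch: round-trip an hcl.Range through the tfdiags representation.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	in := hcl.Range{
		Filename: "main.tf",
		Start:    hcl.Pos{Line: 3, Column: 1, Byte: 27},
		End:      hcl.Pos{Line: 3, Column: 14, Byte: 40},
	}

	// Convert into the tfdiags representation...
	rng := tfdiags.SourceRangeFromHCL(in)
	fmt.Println(rng.StartString()) // e.g. "main.tf:3,1"

	// ...and back to an hcl.Range.
	out := rng.ToHCL()
	fmt.Println(out.Filename, out.Start.Line, out.End.Column)
}
```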
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
new file mode 100644
index 0000000..6cc95cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
@@ -0,0 +1,53 @@
1package tfdiags
2
3import (
4 "encoding/gob"
5)
6
7type rpcFriendlyDiag struct {
8 Severity_ Severity
9 Summary_ string
10 Detail_ string
11 Subject_ *SourceRange
12 Context_ *SourceRange
13}
14
15// makeRPCFriendlyDiag transforms a given diagnostic so that it is more friendly to
16// RPC.
17//
18// In particular, it currently returns an object that can be serialized and
19// later re-inflated using gob. This definition may grow to include other
20// serializations later.
21func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic {
22 desc := diag.Description()
23 source := diag.Source()
24 return &rpcFriendlyDiag{
25 Severity_: diag.Severity(),
26 Summary_: desc.Summary,
27 Detail_: desc.Detail,
28 Subject_: source.Subject,
29 Context_: source.Context,
30 }
31}
32
33func (d *rpcFriendlyDiag) Severity() Severity {
34 return d.Severity_
35}
36
37func (d *rpcFriendlyDiag) Description() Description {
38 return Description{
39 Summary: d.Summary_,
40 Detail: d.Detail_,
41 }
42}
43
44func (d *rpcFriendlyDiag) Source() Source {
45 return Source{
46 Subject: d.Subject_,
47 Context: d.Context_,
48 }
49}
50
51func init() {
52 gob.Register((*rpcFriendlyDiag)(nil))
53}
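A sketch of what the gob registration above enables: Diagnostics that have been passed through ForRPC can be encoded and decoded with encoding/gob, for example across a plugin RPC boundary. The error text here is invented.

```go
// Sketch: ForRPC replaces the original diagnostics with rpcFriendlyDiag
// values, which gob can serialize because of the init() registration.
package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics
	diags = diags.Append(errors.New("provider exploded"))

	// Simplify to gob-friendly diagnostics before sending over the wire.
	rpcDiags := diags.ForRPC()

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(rpcDiags); err != nil {
		panic(err)
	}

	var decoded tfdiags.Diagnostics
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		panic(err)
	}

	fmt.Println(decoded[0].Description().Summary) // "provider exploded"
}
```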
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
new file mode 100644
index 0000000..0b1249b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
@@ -0,0 +1,21 @@
1// Code generated by "stringer -type=Severity"; DO NOT EDIT.
2
3package tfdiags
4
5import "strconv"
6
7const (
8 _Severity_name_0 = "Error"
9 _Severity_name_1 = "Warning"
10)
11
12func (i Severity) String() string {
13 switch {
14 case i == 69:
15 return _Severity_name_0
16 case i == 87:
17 return _Severity_name_1
18 default:
19 return "Severity(" + strconv.FormatInt(int64(i), 10) + ")"
20 }
21}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
new file mode 100644
index 0000000..fb3ac98
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
@@ -0,0 +1,25 @@
1package tfdiags
2
3type simpleWarning string
4
5var _ Diagnostic = simpleWarning("")
6
7// SimpleWarning constructs a simple (summary-only) warning diagnostic.
8func SimpleWarning(msg string) Diagnostic {
9 return simpleWarning(msg)
10}
11
12func (e simpleWarning) Severity() Severity {
13 return Warning
14}
15
16func (e simpleWarning) Description() Description {
17 return Description{
18 Summary: string(e),
19 }
20}
21
22func (e simpleWarning) Source() Source {
23 // No source information available for a simple warning
24 return Source{}
25}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go
new file mode 100644
index 0000000..3031168
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go
@@ -0,0 +1,35 @@
1package tfdiags
2
3import (
4 "fmt"
5 "os"
6 "path/filepath"
7)
8
9type SourceRange struct {
10 Filename string
11 Start, End SourcePos
12}
13
14type SourcePos struct {
15 Line, Column, Byte int
16}
17
18// StartString returns a string representation of the start of the range,
19// including the filename and the line and column numbers.
20func (r SourceRange) StartString() string {
21 filename := r.Filename
22
23 // We'll try to relative-ize our filename here so it's less verbose
24 // in the common case of being in the current working directory. If not,
25 // we'll just show the full path.
26 wd, err := os.Getwd()
27 if err == nil {
28 relFn, err := filepath.Rel(wd, filename)
29 if err == nil {
30 filename = relFn
31 }
32 }
33
34 return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column)
35}
diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go
new file mode 100644
index 0000000..b21b297
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/version/version.go
@@ -0,0 +1,36 @@
1// The version package provides a location to set the release versions for all
2// packages to consume, without creating import cycles.
3//
4// This package should not import any other terraform packages.
5package version
6
7import (
8 "fmt"
9
10 version "github.com/hashicorp/go-version"
11)
12
13// The main version number that is being run at the moment.
14var Version = "0.11.12"
15
16// A pre-release marker for the version. If this is "" (empty string)
17// then it means that it is a final release. Otherwise, this is a pre-release
18// such as "dev" (in development), "beta", "rc1", etc.
19var Prerelease = "dev"
20
21// SemVer is an instance of version.Version. This has the secondary
22// benefit of verifying during tests and init time that our version is a
23// proper semantic version, which should always be the case.
24var SemVer = version.Must(version.NewVersion(Version))
25
26// Header is the header name used to send the current terraform version
27// in http requests.
28const Header = "Terraform-Version"
29
30// String returns the complete version string, including prerelease
31func String() string {
32 if Prerelease != "" {
33 return fmt.Sprintf("%s-%s", Version, Prerelease)
34 }
35 return Version
36}
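A hedged sketch of one way the new version package can be consumed: attaching the running version to an outgoing request via the Header constant. The URL is a placeholder.

```go
// Sketch: set the Terraform-Version header using the version package.
package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/terraform/version"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/registry", nil)
	if err != nil {
		panic(err)
	}

	// e.g. "Terraform-Version: 0.11.12-dev"
	req.Header.Set(version.Header, version.String())

	fmt.Println(req.Header.Get(version.Header))
}
```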
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 0000000..65dc692
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
1Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
2
3MIT License (Expat)
4
5Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6
7The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8
9THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 0000000..74845de
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,37 @@
1# go-isatty
2
3isatty for golang
4
5## Usage
6
7```go
8package main
9
10import (
11 "fmt"
12 "github.com/mattn/go-isatty"
13 "os"
14)
15
16func main() {
17 if isatty.IsTerminal(os.Stdout.Fd()) {
18 fmt.Println("Is Terminal")
19 } else {
20 fmt.Println("Is Not Terminal")
21 }
22}
23```
24
25## Installation
26
27```
28$ go get github.com/mattn/go-isatty
29```
30
31# License
32
33MIT
34
35# Author
36
37Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 0000000..17d4f90
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
1// Package isatty implements an interface to isatty
2package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 0000000..83c5887
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
1// +build appengine
2
3package isatty
4
5// IsTerminal returns true if the file descriptor is a terminal, which
6// is always false on appengine classic, which is a sandboxed PaaS.
7func IsTerminal(fd uintptr) bool {
8 return false
9}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 0000000..42f2514
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
1// +build darwin freebsd openbsd netbsd dragonfly
2// +build !appengine
3
4package isatty
5
6import (
7 "syscall"
8 "unsafe"
9)
10
11const ioctlReadTermios = syscall.TIOCGETA
12
13// IsTerminal returns true if the file descriptor is a terminal.
14func IsTerminal(fd uintptr) bool {
15 var termios syscall.Termios
16 _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
17 return err == 0
18}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 0000000..9d24bac
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
1// +build linux
2// +build !appengine
3
4package isatty
5
6import (
7 "syscall"
8 "unsafe"
9)
10
11const ioctlReadTermios = syscall.TCGETS
12
13// IsTerminal returns true if the file descriptor is a terminal.
14func IsTerminal(fd uintptr) bool {
15 var termios syscall.Termios
16 _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
17 return err == 0
18}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 0000000..1f0c6bf
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
1// +build solaris
2// +build !appengine
3
4package isatty
5
6import (
7 "golang.org/x/sys/unix"
8)
9
10// IsTerminal returns true if the given file descriptor is a terminal.
11// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
12func IsTerminal(fd uintptr) bool {
13 var termio unix.Termio
14 err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
15 return err == nil
16}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 0000000..83c398b
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,19 @@
1// +build windows
2// +build !appengine
3
4package isatty
5
6import (
7 "syscall"
8 "unsafe"
9)
10
11var kernel32 = syscall.NewLazyDLL("kernel32.dll")
12var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
13
14// IsTerminal returns true if the file descriptor is a terminal.
15func IsTerminal(fd uintptr) bool {
16 var st uint32
17 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
18 return r != 0 && e == 0
19}
diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml
new file mode 100644
index 0000000..974234b
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/.travis.yml
@@ -0,0 +1,13 @@
1sudo: false
2
3language: go
4
5go:
6 - 1.8
7 - 1.9
8
9branches:
10 only:
11 - master
12
13script: make updatedeps test testrace
diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile
new file mode 100644
index 0000000..4874b00
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/Makefile
@@ -0,0 +1,20 @@
1TEST?=./...
2
3default: test
4
5# test runs the test suite and vets the code
6test:
7 go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS)
8
9# testrace runs the race checker
10testrace:
11 go list $(TEST) | xargs -n1 go test -race $(TESTARGS)
12
13# updatedeps installs all the dependencies to run and build
14updatedeps:
15 go list ./... \
16 | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \
17 | grep -v github.com/mitchellh/cli \
18 | xargs go get -f -u -v
19
20.PHONY: test testrace updatedeps
diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md
new file mode 100644
index 0000000..8f02cdd
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/README.md
@@ -0,0 +1,67 @@
1# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli)
2
3cli is a library for implementing powerful command-line interfaces in Go.
4cli is the library that powers the CLI for
5[Packer](https://github.com/mitchellh/packer),
6[Serf](https://github.com/hashicorp/serf),
7[Consul](https://github.com/hashicorp/consul),
8[Vault](https://github.com/hashicorp/vault),
9[Terraform](https://github.com/hashicorp/terraform), and
10[Nomad](https://github.com/hashicorp/nomad).
11
12## Features
13
14* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc.
15
16* Support for nested subcommands such as `cli foo bar`.
17
18* Optional support for default subcommands so `cli` does something
19 other than error.
20
21* Support for shell autocompletion of subcommands, flags, and arguments
22 with callbacks in Go. You don't need to write any shell code.
23
24* Automatic help generation for listing subcommands
25
26* Automatic help flag recognition of `-h`, `--help`, etc.
27
28* Automatic version flag recognition of `-v`, `--version`.
29
30* Helpers for interacting with the terminal, such as outputting information,
31 asking for input, etc. These are optional, you can always interact with the
32 terminal however you choose.
33
34* Use of Go interfaces/types makes augmenting various parts of the library a
35 piece of cake.
36
37## Example
38
39Below is a simple example of creating and running a CLI
40
41```go
42package main
43
44import (
45 "log"
46 "os"
47
48 "github.com/mitchellh/cli"
49)
50
51func main() {
52 c := cli.NewCLI("app", "1.0.0")
53 c.Args = os.Args[1:]
54 c.Commands = map[string]cli.CommandFactory{
55 "foo": fooCommandFactory,
56 "bar": barCommandFactory,
57 }
58
59 exitStatus, err := c.Run()
60 if err != nil {
61 log.Println(err)
62 }
63
64 os.Exit(exitStatus)
65}
66```
67
diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go
new file mode 100644
index 0000000..3bec625
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/autocomplete.go
@@ -0,0 +1,43 @@
1package cli
2
3import (
4 "github.com/posener/complete/cmd/install"
5)
6
7// autocompleteInstaller is an interface to be implemented to perform the
8// autocomplete installation and uninstallation with a CLI.
9//
10// This interface is not exported because it only exists for unit tests
11// to be able to test that the installation is called properly.
12type autocompleteInstaller interface {
13 Install(string) error
14 Uninstall(string) error
15}
16
17// realAutocompleteInstaller uses the real install package to do the
18// install/uninstall.
19type realAutocompleteInstaller struct{}
20
21func (i *realAutocompleteInstaller) Install(cmd string) error {
22 return install.Install(cmd)
23}
24
25func (i *realAutocompleteInstaller) Uninstall(cmd string) error {
26 return install.Uninstall(cmd)
27}
28
29// mockAutocompleteInstaller is used for tests to record the install/uninstall.
30type mockAutocompleteInstaller struct {
31 InstallCalled bool
32 UninstallCalled bool
33}
34
35func (i *mockAutocompleteInstaller) Install(cmd string) error {
36 i.InstallCalled = true
37 return nil
38}
39
40func (i *mockAutocompleteInstaller) Uninstall(cmd string) error {
41 i.UninstallCalled = true
42 return nil
43}
diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go
new file mode 100644
index 0000000..a25a582
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/cli.go
@@ -0,0 +1,715 @@
1package cli
2
3import (
4 "fmt"
5 "io"
6 "os"
7 "regexp"
8 "sort"
9 "strings"
10 "sync"
11 "text/template"
12
13 "github.com/armon/go-radix"
14 "github.com/posener/complete"
15)
16
17// CLI contains the state necessary to run subcommands and parse the
18// command line arguments.
19//
20// CLI also supports nested subcommands, such as "cli foo bar". To use
21// nested subcommands, the key in the Commands mapping below contains the
22// full subcommand. In this example, it would be "foo bar".
23//
24// If you use a CLI with nested subcommands, some semantics change due to
25// ambiguities:
26//
27// * We use longest prefix matching to find a matching subcommand. This
28// means if you register "foo bar" and the user executes "cli foo qux",
29// the "foo" command will be executed with the arg "qux". It is up to
30// you to handle these args. One option is to just return the special
31// help return code `RunResultHelp` to display help and exit.
32//
33// * The help flag "-h" or "-help" will look at all args to determine
34// the help function. For example: "otto apps list -h" will show the
35// help for "apps list" but "otto apps -h" will show it for "apps".
36// In the normal CLI, only the first subcommand is used.
37//
38// * The help flag will list any subcommands that a command takes
39// as well as the command's help itself. If there are no subcommands,
40// it will note this. If the CLI itself has no subcommands, this entire
41// section is omitted.
42//
43// * Any parent commands that don't exist are automatically created as
44// no-op commands that just show help for other subcommands. For example,
45// if you only register "foo bar", then "foo" is automatically created.
46//
47type CLI struct {
48 // Args is the list of command-line arguments received excluding
49 // the name of the app. For example, if the command "./cli foo bar"
50 // was invoked, then Args should be []string{"foo", "bar"}.
51 Args []string
52
53 // Commands is a mapping of subcommand names to a factory function
54 // for creating that Command implementation. If there is a command
55 // with a blank string "", then it will be used as the default command
56 // if no subcommand is specified.
57 //
58 // If the key has a space in it, this will create a nested subcommand.
59 // For example, if the key is "foo bar", then to access it our CLI
60 // must be accessed with "./cli foo bar". See the docs for CLI for
61 // notes on how this changes some other behavior of the CLI as well.
62 //
63 // The factory should be as cheap as possible, ideally only allocating
64 // a struct. The factory may be called multiple times in the course
65 // of a command execution and certain events such as help require the
66 // instantiation of all commands. Expensive initialization should be
67 // deferred to function calls within the interface implementation.
68 Commands map[string]CommandFactory
69
70 // HiddenCommands is a list of commands that are "hidden". Hidden
71 // commands are not given to the help function callback and do not
72 // show up in autocomplete. The values in the slice should be equivalent
73 // to the keys in the command map.
74 HiddenCommands []string
75
76 // Name defines the name of the CLI.
77 Name string
78
79 // Version of the CLI.
80 Version string
81
82 // Autocomplete enables or disables subcommand auto-completion support.
83 // This is enabled by default when NewCLI is called. Otherwise, this
84	// must be enabled explicitly.
85 //
86 // Autocomplete requires the "Name" option to be set on CLI. This name
87 // should be set exactly to the binary name that is autocompleted.
88 //
89 // Autocompletion is supported via the github.com/posener/complete
90 // library. This library supports both bash and zsh. To add support
91 // for other shells, please see that library.
92 //
93 // AutocompleteInstall and AutocompleteUninstall are the global flag
94 // names for installing and uninstalling the autocompletion handlers
95 // for the user's shell. The flag should omit the hyphen(s) in front of
96 // the value. Both single and double hyphens will automatically be supported
97 // for the flag name. These default to `autocomplete-install` and
98 // `autocomplete-uninstall` respectively.
99 //
100 // AutocompleteNoDefaultFlags is a boolean which controls if the default auto-
101 // complete flags like -help and -version are added to the output.
102 //
103 // AutocompleteGlobalFlags are a mapping of global flags for
104 // autocompletion. The help and version flags are automatically added.
105 Autocomplete bool
106 AutocompleteInstall string
107 AutocompleteUninstall string
108 AutocompleteNoDefaultFlags bool
109 AutocompleteGlobalFlags complete.Flags
110 autocompleteInstaller autocompleteInstaller // For tests
111
112 // HelpFunc and HelpWriter are used to output help information, if
113 // requested.
114 //
115 // HelpFunc is the function called to generate the generic help
116 // text that is shown if help must be shown for the CLI that doesn't
117 // pertain to a specific command.
118 //
119	// HelpWriter is the Writer that the help text is written to. If
120 // not specified, it will default to Stderr.
121 HelpFunc HelpFunc
122 HelpWriter io.Writer
123
124 //---------------------------------------------------------------
125 // Internal fields set automatically
126
127 once sync.Once
128 autocomplete *complete.Complete
129 commandTree *radix.Tree
130 commandNested bool
131 commandHidden map[string]struct{}
132 subcommand string
133 subcommandArgs []string
134 topFlags []string
135
136 // These are true when special global flags are set. We can/should
137 // probably use a bitset for this one day.
138 isHelp bool
139 isVersion bool
140 isAutocompleteInstall bool
141 isAutocompleteUninstall bool
142}
143
144// NewCLI returns a new CLI instance with sensible defaults.
145func NewCLI(app, version string) *CLI {
146 return &CLI{
147 Name: app,
148 Version: version,
149 HelpFunc: BasicHelpFunc(app),
150 Autocomplete: true,
151 }
152
153}
154
155// IsHelp returns whether or not the help flag is present within the
156// arguments.
157func (c *CLI) IsHelp() bool {
158 c.once.Do(c.init)
159 return c.isHelp
160}
161
162// IsVersion returns whether or not the version flag is present within the
163// arguments.
164func (c *CLI) IsVersion() bool {
165 c.once.Do(c.init)
166 return c.isVersion
167}
168
169// Run runs the actual CLI based on the arguments given.
170func (c *CLI) Run() (int, error) {
171 c.once.Do(c.init)
172
173	// If this is an autocompletion request, satisfy it. This must be called
174	// first before anything else since it's possible to be autocompleting
175 // -help or -version or other flags and we want to show completions
176 // and not actually write the help or version.
177 if c.Autocomplete && c.autocomplete.Complete() {
178 return 0, nil
179 }
180
181 // Just show the version and exit if instructed.
182 if c.IsVersion() && c.Version != "" {
183 c.HelpWriter.Write([]byte(c.Version + "\n"))
184 return 0, nil
185 }
186
187 // Just print the help when only '-h' or '--help' is passed.
188 if c.IsHelp() && c.Subcommand() == "" {
189 c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n"))
190 return 0, nil
191 }
192
193	// If we're attempting to install or uninstall autocomplete, handle it here.
194 if c.Autocomplete {
195 // Autocomplete requires the "Name" to be set so that we know what
196	// command to set up the autocomplete on.
197 if c.Name == "" {
198 return 1, fmt.Errorf(
199 "internal error: CLI.Name must be specified for autocomplete to work")
200 }
201
202 // If both install and uninstall flags are specified, then error
203 if c.isAutocompleteInstall && c.isAutocompleteUninstall {
204 return 1, fmt.Errorf(
205 "Either the autocomplete install or uninstall flag may " +
206 "be specified, but not both.")
207 }
208
209 // If the install flag is specified, perform the install or uninstall
210 if c.isAutocompleteInstall {
211 if err := c.autocompleteInstaller.Install(c.Name); err != nil {
212 return 1, err
213 }
214
215 return 0, nil
216 }
217
218 if c.isAutocompleteUninstall {
219 if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil {
220 return 1, err
221 }
222
223 return 0, nil
224 }
225 }
226
227 // Attempt to get the factory function for creating the command
228 // implementation. If the command is invalid or blank, it is an error.
229 raw, ok := c.commandTree.Get(c.Subcommand())
230 if !ok {
231 c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n"))
232 return 127, nil
233 }
234
235 command, err := raw.(CommandFactory)()
236 if err != nil {
237 return 1, err
238 }
239
240 // If we've been instructed to just print the help, then print it
241 if c.IsHelp() {
242 c.commandHelp(command)
243 return 0, nil
244 }
245
246 // If there is an invalid flag, then error
247 if len(c.topFlags) > 0 {
248 c.HelpWriter.Write([]byte(
249 "Invalid flags before the subcommand. If these flags are for\n" +
250 "the subcommand, please put them after the subcommand.\n\n"))
251 c.commandHelp(command)
252 return 1, nil
253 }
254
255 code := command.Run(c.SubcommandArgs())
256 if code == RunResultHelp {
257 // Requesting help
258 c.commandHelp(command)
259 return 1, nil
260 }
261
262 return code, nil
263}
264
265// Subcommand returns the subcommand that the CLI would execute. For
266// example, a CLI from "--version version --help" would return a Subcommand
267// of "version"
268func (c *CLI) Subcommand() string {
269 c.once.Do(c.init)
270 return c.subcommand
271}
272
273// SubcommandArgs returns the arguments that will be passed to the
274// subcommand.
275func (c *CLI) SubcommandArgs() []string {
276 c.once.Do(c.init)
277 return c.subcommandArgs
278}
279
280// subcommandParent returns the parent of this subcommand, if there is one.
281// If there isn't one, "" is returned.
282func (c *CLI) subcommandParent() string {
283	// Get the subcommand; if it is "" already, just return
284 sub := c.Subcommand()
285 if sub == "" {
286 return sub
287 }
288
289 // Clear any trailing spaces and find the last space
290 sub = strings.TrimRight(sub, " ")
291 idx := strings.LastIndex(sub, " ")
292
293 if idx == -1 {
294 // No space means our parent is root
295 return ""
296 }
297
298 return sub[:idx]
299}
300
301func (c *CLI) init() {
302 if c.HelpFunc == nil {
303 c.HelpFunc = BasicHelpFunc("app")
304
305 if c.Name != "" {
306 c.HelpFunc = BasicHelpFunc(c.Name)
307 }
308 }
309
310 if c.HelpWriter == nil {
311 c.HelpWriter = os.Stderr
312 }
313
314 // Build our hidden commands
315 if len(c.HiddenCommands) > 0 {
316 c.commandHidden = make(map[string]struct{})
317 for _, h := range c.HiddenCommands {
318 c.commandHidden[h] = struct{}{}
319 }
320 }
321
322 // Build our command tree
323 c.commandTree = radix.New()
324 c.commandNested = false
325 for k, v := range c.Commands {
326 k = strings.TrimSpace(k)
327 c.commandTree.Insert(k, v)
328 if strings.ContainsRune(k, ' ') {
329 c.commandNested = true
330 }
331 }
332
333 // Go through the key and fill in any missing parent commands
334 if c.commandNested {
335 var walkFn radix.WalkFn
336 toInsert := make(map[string]struct{})
337 walkFn = func(k string, raw interface{}) bool {
338 idx := strings.LastIndex(k, " ")
339 if idx == -1 {
340 // If there is no space, just ignore top level commands
341 return false
342 }
343
344 // Trim up to that space so we can get the expected parent
345 k = k[:idx]
346 if _, ok := c.commandTree.Get(k); ok {
347 // Yay we have the parent!
348 return false
349 }
350
351 // We're missing the parent, so let's insert this
352 toInsert[k] = struct{}{}
353
354 // Call the walk function recursively so we check this one too
355 return walkFn(k, nil)
356 }
357
358 // Walk!
359 c.commandTree.Walk(walkFn)
360
361 // Insert any that we're missing
362 for k := range toInsert {
363 var f CommandFactory = func() (Command, error) {
364 return &MockCommand{
365 HelpText: "This command is accessed by using one of the subcommands below.",
366 RunResult: RunResultHelp,
367 }, nil
368 }
369
370 c.commandTree.Insert(k, f)
371 }
372 }
373
374	// Set up autocomplete if we have it enabled. We have to do this after
375	// the command tree is set up so we can use the radix tree to easily find
376 // all subcommands.
377 if c.Autocomplete {
378 c.initAutocomplete()
379 }
380
381 // Process the args
382 c.processArgs()
383}
384
385func (c *CLI) initAutocomplete() {
386 if c.AutocompleteInstall == "" {
387 c.AutocompleteInstall = defaultAutocompleteInstall
388 }
389
390 if c.AutocompleteUninstall == "" {
391 c.AutocompleteUninstall = defaultAutocompleteUninstall
392 }
393
394 if c.autocompleteInstaller == nil {
395 c.autocompleteInstaller = &realAutocompleteInstaller{}
396 }
397
398 // Build the root command
399 cmd := c.initAutocompleteSub("")
400
401 // For the root, we add the global flags to the "Flags". This way
402 // they don't show up on every command.
403 if !c.AutocompleteNoDefaultFlags {
404 cmd.Flags = map[string]complete.Predictor{
405 "-" + c.AutocompleteInstall: complete.PredictNothing,
406 "-" + c.AutocompleteUninstall: complete.PredictNothing,
407 "-help": complete.PredictNothing,
408 "-version": complete.PredictNothing,
409 }
410 }
411 cmd.GlobalFlags = c.AutocompleteGlobalFlags
412
413 c.autocomplete = complete.New(c.Name, cmd)
414}
415
416// initAutocompleteSub creates the complete.Command for a subcommand with
417// the given prefix. This will continue recursively for all subcommands.
418// The prefix "" (empty string) can be used for the root command.
419func (c *CLI) initAutocompleteSub(prefix string) complete.Command {
420 var cmd complete.Command
421 walkFn := func(k string, raw interface{}) bool {
422 // Keep track of the full key so that we can nest further if necessary
423 fullKey := k
424
425 if len(prefix) > 0 {
426 // If we have a prefix, trim the prefix + 1 (for the space)
427 // Example: turns "sub one" to "one" with prefix "sub"
428 k = k[len(prefix)+1:]
429 }
430
431 if idx := strings.Index(k, " "); idx >= 0 {
432 // If there is a space, we trim up to the space. This turns
433 // "sub sub2 sub3" into "sub". The prefix trim above will
434 // trim our current depth properly.
435 k = k[:idx]
436 }
437
438 if _, ok := cmd.Sub[k]; ok {
439 // If we already tracked this subcommand then ignore
440 return false
441 }
442
443 // If the command is hidden, don't record it at all
444 if _, ok := c.commandHidden[fullKey]; ok {
445 return false
446 }
447
448 if cmd.Sub == nil {
449 cmd.Sub = complete.Commands(make(map[string]complete.Command))
450 }
451 subCmd := c.initAutocompleteSub(fullKey)
452
453 // Instantiate the command so that we can check if the command is
454 // a CommandAutocomplete implementation. If there is an error
455 // creating the command, we just ignore it since that will be caught
456 // later.
457 impl, err := raw.(CommandFactory)()
458 if err != nil {
459 impl = nil
460 }
461
462	// Check if it implements CommandAutocomplete. If so, set up the autocomplete
463 if c, ok := impl.(CommandAutocomplete); ok {
464 subCmd.Args = c.AutocompleteArgs()
465 subCmd.Flags = c.AutocompleteFlags()
466 }
467
468 cmd.Sub[k] = subCmd
469 return false
470 }
471
472 walkPrefix := prefix
473 if walkPrefix != "" {
474 walkPrefix += " "
475 }
476
477 c.commandTree.WalkPrefix(walkPrefix, walkFn)
478 return cmd
479}
480
481func (c *CLI) commandHelp(command Command) {
482 // Get the template to use
483 tpl := strings.TrimSpace(defaultHelpTemplate)
484 if t, ok := command.(CommandHelpTemplate); ok {
485 tpl = t.HelpTemplate()
486 }
487 if !strings.HasSuffix(tpl, "\n") {
488 tpl += "\n"
489 }
490
491 // Parse it
492 t, err := template.New("root").Parse(tpl)
493 if err != nil {
494 t = template.Must(template.New("root").Parse(fmt.Sprintf(
495 "Internal error! Failed to parse command help template: %s\n", err)))
496 }
497
498 // Template data
499 data := map[string]interface{}{
500 "Name": c.Name,
501 "Help": command.Help(),
502 }
503
504 // Build subcommand list if we have it
505 var subcommandsTpl []map[string]interface{}
506 if c.commandNested {
507 // Get the matching keys
508 subcommands := c.helpCommands(c.Subcommand())
509 keys := make([]string, 0, len(subcommands))
510 for k := range subcommands {
511 keys = append(keys, k)
512 }
513
514 // Sort the keys
515 sort.Strings(keys)
516
517 // Figure out the padding length
518 var longest int
519 for _, k := range keys {
520 if v := len(k); v > longest {
521 longest = v
522 }
523 }
524
525 // Go through and create their structures
526 subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands))
527 for _, k := range keys {
528 // Get the command
529 raw, ok := subcommands[k]
530 if !ok {
531 c.HelpWriter.Write([]byte(fmt.Sprintf(
532 "Error getting subcommand %q", k)))
533 }
534 sub, err := raw()
535 if err != nil {
536 c.HelpWriter.Write([]byte(fmt.Sprintf(
537 "Error instantiating %q: %s", k, err)))
538 }
539
540 // Find the last space and make sure we only include that last part
541 name := k
542 if idx := strings.LastIndex(k, " "); idx > -1 {
543 name = name[idx+1:]
544 }
545
546 subcommandsTpl = append(subcommandsTpl, map[string]interface{}{
547 "Name": name,
548 "NameAligned": name + strings.Repeat(" ", longest-len(k)),
549 "Help": sub.Help(),
550 "Synopsis": sub.Synopsis(),
551 })
552 }
553 }
554 data["Subcommands"] = subcommandsTpl
555
556 // Write
557 err = t.Execute(c.HelpWriter, data)
558 if err == nil {
559 return
560 }
561
562 // An error, just output...
563 c.HelpWriter.Write([]byte(fmt.Sprintf(
564 "Internal error rendering help: %s", err)))
565}
566
567// helpCommands returns the subcommands for the HelpFunc argument.
568// This will only contain immediate subcommands.
569func (c *CLI) helpCommands(prefix string) map[string]CommandFactory {
570 // If our prefix isn't empty, make sure it ends in ' '
571 if prefix != "" && prefix[len(prefix)-1] != ' ' {
572 prefix += " "
573 }
574
575 // Get all the subkeys of this command
576 var keys []string
577 c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool {
578 // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar"
579 if !strings.Contains(k[len(prefix):], " ") {
580 keys = append(keys, k)
581 }
582
583 return false
584 })
585
586 // For each of the keys return that in the map
587 result := make(map[string]CommandFactory, len(keys))
588 for _, k := range keys {
589 raw, ok := c.commandTree.Get(k)
590 if !ok {
591 // We just got it via WalkPrefix above, so we just panic
592 panic("not found: " + k)
593 }
594
595 // If this is a hidden command, don't show it
596 if _, ok := c.commandHidden[k]; ok {
597 continue
598 }
599
600 result[k] = raw.(CommandFactory)
601 }
602
603 return result
604}
605
606func (c *CLI) processArgs() {
607 for i, arg := range c.Args {
608 if arg == "--" {
609 break
610 }
611
612 // Check for help flags.
613 if arg == "-h" || arg == "-help" || arg == "--help" {
614 c.isHelp = true
615 continue
616 }
617
618 // Check for autocomplete flags
619 if c.Autocomplete {
620 if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall {
621 c.isAutocompleteInstall = true
622 continue
623 }
624
625 if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall {
626 c.isAutocompleteUninstall = true
627 continue
628 }
629 }
630
631 if c.subcommand == "" {
632 // Check for version flags if not in a subcommand.
633 if arg == "-v" || arg == "-version" || arg == "--version" {
634 c.isVersion = true
635 continue
636 }
637
638 if arg != "" && arg[0] == '-' {
639 // Record the arg...
640 c.topFlags = append(c.topFlags, arg)
641 }
642 }
643
644 // If we didn't find a subcommand yet and this is the first non-flag
645 // argument, then this is our subcommand.
646 if c.subcommand == "" && arg != "" && arg[0] != '-' {
647 c.subcommand = arg
648 if c.commandNested {
649 // If the command has a space in it, then it is invalid.
650 // Set a blank command so that it fails.
651 if strings.ContainsRune(arg, ' ') {
652 c.subcommand = ""
653 return
654 }
655
656	// Determine the argument we look at to end subcommands.
657 // We look at all arguments until one has a space. This
658 // disallows commands like: ./cli foo "bar baz". An argument
659 // with a space is always an argument.
660 j := 0
661 for k, v := range c.Args[i:] {
662 if strings.ContainsRune(v, ' ') {
663 break
664 }
665
666 j = i + k + 1
667 }
668
669 // Nested CLI, the subcommand is actually the entire
670 // arg list up to a flag that is still a valid subcommand.
671 searchKey := strings.Join(c.Args[i:j], " ")
672 k, _, ok := c.commandTree.LongestPrefix(searchKey)
673 if ok {
674 // k could be a prefix that doesn't contain the full
675 // command such as "foo" instead of "foobar", so we
676 // need to verify that we have an entire key. To do that,
677 // we look for an ending in a space or an end of string.
678 reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`)
679 if reVerify.MatchString(searchKey) {
680 c.subcommand = k
681 i += strings.Count(k, " ")
682 }
683 }
684 }
685
686	// The remaining args are the subcommand arguments
687 c.subcommandArgs = c.Args[i+1:]
688 }
689 }
690
691 // If we never found a subcommand and support a default command, then
692 // switch to using that.
693 if c.subcommand == "" {
694 if _, ok := c.Commands[""]; ok {
695 args := c.topFlags
696 args = append(args, c.subcommandArgs...)
697 c.topFlags = nil
698 c.subcommandArgs = args
699 }
700 }
701}
702
703// defaultAutocompleteInstall and defaultAutocompleteUninstall are the
704// default values for the autocomplete install and uninstall flags.
705const defaultAutocompleteInstall = "autocomplete-install"
706const defaultAutocompleteUninstall = "autocomplete-uninstall"
707
708const defaultHelpTemplate = `
709{{.Help}}{{if gt (len .Subcommands) 0}}
710
711Subcommands:
712{{- range $value := .Subcommands }}
713 {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }}
714{{- end }}
715`
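For reference alongside the vendored file above: a minimal, hypothetical program wiring up this CLI with a nested subcommand might look like the sketch below. The `app` binary name and the `fooBarCommand` type are illustrative only; the rest uses the API defined in cli.go above.

```go
package main

import (
	"log"
	"os"

	"github.com/mitchellh/cli"
)

// fooBarCommand is a hypothetical command used only for illustration.
type fooBarCommand struct{}

func (c *fooBarCommand) Help() string     { return "Usage: app foo bar [options]" }
func (c *fooBarCommand) Synopsis() string { return "Does the bar thing under foo" }
func (c *fooBarCommand) Run(args []string) int {
	// Real work would happen here; return the process exit code.
	return 0
}

func main() {
	c := cli.NewCLI("app", "1.0.0")
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		// Registering only "foo bar" is enough; the parent "foo" is
		// auto-created as a no-op command that prints help.
		"foo bar": func() (cli.Command, error) {
			return &fooBarCommand{}, nil
		},
	}

	exitStatus, err := c.Run()
	if err != nil {
		log.Println(err)
	}
	os.Exit(exitStatus)
}
```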
diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go
new file mode 100644
index 0000000..bed11fa
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/command.go
@@ -0,0 +1,67 @@
1package cli
2
3import (
4 "github.com/posener/complete"
5)
6
7const (
8 // RunResultHelp is a value that can be returned from Run to signal
9 // to the CLI to render the help output.
10 RunResultHelp = -18511
11)
12
13// A command is a runnable sub-command of a CLI.
14type Command interface {
15 // Help should return long-form help text that includes the command-line
16 // usage, a brief few sentences explaining the function of the command,
17 // and the complete list of flags the command accepts.
18 Help() string
19
20 // Run should run the actual command with the given CLI instance and
21 // command-line arguments. It should return the exit status when it is
22 // finished.
23 //
24 // There are a handful of special exit codes this can return documented
25 // above that change behavior.
26 Run(args []string) int
27
28 // Synopsis should return a one-line, short synopsis of the command.
29 // This should be less than 50 characters ideally.
30 Synopsis() string
31}
32
33// CommandAutocomplete is an extension of Command that enables fine-grained
34// autocompletion. Subcommand autocompletion will work even if this interface
35// is not implemented. By implementing this interface, more advanced
36// autocompletion is enabled.
37type CommandAutocomplete interface {
38 // AutocompleteArgs returns the argument predictor for this command.
39 // If argument completion is not supported, this should return
40 // complete.PredictNothing.
41 AutocompleteArgs() complete.Predictor
42
43 // AutocompleteFlags returns a mapping of supported flags and autocomplete
44 // options for this command. The map key for the Flags map should be the
45 // complete flag such as "-foo" or "--foo".
46 AutocompleteFlags() complete.Flags
47}
48
49// CommandHelpTemplate is an extension of Command that also has a function
50// for returning a template for the help rather than the help itself. In
51// this scenario, both Help and HelpTemplate should be implemented.
52//
53// If CommandHelpTemplate isn't implemented, the Help is output as-is.
54type CommandHelpTemplate interface {
55 // HelpTemplate is the template in text/template format to use for
56 // displaying the Help. The keys available are:
57 //
58 // * ".Help" - The help text itself
59 // * ".Subcommands"
60 //
61 HelpTemplate() string
62}
63
64// CommandFactory is a type of function that is a factory for commands.
65// We need a factory because we may need to setup some state on the
66// struct that implements the command itself.
67type CommandFactory func() (Command, error)
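As a sketch of how a command might opt into the fine-grained autocompletion described above (the `deployCommand` type and its `-env` flag are hypothetical):

```go
package mycli

import (
	"github.com/mitchellh/cli"
	"github.com/posener/complete"
)

// deployCommand is a hypothetical command shown only to illustrate the
// Command and CommandAutocomplete interfaces defined above.
type deployCommand struct{}

func (c *deployCommand) Help() string          { return "Usage: app deploy [-env=<name>] <target>" }
func (c *deployCommand) Synopsis() string      { return "Deploy a target" }
func (c *deployCommand) Run(args []string) int { return 0 }

// AutocompleteArgs predicts positional arguments (here: existing files).
func (c *deployCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictFiles("*")
}

// AutocompleteFlags predicts the flags this command accepts.
func (c *deployCommand) AutocompleteFlags() complete.Flags {
	return complete.Flags{
		"-env": complete.PredictAnything,
	}
}

// Both interfaces are satisfied at compile time.
var (
	_ cli.Command             = (*deployCommand)(nil)
	_ cli.CommandAutocomplete = (*deployCommand)(nil)
)
```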
diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go
new file mode 100644
index 0000000..7a584b7
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/command_mock.go
@@ -0,0 +1,63 @@
1package cli
2
3import (
4 "github.com/posener/complete"
5)
6
7// MockCommand is an implementation of Command that can be used for tests.
8// It is publicly exported from this package in case you want to use it
9// externally.
10type MockCommand struct {
11 // Settable
12 HelpText string
13 RunResult int
14 SynopsisText string
15
16 // Set by the command
17 RunCalled bool
18 RunArgs []string
19}
20
21func (c *MockCommand) Help() string {
22 return c.HelpText
23}
24
25func (c *MockCommand) Run(args []string) int {
26 c.RunCalled = true
27 c.RunArgs = args
28
29 return c.RunResult
30}
31
32func (c *MockCommand) Synopsis() string {
33 return c.SynopsisText
34}
35
36// MockCommandAutocomplete is an implementation of CommandAutocomplete.
37type MockCommandAutocomplete struct {
38 MockCommand
39
40 // Settable
41 AutocompleteArgsValue complete.Predictor
42 AutocompleteFlagsValue complete.Flags
43}
44
45func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor {
46 return c.AutocompleteArgsValue
47}
48
49func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags {
50 return c.AutocompleteFlagsValue
51}
52
53// MockCommandHelpTemplate is an implementation of CommandHelpTemplate.
54type MockCommandHelpTemplate struct {
55 MockCommand
56
57 // Settable
58 HelpTemplateText string
59}
60
61func (c *MockCommandHelpTemplate) HelpTemplate() string {
62 return c.HelpTemplateText
63}
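A hedged example of how MockCommand could be used in a test to verify dispatch and argument passing; the command name "hello" and the flag are arbitrary:

```go
package mycli

import (
	"testing"

	"github.com/mitchellh/cli"
)

func TestCLIRunsMockCommand(t *testing.T) {
	mock := &cli.MockCommand{RunResult: 0, SynopsisText: "a test command"}

	c := cli.NewCLI("app", "0.0.1")
	c.Args = []string{"hello", "-flag"}
	c.Commands = map[string]cli.CommandFactory{
		"hello": func() (cli.Command, error) { return mock, nil },
	}

	code, err := c.Run()
	if err != nil {
		t.Fatal(err)
	}
	if code != 0 || !mock.RunCalled {
		t.Fatalf("unexpected result: code=%d, called=%v", code, mock.RunCalled)
	}
	if got := mock.RunArgs; len(got) != 1 || got[0] != "-flag" {
		t.Fatalf("unexpected args: %#v", got)
	}
}
```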
diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go
new file mode 100644
index 0000000..f5ca58f
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/help.go
@@ -0,0 +1,79 @@
1package cli
2
3import (
4 "bytes"
5 "fmt"
6 "log"
7 "sort"
8 "strings"
9)
10
11// HelpFunc is the type of the function that is responsible for generating
12// the help output when the CLI must show the general help text.
13type HelpFunc func(map[string]CommandFactory) string
14
15// BasicHelpFunc generates some basic help output that is usually good enough
16// for most CLI applications.
17func BasicHelpFunc(app string) HelpFunc {
18 return func(commands map[string]CommandFactory) string {
19 var buf bytes.Buffer
20 buf.WriteString(fmt.Sprintf(
21 "Usage: %s [--version] [--help] <command> [<args>]\n\n",
22 app))
23 buf.WriteString("Available commands are:\n")
24
25 // Get the list of keys so we can sort them, and also get the maximum
26 // key length so they can be aligned properly.
27 keys := make([]string, 0, len(commands))
28 maxKeyLen := 0
29 for key := range commands {
30 if len(key) > maxKeyLen {
31 maxKeyLen = len(key)
32 }
33
34 keys = append(keys, key)
35 }
36 sort.Strings(keys)
37
38 for _, key := range keys {
39 commandFunc, ok := commands[key]
40 if !ok {
41 // This should never happen since we JUST built the list of
42 // keys.
43 panic("command not found: " + key)
44 }
45
46 command, err := commandFunc()
47 if err != nil {
48 log.Printf("[ERR] cli: Command '%s' failed to load: %s",
49 key, err)
50 continue
51 }
52
53 key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
54 buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis()))
55 }
56
57 return buf.String()
58 }
59}
60
61// FilteredHelpFunc will filter the commands to only include the keys
62// in the include parameter.
63func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc {
64 return func(commands map[string]CommandFactory) string {
65 set := make(map[string]struct{})
66 for _, k := range include {
67 set[k] = struct{}{}
68 }
69
70 filtered := make(map[string]CommandFactory)
71 for k, f := range commands {
72 if _, ok := set[k]; ok {
73 filtered[k] = f
74 }
75 }
76
77 return f(filtered)
78 }
79}
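One possible way to combine BasicHelpFunc and FilteredHelpFunc; the command names are made up and MockCommand stands in for real commands:

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	// A trivial factory reused for every command; real code would return
	// distinct Command implementations.
	factory := func() (cli.Command, error) {
		return &cli.MockCommand{SynopsisText: "placeholder"}, nil
	}

	c := cli.NewCLI("app", "1.0.0")
	c.Args = os.Args[1:]
	c.Commands = map[string]cli.CommandFactory{
		"server":   factory,
		"agent":    factory,
		"internal": factory,
	}

	// Only "server" and "agent" are listed in the top-level help;
	// "internal" stays runnable but is not advertised.
	c.HelpFunc = cli.FilteredHelpFunc([]string{"server", "agent"}, cli.BasicHelpFunc("app"))

	code, _ := c.Run()
	os.Exit(code)
}
```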
diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go
new file mode 100644
index 0000000..a2d6f94
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui.go
@@ -0,0 +1,187 @@
1package cli
2
3import (
4 "bufio"
5 "errors"
6 "fmt"
7 "io"
8 "os"
9 "os/signal"
10 "strings"
11
12 "github.com/bgentry/speakeasy"
13 "github.com/mattn/go-isatty"
14)
15
16// Ui is an interface for interacting with the terminal, or "interface"
17// of a CLI. This abstraction doesn't have to be used, but helps provide
18// a simple, layerable way to manage user interactions.
19type Ui interface {
20 // Ask asks the user for input using the given query. The response is
21 // returned as the given string, or an error.
22 Ask(string) (string, error)
23
24 // AskSecret asks the user for input using the given query, but does not echo
25 // the keystrokes to the terminal.
26 AskSecret(string) (string, error)
27
28 // Output is called for normal standard output.
29 Output(string)
30
31 // Info is called for information related to the previous output.
32 // In general this may be the exact same as Output, but this gives
33 // Ui implementors some flexibility with output formats.
34 Info(string)
35
36 // Error is used for any error messages that might appear on standard
37 // error.
38 Error(string)
39
40 // Warn is used for any warning messages that might appear on standard
41 // error.
42 Warn(string)
43}
44
45// BasicUi is an implementation of Ui that just outputs to the given
46// writer. This UI is not threadsafe by default, but you can wrap it
47// in a ConcurrentUi to make it safe.
48type BasicUi struct {
49 Reader io.Reader
50 Writer io.Writer
51 ErrorWriter io.Writer
52}
53
54func (u *BasicUi) Ask(query string) (string, error) {
55 return u.ask(query, false)
56}
57
58func (u *BasicUi) AskSecret(query string) (string, error) {
59 return u.ask(query, true)
60}
61
62func (u *BasicUi) ask(query string, secret bool) (string, error) {
63 if _, err := fmt.Fprint(u.Writer, query+" "); err != nil {
64 return "", err
65 }
66
67 // Register for interrupts so that we can catch it and immediately
68 // return...
69 sigCh := make(chan os.Signal, 1)
70 signal.Notify(sigCh, os.Interrupt)
71 defer signal.Stop(sigCh)
72
73 // Ask for input in a go-routine so that we can ignore it.
74 errCh := make(chan error, 1)
75 lineCh := make(chan string, 1)
76 go func() {
77 var line string
78 var err error
79 if secret && isatty.IsTerminal(os.Stdin.Fd()) {
80 line, err = speakeasy.Ask("")
81 } else {
82 r := bufio.NewReader(u.Reader)
83 line, err = r.ReadString('\n')
84 }
85 if err != nil {
86 errCh <- err
87 return
88 }
89
90 lineCh <- strings.TrimRight(line, "\r\n")
91 }()
92
93 select {
94 case err := <-errCh:
95 return "", err
96 case line := <-lineCh:
97 return line, nil
98 case <-sigCh:
99 // Print a newline so that any further output starts properly
100 // on a new line.
101 fmt.Fprintln(u.Writer)
102
103 return "", errors.New("interrupted")
104 }
105}
106
107func (u *BasicUi) Error(message string) {
108 w := u.Writer
109 if u.ErrorWriter != nil {
110 w = u.ErrorWriter
111 }
112
113 fmt.Fprint(w, message)
114 fmt.Fprint(w, "\n")
115}
116
117func (u *BasicUi) Info(message string) {
118 u.Output(message)
119}
120
121func (u *BasicUi) Output(message string) {
122 fmt.Fprint(u.Writer, message)
123 fmt.Fprint(u.Writer, "\n")
124}
125
126func (u *BasicUi) Warn(message string) {
127 u.Error(message)
128}
129
130// PrefixedUi is an implementation of Ui that prefixes messages.
131type PrefixedUi struct {
132 AskPrefix string
133 AskSecretPrefix string
134 OutputPrefix string
135 InfoPrefix string
136 ErrorPrefix string
137 WarnPrefix string
138 Ui Ui
139}
140
141func (u *PrefixedUi) Ask(query string) (string, error) {
142 if query != "" {
143 query = fmt.Sprintf("%s%s", u.AskPrefix, query)
144 }
145
146 return u.Ui.Ask(query)
147}
148
149func (u *PrefixedUi) AskSecret(query string) (string, error) {
150 if query != "" {
151 query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query)
152 }
153
154 return u.Ui.AskSecret(query)
155}
156
157func (u *PrefixedUi) Error(message string) {
158 if message != "" {
159 message = fmt.Sprintf("%s%s", u.ErrorPrefix, message)
160 }
161
162 u.Ui.Error(message)
163}
164
165func (u *PrefixedUi) Info(message string) {
166 if message != "" {
167 message = fmt.Sprintf("%s%s", u.InfoPrefix, message)
168 }
169
170 u.Ui.Info(message)
171}
172
173func (u *PrefixedUi) Output(message string) {
174 if message != "" {
175 message = fmt.Sprintf("%s%s", u.OutputPrefix, message)
176 }
177
178 u.Ui.Output(message)
179}
180
181func (u *PrefixedUi) Warn(message string) {
182 if message != "" {
183 message = fmt.Sprintf("%s%s", u.WarnPrefix, message)
184 }
185
186 u.Ui.Warn(message)
187}
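A small, assumed usage sketch for BasicUi composed with PrefixedUi; the prefix strings and prompt are arbitrary:

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	base := &cli.BasicUi{
		Reader:      os.Stdin,
		Writer:      os.Stdout,
		ErrorWriter: os.Stderr,
	}

	// Prefix every message so output from this component is easy to spot.
	ui := &cli.PrefixedUi{
		OutputPrefix: "app: ",
		InfoPrefix:   "app: ",
		ErrorPrefix:  "app error: ",
		WarnPrefix:   "app warning: ",
		Ui:           base,
	}

	name, err := ui.Ask("What is your name?")
	if err != nil {
		ui.Error(err.Error())
		os.Exit(1)
	}
	ui.Output("Hello, " + name)
}
```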
diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go
new file mode 100644
index 0000000..e3d5131
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui_colored.go
@@ -0,0 +1,69 @@
1package cli
2
3import (
4 "fmt"
5)
6
7// UiColor is a posix shell color code to use.
8type UiColor struct {
9 Code int
10 Bold bool
11}
12
13// A list of colors that are useful. These are all non-bolded by default.
14var (
15 UiColorNone UiColor = UiColor{-1, false}
16 UiColorRed = UiColor{31, false}
17 UiColorGreen = UiColor{32, false}
18 UiColorYellow = UiColor{33, false}
19 UiColorBlue = UiColor{34, false}
20 UiColorMagenta = UiColor{35, false}
21 UiColorCyan = UiColor{36, false}
22)
23
24// ColoredUi is a Ui implementation that colors its output according
25// to the given color schemes for the given type of output.
26type ColoredUi struct {
27 OutputColor UiColor
28 InfoColor UiColor
29 ErrorColor UiColor
30 WarnColor UiColor
31 Ui Ui
32}
33
34func (u *ColoredUi) Ask(query string) (string, error) {
35 return u.Ui.Ask(u.colorize(query, u.OutputColor))
36}
37
38func (u *ColoredUi) AskSecret(query string) (string, error) {
39 return u.Ui.AskSecret(u.colorize(query, u.OutputColor))
40}
41
42func (u *ColoredUi) Output(message string) {
43 u.Ui.Output(u.colorize(message, u.OutputColor))
44}
45
46func (u *ColoredUi) Info(message string) {
47 u.Ui.Info(u.colorize(message, u.InfoColor))
48}
49
50func (u *ColoredUi) Error(message string) {
51 u.Ui.Error(u.colorize(message, u.ErrorColor))
52}
53
54func (u *ColoredUi) Warn(message string) {
55 u.Ui.Warn(u.colorize(message, u.WarnColor))
56}
57
58func (u *ColoredUi) colorize(message string, color UiColor) string {
59 if color.Code == -1 {
60 return message
61 }
62
63 attr := 0
64 if color.Bold {
65 attr = 1
66 }
67
68 return fmt.Sprintf("\033[%d;%dm%s\033[0m", attr, color.Code, message)
69}
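Similarly, a hypothetical ColoredUi setup that colors warnings and errors while leaving normal output untouched:

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	ui := &cli.ColoredUi{
		OutputColor: cli.UiColorNone,
		InfoColor:   cli.UiColorGreen,
		WarnColor:   cli.UiColorYellow,
		ErrorColor:  cli.UiColorRed,
		Ui:          &cli.BasicUi{Reader: os.Stdin, Writer: os.Stdout, ErrorWriter: os.Stderr},
	}

	ui.Info("starting up")        // green
	ui.Warn("cache is cold")      // yellow
	ui.Error("connection failed") // red
}
```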
diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go
new file mode 100644
index 0000000..b4f4dbf
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go
@@ -0,0 +1,54 @@
1package cli
2
3import (
4 "sync"
5)
6
7// ConcurrentUi is a wrapper around a Ui interface (and implements that
8// interface) making the underlying Ui concurrency safe.
9type ConcurrentUi struct {
10 Ui Ui
11 l sync.Mutex
12}
13
14func (u *ConcurrentUi) Ask(query string) (string, error) {
15 u.l.Lock()
16 defer u.l.Unlock()
17
18 return u.Ui.Ask(query)
19}
20
21func (u *ConcurrentUi) AskSecret(query string) (string, error) {
22 u.l.Lock()
23 defer u.l.Unlock()
24
25 return u.Ui.AskSecret(query)
26}
27
28func (u *ConcurrentUi) Error(message string) {
29 u.l.Lock()
30 defer u.l.Unlock()
31
32 u.Ui.Error(message)
33}
34
35func (u *ConcurrentUi) Info(message string) {
36 u.l.Lock()
37 defer u.l.Unlock()
38
39 u.Ui.Info(message)
40}
41
42func (u *ConcurrentUi) Output(message string) {
43 u.l.Lock()
44 defer u.l.Unlock()
45
46 u.Ui.Output(message)
47}
48
49func (u *ConcurrentUi) Warn(message string) {
50 u.l.Lock()
51 defer u.l.Unlock()
52
53 u.Ui.Warn(message)
54}
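If several goroutines need to share one Ui, ConcurrentUi can wrap it; a rough sketch (the worker count and messages are arbitrary):

```go
package main

import (
	"fmt"
	"os"
	"sync"

	"github.com/mitchellh/cli"
)

func main() {
	// Wrap a BasicUi so several goroutines can write without racing.
	ui := &cli.ConcurrentUi{
		Ui: &cli.BasicUi{Reader: os.Stdin, Writer: os.Stdout, ErrorWriter: os.Stderr},
	}

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			ui.Output(fmt.Sprintf("worker %d finished", n)) // serialized by the mutex
		}(i)
	}
	wg.Wait()
}
```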
diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go
new file mode 100644
index 0000000..0bfe0a1
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui_mock.go
@@ -0,0 +1,111 @@
1package cli
2
3import (
4 "bytes"
5 "fmt"
6 "io"
7 "sync"
8)
9
10// NewMockUi returns a fully initialized MockUi instance
11// which is safe for concurrent use.
12func NewMockUi() *MockUi {
13 m := new(MockUi)
14 m.once.Do(m.init)
15 return m
16}
17
18// MockUi is a mock UI that is used for tests and is exported publicly
19// for use in external tests if needed as well. Do not instantiate this
20// directly since the buffers will be initialized on the first write. If
21// there is no write then you will get a nil panic. Please use the
22// NewMockUi() constructor function instead. You can fix your code with
23//
24// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go
25type MockUi struct {
26 InputReader io.Reader
27 ErrorWriter *syncBuffer
28 OutputWriter *syncBuffer
29
30 once sync.Once
31}
32
33func (u *MockUi) Ask(query string) (string, error) {
34 u.once.Do(u.init)
35
36 var result string
37 fmt.Fprint(u.OutputWriter, query)
38 if _, err := fmt.Fscanln(u.InputReader, &result); err != nil {
39 return "", err
40 }
41
42 return result, nil
43}
44
45func (u *MockUi) AskSecret(query string) (string, error) {
46 return u.Ask(query)
47}
48
49func (u *MockUi) Error(message string) {
50 u.once.Do(u.init)
51
52 fmt.Fprint(u.ErrorWriter, message)
53 fmt.Fprint(u.ErrorWriter, "\n")
54}
55
56func (u *MockUi) Info(message string) {
57 u.Output(message)
58}
59
60func (u *MockUi) Output(message string) {
61 u.once.Do(u.init)
62
63 fmt.Fprint(u.OutputWriter, message)
64 fmt.Fprint(u.OutputWriter, "\n")
65}
66
67func (u *MockUi) Warn(message string) {
68 u.once.Do(u.init)
69
70 fmt.Fprint(u.ErrorWriter, message)
71 fmt.Fprint(u.ErrorWriter, "\n")
72}
73
74func (u *MockUi) init() {
75 u.ErrorWriter = new(syncBuffer)
76 u.OutputWriter = new(syncBuffer)
77}
78
79type syncBuffer struct {
80 sync.RWMutex
81 b bytes.Buffer
82}
83
84func (b *syncBuffer) Write(data []byte) (int, error) {
85 b.Lock()
86 defer b.Unlock()
87 return b.b.Write(data)
88}
89
90func (b *syncBuffer) Read(data []byte) (int, error) {
91 b.RLock()
92 defer b.RUnlock()
93 return b.b.Read(data)
94}
95
96func (b *syncBuffer) Reset() {
97 b.Lock()
98 b.b.Reset()
99 b.Unlock()
100}
101
102func (b *syncBuffer) String() string {
103 return string(b.Bytes())
104}
105
106func (b *syncBuffer) Bytes() []byte {
107 b.RLock()
108 data := b.b.Bytes()
109 b.RUnlock()
110 return data
111}
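A sketch of how NewMockUi might be used in a test to assert on output; the message strings are arbitrary:

```go
package mycli

import (
	"strings"
	"testing"

	"github.com/mitchellh/cli"
)

func TestCommandOutput(t *testing.T) {
	ui := cli.NewMockUi()

	// A command under test would normally receive this ui; here we
	// exercise it directly.
	ui.Output("created resource")
	ui.Error("something went wrong")

	if !strings.Contains(ui.OutputWriter.String(), "created resource") {
		t.Fatalf("unexpected stdout: %q", ui.OutputWriter.String())
	}
	if !strings.Contains(ui.ErrorWriter.String(), "something went wrong") {
		t.Fatalf("unexpected stderr: %q", ui.ErrorWriter.String())
	}
}
```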
diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go
new file mode 100644
index 0000000..1e1db3c
--- /dev/null
+++ b/vendor/github.com/mitchellh/cli/ui_writer.go
@@ -0,0 +1,18 @@
1package cli
2
3// UiWriter is an io.Writer implementation that can be used with
4// loggers; it writes every line of log output data to a Ui at the
5// Info level.
6type UiWriter struct {
7 Ui Ui
8}
9
10func (w *UiWriter) Write(p []byte) (n int, err error) {
11 n = len(p)
12 if n > 0 && p[n-1] == '\n' {
13 p = p[:n-1]
14 }
15
16 w.Ui.Info(string(p))
17 return n, nil
18}
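A possible way to feed standard-library log output into a Ui via UiWriter (logger prefix and flags are chosen arbitrarily):

```go
package main

import (
	"log"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	ui := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}

	// Route standard-library log output through the Ui at the Info level.
	logger := log.New(&cli.UiWriter{Ui: ui}, "", 0)
	logger.Println("server started") // the trailing newline is stripped before Ui.Info
}
```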
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
index 0e725ea..1404352 100644
--- a/vendor/github.com/mitchellh/copystructure/copystructure.go
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -156,9 +156,13 @@ func (w *walker) Exit(l reflectwalk.Location) error {
156 } 156 }
157 157
158 switch l { 158 switch l {
159 case reflectwalk.Array:
160 fallthrough
159 case reflectwalk.Map: 161 case reflectwalk.Map:
160 fallthrough 162 fallthrough
161 case reflectwalk.Slice: 163 case reflectwalk.Slice:
164 w.replacePointerMaybe()
165
162 // Pop map off our container 166 // Pop map off our container
163 w.cs = w.cs[:len(w.cs)-1] 167 w.cs = w.cs[:len(w.cs)-1]
164 case reflectwalk.MapValue: 168 case reflectwalk.MapValue:
@@ -171,16 +175,27 @@ func (w *walker) Exit(l reflectwalk.Location) error {
171 // or in this case never adds it. We need to create a properly typed 175 // or in this case never adds it. We need to create a properly typed
172 // zero value so that this key can be set. 176 // zero value so that this key can be set.
173 if !mv.IsValid() { 177 if !mv.IsValid() {
174 mv = reflect.Zero(m.Type().Elem()) 178 mv = reflect.Zero(m.Elem().Type().Elem())
179 }
180 m.Elem().SetMapIndex(mk, mv)
181 case reflectwalk.ArrayElem:
182 // Pop off the value and the index and set it on the array
183 v := w.valPop()
184 i := w.valPop().Interface().(int)
185 if v.IsValid() {
186 a := w.cs[len(w.cs)-1]
187 ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
188 if ae.CanSet() {
189 ae.Set(v)
190 }
175 } 191 }
176 m.SetMapIndex(mk, mv)
177 case reflectwalk.SliceElem: 192 case reflectwalk.SliceElem:
178 // Pop off the value and the index and set it on the slice 193 // Pop off the value and the index and set it on the slice
179 v := w.valPop() 194 v := w.valPop()
180 i := w.valPop().Interface().(int) 195 i := w.valPop().Interface().(int)
181 if v.IsValid() { 196 if v.IsValid() {
182 s := w.cs[len(w.cs)-1] 197 s := w.cs[len(w.cs)-1]
183 se := s.Index(i) 198 se := s.Elem().Index(i)
184 if se.CanSet() { 199 if se.CanSet() {
185 se.Set(v) 200 se.Set(v)
186 } 201 }
@@ -220,9 +235,9 @@ func (w *walker) Map(m reflect.Value) error {
220 // Create the map. If the map itself is nil, then just make a nil map 235 // Create the map. If the map itself is nil, then just make a nil map
221 var newMap reflect.Value 236 var newMap reflect.Value
222 if m.IsNil() { 237 if m.IsNil() {
223 newMap = reflect.Indirect(reflect.New(m.Type())) 238 newMap = reflect.New(m.Type())
224 } else { 239 } else {
225 newMap = reflect.MakeMap(m.Type()) 240 newMap = wrapPtr(reflect.MakeMap(m.Type()))
226 } 241 }
227 242
228 w.cs = append(w.cs, newMap) 243 w.cs = append(w.cs, newMap)
@@ -287,9 +302,9 @@ func (w *walker) Slice(s reflect.Value) error {
287 302
288 var newS reflect.Value 303 var newS reflect.Value
289 if s.IsNil() { 304 if s.IsNil() {
290 newS = reflect.Indirect(reflect.New(s.Type())) 305 newS = reflect.New(s.Type())
291 } else { 306 } else {
292 newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap()) 307 newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
293 } 308 }
294 309
295 w.cs = append(w.cs, newS) 310 w.cs = append(w.cs, newS)
@@ -309,6 +324,31 @@ func (w *walker) SliceElem(i int, elem reflect.Value) error {
309 return nil 324 return nil
310} 325}
311 326
327func (w *walker) Array(a reflect.Value) error {
328 if w.ignoring() {
329 return nil
330 }
331 w.lock(a)
332
333 newA := reflect.New(a.Type())
334
335 w.cs = append(w.cs, newA)
336 w.valPush(newA)
337 return nil
338}
339
340func (w *walker) ArrayElem(i int, elem reflect.Value) error {
341 if w.ignoring() {
342 return nil
343 }
344
345 // We don't write the array here because elem might still be
346 // arbitrarily complex. Just record the index and continue on.
347 w.valPush(reflect.ValueOf(i))
348
349 return nil
350}
351
312func (w *walker) Struct(s reflect.Value) error { 352func (w *walker) Struct(s reflect.Value) error {
313 if w.ignoring() { 353 if w.ignoring() {
314 return nil 354 return nil
@@ -326,7 +366,10 @@ func (w *walker) Struct(s reflect.Value) error {
326 return err 366 return err
327 } 367 }
328 368
329 v = reflect.ValueOf(dup) 369 // We need to put a pointer to the value on the value stack,
370 // so allocate a new pointer and set it.
371 v = reflect.New(s.Type())
372 reflect.Indirect(v).Set(reflect.ValueOf(dup))
330 } else { 373 } else {
331 // No copier, we copy ourselves and allow reflectwalk to guide 374 // No copier, we copy ourselves and allow reflectwalk to guide
332 // us deeper into the structure for copying. 375 // us deeper into the structure for copying.
@@ -405,6 +448,23 @@ func (w *walker) replacePointerMaybe() {
405 } 448 }
406 449
407 v := w.valPop() 450 v := w.valPop()
451
452 // If the expected type is a pointer to an interface of any depth,
453 // such as *interface{}, **interface{}, etc., then we need to convert
454 // the value "v" from *CONCRETE to *interface{} so types match for
455 // Set.
456 //
457 // Example if v is type *Foo where Foo is a struct, v would become
458 // *interface{} instead. This only happens if we have an interface expectation
459 // at this depth.
460 //
461 // For more info, see GH-16
462 if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
463 y := reflect.New(iType) // Create *interface{}
464 y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
465 v = y // v is now typed *interface{} (where *v = Foo)
466 }
467
408 for i := 1; i < w.ps[w.depth]; i++ { 468 for i := 1; i < w.ps[w.depth]; i++ {
409 if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { 469 if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
410 iface := reflect.New(iType).Elem() 470 iface := reflect.New(iType).Elem()
@@ -475,3 +535,14 @@ func (w *walker) lock(v reflect.Value) {
475 locker.Lock() 535 locker.Lock()
476 w.locks[w.depth] = locker 536 w.locks[w.depth] = locker
477} 537}
538
539// wrapPtr is a helper that takes v and always makes it *v. copystructure
540// stores things internally as pointers until the last moment before unwrapping.
541func wrapPtr(v reflect.Value) reflect.Value {
542 if !v.IsValid() {
543 return v
544 }
545 vPtr := reflect.New(v.Type())
546 vPtr.Elem().Set(v)
547 return vPtr
548}
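To illustrate the new array handling added in this diff, a hedged example using copystructure's exported Copy function; the Config type is hypothetical, and Copy's signature is assumed from the package's public API rather than shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Config struct {
	Name  string
	Ports [3]int // fixed-size array, deep-copied via the Array/ArrayElem walkers above
}

func main() {
	original := &Config{Name: "web", Ports: [3]int{80, 443, 8080}}

	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}

	copied := dup.(*Config)
	copied.Ports[0] = 9090 // mutating the copy must not affect the original

	fmt.Println(original.Ports, copied.Ports) // [80 443 8080] [9090 443 8080]
}
```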
diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml
new file mode 100644
index 0000000..4c83109
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml
@@ -0,0 +1,12 @@
1language: go
2
3go:
4 - 1.8
5 - tip
6
7script:
8 - go test
9
10matrix:
11 allow_failures:
12 - go: tip
diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE
new file mode 100644
index 0000000..a3866a2
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2016 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md
new file mode 100644
index 0000000..26781bb
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/README.md
@@ -0,0 +1,52 @@
1# go-testing-interface
2
3go-testing-interface is a Go library that exports an interface that
4`*testing.T` implements as well as a runtime version you can use in its
5place.
6
7The purpose of this library is so that you can export test helpers as a
8public API without depending on the "testing" package, since you can't
9create a `*testing.T` struct manually. This lets you, for example, use the
10public testing APIs to generate mock data at runtime, rather than just at
11test time.
12
13## Usage & Example
14
15For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface).
16
17Given a test helper written using `go-testing-interface` like this:
18
19 import "github.com/mitchellh/go-testing-interface"
20
21 func TestHelper(t testing.T) {
22 t.Fatal("I failed")
23 }
24
25You can call the test helper in a real test easily:
26
27 import "testing"
28
29 func TestThing(t *testing.T) {
30 TestHelper(t)
31 }
32
33You can also call the test helper at runtime if needed:
34
35 import "github.com/mitchellh/go-testing-interface"
36
37 func main() {
38 TestHelper(&testing.RuntimeT{})
39 }
40
41## Why?!
42
43**Why would I call a test helper that takes a *testing.T at runtime?**
44
45You probably shouldn't. The only use case I've seen (and I've had) for this
46is to implement a "dev mode" for a service where the test helpers are used
47to populate mock data, create a mock DB, perhaps run service dependencies
48in-memory, etc.
49
50Outside of a "dev mode", I've never seen a use case for this and I think
51there shouldn't be one since the point of the `testing.T` interface is that
52you can fail immediately.
diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go
new file mode 100644
index 0000000..204afb4
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go
@@ -0,0 +1,84 @@
1// +build !go1.9
2
3package testing
4
5import (
6 "fmt"
7 "log"
8)
9
10// T is the interface that mimics the standard library *testing.T.
11//
12// In unit tests you can just pass a *testing.T struct. At runtime, outside
13// of tests, you can pass in a RuntimeT struct from this package.
14type T interface {
15 Error(args ...interface{})
16 Errorf(format string, args ...interface{})
17 Fail()
18 FailNow()
19 Failed() bool
20 Fatal(args ...interface{})
21 Fatalf(format string, args ...interface{})
22 Log(args ...interface{})
23 Logf(format string, args ...interface{})
24 Name() string
25 Skip(args ...interface{})
26 SkipNow()
27 Skipf(format string, args ...interface{})
28 Skipped() bool
29}
30
31// RuntimeT implements T and can be instantiated and run at runtime to
32// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
33// for calls to Fatal. For calls to Error, you'll have to check the errors
34// list to determine whether to exit yourself. Name and Skip methods are
35// unimplemented noops.
36type RuntimeT struct {
37 failed bool
38}
39
40func (t *RuntimeT) Error(args ...interface{}) {
41 log.Println(fmt.Sprintln(args...))
42 t.Fail()
43}
44
45func (t *RuntimeT) Errorf(format string, args ...interface{}) {
46 log.Println(fmt.Sprintf(format, args...))
47 t.Fail()
48}
49
50func (t *RuntimeT) Fatal(args ...interface{}) {
51 log.Println(fmt.Sprintln(args...))
52 t.FailNow()
53}
54
55func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
56 log.Println(fmt.Sprintf(format, args...))
57 t.FailNow()
58}
59
60func (t *RuntimeT) Fail() {
61 t.failed = true
62}
63
64func (t *RuntimeT) FailNow() {
65 panic("testing.T failed, see logs for output (if any)")
66}
67
68func (t *RuntimeT) Failed() bool {
69 return t.failed
70}
71
72func (t *RuntimeT) Log(args ...interface{}) {
73 log.Println(fmt.Sprintln(args...))
74}
75
76func (t *RuntimeT) Logf(format string, args ...interface{}) {
77 log.Println(fmt.Sprintf(format, args...))
78}
79
80func (t *RuntimeT) Name() string { return "" }
81func (t *RuntimeT) Skip(args ...interface{}) {}
82func (t *RuntimeT) SkipNow() {}
83func (t *RuntimeT) Skipf(format string, args ...interface{}) {}
84func (t *RuntimeT) Skipped() bool { return false }
diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go
new file mode 100644
index 0000000..07fbcb5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go
@@ -0,0 +1,80 @@
1// +build go1.9
2
3// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition
4// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC
5// and is set for release shortly. We'll support this on master as the default
6// as soon as 1.9 is released.
7
8package testing
9
10import (
11 "fmt"
12 "log"
13)
14
15// T is the interface that mimics the standard library *testing.T.
16//
17// In unit tests you can just pass a *testing.T struct. At runtime, outside
18// of tests, you can pass in a RuntimeT struct from this package.
19type T interface {
20 Error(args ...interface{})
21 Errorf(format string, args ...interface{})
22 Fatal(args ...interface{})
23 Fatalf(format string, args ...interface{})
24 Fail()
25 FailNow()
26 Failed() bool
27 Helper()
28 Log(args ...interface{})
29 Logf(format string, args ...interface{})
30}
31
32// RuntimeT implements T and can be instantiated and run at runtime to
33// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
34// for calls to Fatal. For calls to Error, you'll have to check the errors
35// list to determine whether to exit yourself.
36type RuntimeT struct {
37 failed bool
38}
39
40func (t *RuntimeT) Error(args ...interface{}) {
41 log.Println(fmt.Sprintln(args...))
42 t.Fail()
43}
44
45func (t *RuntimeT) Errorf(format string, args ...interface{}) {
46 log.Println(fmt.Sprintf(format, args...))
47 t.Fail()
48}
49
50func (t *RuntimeT) Fatal(args ...interface{}) {
51 log.Println(fmt.Sprintln(args...))
52 t.FailNow()
53}
54
55func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
56 log.Println(fmt.Sprintf(format, args...))
57 t.FailNow()
58}
59
60func (t *RuntimeT) Fail() {
61 t.failed = true
62}
63
64func (t *RuntimeT) FailNow() {
65 panic("testing.T failed, see logs for output (if any)")
66}
67
68func (t *RuntimeT) Failed() bool {
69 return t.failed
70}
71
72func (t *RuntimeT) Helper() {}
73
74func (t *RuntimeT) Log(args ...interface{}) {
75 log.Println(fmt.Sprintln(args...))
76}
77
78func (t *RuntimeT) Logf(format string, args ...interface{}) {
79 log.Println(fmt.Sprintf(format, args...))
80}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 0000000..2298515
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2014 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 0000000..60ae311
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
1# go-wordwrap
2
3`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
4automatically wraps words into multiple lines. The primary use case for this
5is in formatting CLI output, but of course word wrapping is a generally useful
6thing to do.
7
8## Installation and Usage
9
10Install using `go get github.com/mitchellh/go-wordwrap`.
11
12Full documentation is available at
13http://godoc.org/github.com/mitchellh/go-wordwrap
14
15Below is an example of its usage ignoring errors:
16
17```go
18wrapped := wordwrap.WrapString("foo bar baz", 3)
19fmt.Println(wrapped)
20```
21
22Would output:
23
24```
25foo
26bar
27baz
28```
29
30## Word Wrap Algorithm
31
32This library doesn't use any clever algorithm for word wrapping. The wrapping
33is actually very naive: it simply wraps at whitespace and explicit linebreaks.
34The goal of this library is for word wrapping CLI output, so the input is
35typically pretty well controlled human language. Because of this, the naive
36approach typically works just fine.
37
38In the future, we'd like to make the algorithm more advanced. We would do
39so without breaking the API.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
new file mode 100644
index 0000000..ac67205
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
@@ -0,0 +1,73 @@
1package wordwrap
2
3import (
4 "bytes"
5 "unicode"
6)
7
8// WrapString wraps the given string within lim width in characters.
9//
10// Wrapping is currently naive and only happens at white-space. A future
11// version of the library will implement smarter wrapping. This means that
12// pathological cases can dramatically reach past the limit, such as a very
13// long word.
14func WrapString(s string, lim uint) string {
15 // Initialize a buffer with a slightly larger size to account for breaks
16 init := make([]byte, 0, len(s))
17 buf := bytes.NewBuffer(init)
18
19 var current uint
20 var wordBuf, spaceBuf bytes.Buffer
21
22 for _, char := range s {
23 if char == '\n' {
24 if wordBuf.Len() == 0 {
25 if current+uint(spaceBuf.Len()) > lim {
26 current = 0
27 } else {
28 current += uint(spaceBuf.Len())
29 spaceBuf.WriteTo(buf)
30 }
31 spaceBuf.Reset()
32 } else {
33 current += uint(spaceBuf.Len() + wordBuf.Len())
34 spaceBuf.WriteTo(buf)
35 spaceBuf.Reset()
36 wordBuf.WriteTo(buf)
37 wordBuf.Reset()
38 }
39 buf.WriteRune(char)
40 current = 0
41 } else if unicode.IsSpace(char) {
42 if spaceBuf.Len() == 0 || wordBuf.Len() > 0 {
43 current += uint(spaceBuf.Len() + wordBuf.Len())
44 spaceBuf.WriteTo(buf)
45 spaceBuf.Reset()
46 wordBuf.WriteTo(buf)
47 wordBuf.Reset()
48 }
49
50 spaceBuf.WriteRune(char)
51 } else {
52
53 wordBuf.WriteRune(char)
54
55 if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim {
56 buf.WriteRune('\n')
57 current = 0
58 spaceBuf.Reset()
59 }
60 }
61 }
62
63 if wordBuf.Len() == 0 {
64 if current+uint(spaceBuf.Len()) <= lim {
65 spaceBuf.WriteTo(buf)
66 }
67 } else {
68 spaceBuf.WriteTo(buf)
69 wordBuf.WriteTo(buf)
70 }
71
72 return buf.String()
73}
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go
index 7c59d76..6a7f176 100644
--- a/vendor/github.com/mitchellh/reflectwalk/location.go
+++ b/vendor/github.com/mitchellh/reflectwalk/location.go
@@ -11,6 +11,8 @@ const (
11 MapValue 11 MapValue
12 Slice 12 Slice
13 SliceElem 13 SliceElem
14 Array
15 ArrayElem
14 Struct 16 Struct
15 StructField 17 StructField
16 WalkLoc 18 WalkLoc
diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go
index d3cfe85..70760cf 100644
--- a/vendor/github.com/mitchellh/reflectwalk/location_string.go
+++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go
@@ -1,15 +1,15 @@
1// generated by stringer -type=Location location.go; DO NOT EDIT 1// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.
2 2
3package reflectwalk 3package reflectwalk
4 4
5import "fmt" 5import "fmt"
6 6
7const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" 7const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"
8 8
9var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} 9var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}
10 10
11func (i Location) String() string { 11func (i Location) String() string {
12 if i+1 >= Location(len(_Location_index)) { 12 if i >= Location(len(_Location_index)-1) {
13 return fmt.Sprintf("Location(%d)", i) 13 return fmt.Sprintf("Location(%d)", i)
14 } 14 }
15 return _Location_name[_Location_index[i]:_Location_index[i+1]] 15 return _Location_name[_Location_index[i]:_Location_index[i+1]]
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
index ec0a623..d7ab7b6 100644
--- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
+++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
@@ -39,6 +39,13 @@ type SliceWalker interface {
39 SliceElem(int, reflect.Value) error 39 SliceElem(int, reflect.Value) error
40} 40}
41 41
42// ArrayWalker implementations are able to handle array elements found
43// within complex structures.
44type ArrayWalker interface {
45 Array(reflect.Value) error
46 ArrayElem(int, reflect.Value) error
47}
48
42// StructWalker is an interface that has methods that are called for 49// StructWalker is an interface that has methods that are called for
43// structs when a Walk is done. 50// structs when a Walk is done.
44type StructWalker interface { 51type StructWalker interface {
@@ -65,6 +72,7 @@ type PointerWalker interface {
65// SkipEntry can be returned from walk functions to skip walking 72// SkipEntry can be returned from walk functions to skip walking
66// the value of this field. This is only valid in the following functions: 73// the value of this field. This is only valid in the following functions:
67// 74//
75// - Struct: skips all fields from being walked
68// - StructField: skips walking the struct value 76// - StructField: skips walking the struct value
69// 77//
70var SkipEntry = errors.New("skip this entry") 78var SkipEntry = errors.New("skip this entry")
@@ -179,6 +187,9 @@ func walk(v reflect.Value, w interface{}) (err error) {
179 case reflect.Struct: 187 case reflect.Struct:
180 err = walkStruct(v, w) 188 err = walkStruct(v, w)
181 return 189 return
190 case reflect.Array:
191 err = walkArray(v, w)
192 return
182 default: 193 default:
183 panic("unsupported type: " + k.String()) 194 panic("unsupported type: " + k.String())
184 } 195 }
@@ -286,48 +297,99 @@ func walkSlice(v reflect.Value, w interface{}) (err error) {
286 return nil 297 return nil
287} 298}
288 299
300func walkArray(v reflect.Value, w interface{}) (err error) {
301 ew, ok := w.(EnterExitWalker)
302 if ok {
303 ew.Enter(Array)
304 }
305
306 if aw, ok := w.(ArrayWalker); ok {
307 if err := aw.Array(v); err != nil {
308 return err
309 }
310 }
311
312 for i := 0; i < v.Len(); i++ {
313 elem := v.Index(i)
314
315 if aw, ok := w.(ArrayWalker); ok {
316 if err := aw.ArrayElem(i, elem); err != nil {
317 return err
318 }
319 }
320
321 ew, ok := w.(EnterExitWalker)
322 if ok {
323 ew.Enter(ArrayElem)
324 }
325
326 if err := walk(elem, w); err != nil {
327 return err
328 }
329
330 if ok {
331 ew.Exit(ArrayElem)
332 }
333 }
334
335 ew, ok = w.(EnterExitWalker)
336 if ok {
337 ew.Exit(Array)
338 }
339
340 return nil
341}
342
289func walkStruct(v reflect.Value, w interface{}) (err error) { 343func walkStruct(v reflect.Value, w interface{}) (err error) {
290 ew, ewok := w.(EnterExitWalker) 344 ew, ewok := w.(EnterExitWalker)
291 if ewok { 345 if ewok {
292 ew.Enter(Struct) 346 ew.Enter(Struct)
293 } 347 }
294 348
349 skip := false
295 if sw, ok := w.(StructWalker); ok { 350 if sw, ok := w.(StructWalker); ok {
296 if err = sw.Struct(v); err != nil { 351 err = sw.Struct(v)
352 if err == SkipEntry {
353 skip = true
354 err = nil
355 }
356 if err != nil {
297 return 357 return
298 } 358 }
299 } 359 }
300 360
301 vt := v.Type() 361 if !skip {
302 for i := 0; i < vt.NumField(); i++ { 362 vt := v.Type()
303 sf := vt.Field(i) 363 for i := 0; i < vt.NumField(); i++ {
304 f := v.FieldByIndex([]int{i}) 364 sf := vt.Field(i)
365 f := v.FieldByIndex([]int{i})
305 366
306 if sw, ok := w.(StructWalker); ok { 367 if sw, ok := w.(StructWalker); ok {
307 err = sw.StructField(sf, f) 368 err = sw.StructField(sf, f)
308 369
309 // SkipEntry just pretends this field doesn't even exist 370 // SkipEntry just pretends this field doesn't even exist
310 if err == SkipEntry { 371 if err == SkipEntry {
311 continue 372 continue
373 }
374
375 if err != nil {
376 return
377 }
378 }
379
380 ew, ok := w.(EnterExitWalker)
381 if ok {
382 ew.Enter(StructField)
312 } 383 }
313 384
385 err = walk(f, w)
314 if err != nil { 386 if err != nil {
315 return 387 return
316 } 388 }
317 }
318
319 ew, ok := w.(EnterExitWalker)
320 if ok {
321 ew.Enter(StructField)
322 }
323 389
324 err = walk(f, w) 390 if ok {
325 if err != nil { 391 ew.Exit(StructField)
326 return 392 }
327 }
328
329 if ok {
330 ew.Exit(StructField)
331 } 393 }
332 } 394 }
333 395
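The hunk above adds ArrayWalker and walkArray, which mirror the existing slice support for fixed-size arrays. A minimal sketch of a walker that only implements ArrayWalker might look like this (arrayPrinter is a hypothetical name; the import path matches the vendored package):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// arrayPrinter implements only the ArrayWalker interface added above.
type arrayPrinter struct{}

func (arrayPrinter) Array(v reflect.Value) error {
	fmt.Printf("array of length %d\n", v.Len())
	return nil
}

func (arrayPrinter) ArrayElem(i int, v reflect.Value) error {
	fmt.Printf("  elem %d: %v\n", i, v.Interface())
	return nil
}

func main() {
	data := struct{ Nums [3]int }{Nums: [3]int{1, 2, 3}}
	if err := reflectwalk.Walk(data, arrayPrinter{}); err != nil {
		fmt.Println("walk error:", err)
	}
}
```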
diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore
new file mode 100644
index 0000000..a1338d6
--- /dev/null
+++ b/vendor/github.com/oklog/run/.gitignore
@@ -0,0 +1,14 @@
1# Binaries for programs and plugins
2*.exe
3*.dll
4*.so
5*.dylib
6
7# Test binary, build with `go test -c`
8*.test
9
10# Output of the go coverage tool, specifically when used with LiteIDE
11*.out
12
13# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
14.glide/
diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml
new file mode 100644
index 0000000..362bdd4
--- /dev/null
+++ b/vendor/github.com/oklog/run/.travis.yml
@@ -0,0 +1,12 @@
1language: go
2sudo: false
3go:
4 - 1.x
5 - tip
6install:
7 - go get -v github.com/golang/lint/golint
8 - go build ./...
9script:
10 - go vet ./...
11 - $HOME/gopath/bin/golint .
12 - go test -v -race ./...
diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/oklog/run/LICENSE
@@ -0,0 +1,201 @@
1 Apache License
2 Version 2.0, January 2004
3 http://www.apache.org/licenses/
4
5 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
7 1. Definitions.
8
9 "License" shall mean the terms and conditions for use, reproduction,
10 and distribution as defined by Sections 1 through 9 of this document.
11
12 "Licensor" shall mean the copyright owner or entity authorized by
13 the copyright owner that is granting the License.
14
15 "Legal Entity" shall mean the union of the acting entity and all
16 other entities that control, are controlled by, or are under common
17 control with that entity. For the purposes of this definition,
18 "control" means (i) the power, direct or indirect, to cause the
19 direction or management of such entity, whether by contract or
20 otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 outstanding shares, or (iii) beneficial ownership of such entity.
22
23 "You" (or "Your") shall mean an individual or Legal Entity
24 exercising permissions granted by this License.
25
26 "Source" form shall mean the preferred form for making modifications,
27 including but not limited to software source code, documentation
28 source, and configuration files.
29
30 "Object" form shall mean any form resulting from mechanical
31 transformation or translation of a Source form, including but
32 not limited to compiled object code, generated documentation,
33 and conversions to other media types.
34
35 "Work" shall mean the work of authorship, whether in Source or
36 Object form, made available under the License, as indicated by a
37 copyright notice that is included in or attached to the work
38 (an example is provided in the Appendix below).
39
40 "Derivative Works" shall mean any work, whether in Source or Object
41 form, that is based on (or derived from) the Work and for which the
42 editorial revisions, annotations, elaborations, or other modifications
43 represent, as a whole, an original work of authorship. For the purposes
44 of this License, Derivative Works shall not include works that remain
45 separable from, or merely link (or bind by name) to the interfaces of,
46 the Work and Derivative Works thereof.
47
48 "Contribution" shall mean any work of authorship, including
49 the original version of the Work and any modifications or additions
50 to that Work or Derivative Works thereof, that is intentionally
51 submitted to Licensor for inclusion in the Work by the copyright owner
52 or by an individual or Legal Entity authorized to submit on behalf of
53 the copyright owner. For the purposes of this definition, "submitted"
54 means any form of electronic, verbal, or written communication sent
55 to the Licensor or its representatives, including but not limited to
56 communication on electronic mailing lists, source code control systems,
57 and issue tracking systems that are managed by, or on behalf of, the
58 Licensor for the purpose of discussing and improving the Work, but
59 excluding communication that is conspicuously marked or otherwise
60 designated in writing by the copyright owner as "Not a Contribution."
61
62 "Contributor" shall mean Licensor and any individual or Legal Entity
63 on behalf of whom a Contribution has been received by Licensor and
64 subsequently incorporated within the Work.
65
66 2. Grant of Copyright License. Subject to the terms and conditions of
67 this License, each Contributor hereby grants to You a perpetual,
68 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 copyright license to reproduce, prepare Derivative Works of,
70 publicly display, publicly perform, sublicense, and distribute the
71 Work and such Derivative Works in Source or Object form.
72
73 3. Grant of Patent License. Subject to the terms and conditions of
74 this License, each Contributor hereby grants to You a perpetual,
75 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 (except as stated in this section) patent license to make, have made,
77 use, offer to sell, sell, import, and otherwise transfer the Work,
78 where such license applies only to those patent claims licensable
79 by such Contributor that are necessarily infringed by their
80 Contribution(s) alone or by combination of their Contribution(s)
81 with the Work to which such Contribution(s) was submitted. If You
82 institute patent litigation against any entity (including a
83 cross-claim or counterclaim in a lawsuit) alleging that the Work
84 or a Contribution incorporated within the Work constitutes direct
85 or contributory patent infringement, then any patent licenses
86 granted to You under this License for that Work shall terminate
87 as of the date such litigation is filed.
88
89 4. Redistribution. You may reproduce and distribute copies of the
90 Work or Derivative Works thereof in any medium, with or without
91 modifications, and in Source or Object form, provided that You
92 meet the following conditions:
93
94 (a) You must give any other recipients of the Work or
95 Derivative Works a copy of this License; and
96
97 (b) You must cause any modified files to carry prominent notices
98 stating that You changed the files; and
99
100 (c) You must retain, in the Source form of any Derivative Works
101 that You distribute, all copyright, patent, trademark, and
102 attribution notices from the Source form of the Work,
103 excluding those notices that do not pertain to any part of
104 the Derivative Works; and
105
106 (d) If the Work includes a "NOTICE" text file as part of its
107 distribution, then any Derivative Works that You distribute must
108 include a readable copy of the attribution notices contained
109 within such NOTICE file, excluding those notices that do not
110 pertain to any part of the Derivative Works, in at least one
111 of the following places: within a NOTICE text file distributed
112 as part of the Derivative Works; within the Source form or
113 documentation, if provided along with the Derivative Works; or,
114 within a display generated by the Derivative Works, if and
115 wherever such third-party notices normally appear. The contents
116 of the NOTICE file are for informational purposes only and
117 do not modify the License. You may add Your own attribution
118 notices within Derivative Works that You distribute, alongside
119 or as an addendum to the NOTICE text from the Work, provided
120 that such additional attribution notices cannot be construed
121 as modifying the License.
122
123 You may add Your own copyright statement to Your modifications and
124 may provide additional or different license terms and conditions
125 for use, reproduction, or distribution of Your modifications, or
126 for any such Derivative Works as a whole, provided Your use,
127 reproduction, and distribution of the Work otherwise complies with
128 the conditions stated in this License.
129
130 5. Submission of Contributions. Unless You explicitly state otherwise,
131 any Contribution intentionally submitted for inclusion in the Work
132 by You to the Licensor shall be under the terms and conditions of
133 this License, without any additional terms or conditions.
134 Notwithstanding the above, nothing herein shall supersede or modify
135 the terms of any separate license agreement you may have executed
136 with Licensor regarding such Contributions.
137
138 6. Trademarks. This License does not grant permission to use the trade
139 names, trademarks, service marks, or product names of the Licensor,
140 except as required for reasonable and customary use in describing the
141 origin of the Work and reproducing the content of the NOTICE file.
142
143 7. Disclaimer of Warranty. Unless required by applicable law or
144 agreed to in writing, Licensor provides the Work (and each
145 Contributor provides its Contributions) on an "AS IS" BASIS,
146 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 implied, including, without limitation, any warranties or conditions
148 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 PARTICULAR PURPOSE. You are solely responsible for determining the
150 appropriateness of using or redistributing the Work and assume any
151 risks associated with Your exercise of permissions under this License.
152
153 8. Limitation of Liability. In no event and under no legal theory,
154 whether in tort (including negligence), contract, or otherwise,
155 unless required by applicable law (such as deliberate and grossly
156 negligent acts) or agreed to in writing, shall any Contributor be
157 liable to You for damages, including any direct, indirect, special,
158 incidental, or consequential damages of any character arising as a
159 result of this License or out of the use or inability to use the
160 Work (including but not limited to damages for loss of goodwill,
161 work stoppage, computer failure or malfunction, or any and all
162 other commercial damages or losses), even if such Contributor
163 has been advised of the possibility of such damages.
164
165 9. Accepting Warranty or Additional Liability. While redistributing
166 the Work or Derivative Works thereof, You may choose to offer,
167 and charge a fee for, acceptance of support, warranty, indemnity,
168 or other liability obligations and/or rights consistent with this
169 License. However, in accepting such obligations, You may act only
170 on Your own behalf and on Your sole responsibility, not on behalf
171 of any other Contributor, and only if You agree to indemnify,
172 defend, and hold each Contributor harmless for any liability
173 incurred by, or claims asserted against, such Contributor by reason
174 of your accepting any such warranty or additional liability.
175
176 END OF TERMS AND CONDITIONS
177
178 APPENDIX: How to apply the Apache License to your work.
179
180 To apply the Apache License to your work, attach the following
181 boilerplate notice, with the fields enclosed by brackets "[]"
182 replaced with your own identifying information. (Don't include
183 the brackets!) The text should be enclosed in the appropriate
184 comment syntax for the file format. We also recommend that a
185 file or class name and description of purpose be included on the
186 same "printed page" as the copyright notice for easier
187 identification within third-party archives.
188
189 Copyright [yyyy] [name of copyright owner]
190
191 Licensed under the Apache License, Version 2.0 (the "License");
192 you may not use this file except in compliance with the License.
193 You may obtain a copy of the License at
194
195 http://www.apache.org/licenses/LICENSE-2.0
196
197 Unless required by applicable law or agreed to in writing, software
198 distributed under the License is distributed on an "AS IS" BASIS,
199 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 See the License for the specific language governing permissions and
201 limitations under the License.
diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md
new file mode 100644
index 0000000..a7228cd
--- /dev/null
+++ b/vendor/github.com/oklog/run/README.md
@@ -0,0 +1,73 @@
1# run
2
3[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run)
4[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run)
5[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run)
6[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE)
7
8run.Group is a universal mechanism to manage goroutine lifecycles.
9
10Create a zero-value run.Group, and then add actors to it. Actors are defined as
11a pair of functions: an **execute** function, which should run synchronously;
12and an **interrupt** function, which, when invoked, should cause the execute
13function to return. Finally, invoke Run, which blocks until the first actor
14returns. This general-purpose API allows callers to model pretty much any
15runnable task, and achieve well-defined lifecycle semantics for the group.
16
17run.Group was written to manage component lifecycles in func main for
18[OK Log](https://github.com/oklog/oklog).
19But it's useful in any circumstance where you need to orchestrate multiple
20goroutines as a single unit.
21[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a
22video of a talk where run.Group is described.
23
24## Examples
25
26### context.Context
27
28```go
29ctx, cancel := context.WithCancel(context.Background())
30g.Add(func() error {
31 return myProcess(ctx, ...)
32}, func(error) {
33 cancel()
34})
35```
36
37### net.Listener
38
39```go
40ln, _ := net.Listen("tcp", ":8080")
41g.Add(func() error {
42 return http.Serve(ln, nil)
43}, func(error) {
44 ln.Close()
45})
46```
47
48### io.ReadCloser
49
50```go
51var conn io.ReadCloser = ...
52g.Add(func() error {
53 s := bufio.NewScanner(conn)
54 for s.Scan() {
55 println(s.Text())
56 }
57 return s.Err()
58}, func(error) {
59 conn.Close()
60})
61```
62
63## Comparisons
64
65Package run is somewhat similar to package
66[errgroup](https://godoc.org/golang.org/x/sync/errgroup),
67except it doesn't require actor goroutines to understand context semantics.
68
69It's somewhat similar to package
70[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or
71[tomb.v2](https://godoc.org/gopkg.in/tomb.v2),
72except it has a much smaller API surface, delegating e.g. staged shutdown of
73goroutines to the caller.
diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go
new file mode 100644
index 0000000..832d47d
--- /dev/null
+++ b/vendor/github.com/oklog/run/group.go
@@ -0,0 +1,62 @@
1// Package run implements an actor-runner with deterministic teardown. It is
2// somewhat similar to package errgroup, except it does not require actor
3// goroutines to understand context semantics. This makes it suitable for use in
4// more circumstances; for example, goroutines which are handling connections
5// from net.Listeners, or scanning input from a closable io.Reader.
6package run
7
8// Group collects actors (functions) and runs them concurrently.
9// When one actor (function) returns, all actors are interrupted.
10// The zero value of a Group is useful.
11type Group struct {
12 actors []actor
13}
14
15// Add an actor (function) to the group. Each actor must be pre-emptable by an
16// interrupt function. That is, if interrupt is invoked, execute should return.
17// Also, it must be safe to call interrupt even after execute has returned.
18//
19// The first actor (function) to return interrupts all running actors.
20// The error is passed to the interrupt functions, and is returned by Run.
21func (g *Group) Add(execute func() error, interrupt func(error)) {
22 g.actors = append(g.actors, actor{execute, interrupt})
23}
24
25// Run all actors (functions) concurrently.
26// When the first actor returns, all others are interrupted.
27// Run only returns when all actors have exited.
28// Run returns the error returned by the first exiting actor.
29func (g *Group) Run() error {
30 if len(g.actors) == 0 {
31 return nil
32 }
33
34 // Run each actor.
35 errors := make(chan error, len(g.actors))
36 for _, a := range g.actors {
37 go func(a actor) {
38 errors <- a.execute()
39 }(a)
40 }
41
42 // Wait for the first actor to stop.
43 err := <-errors
44
45 // Signal all actors to stop.
46 for _, a := range g.actors {
47 a.interrupt(err)
48 }
49
50 // Wait for all actors to stop.
51 for i := 1; i < cap(errors); i++ {
52 <-errors
53 }
54
55 // Return the original error.
56 return err
57}
58
59type actor struct {
60 execute func() error
61 interrupt func(error)
62}
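To make Run's contract concrete — the first actor to return interrupts the rest, and Run returns only after every actor has exited — here is a minimal sketch with two hypothetical actors:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/oklog/run"
)

func main() {
	var g run.Group

	// Actor 1: finishes on its own after roughly a second.
	stop1 := make(chan struct{})
	g.Add(func() error {
		select {
		case <-time.After(time.Second):
			return errors.New("timer expired")
		case <-stop1:
			return nil
		}
	}, func(error) {
		close(stop1)
	})

	// Actor 2: runs until interrupted by the group.
	stop2 := make(chan struct{})
	g.Add(func() error {
		<-stop2
		return nil
	}, func(error) {
		close(stop2)
	})

	// Blocks until the first actor returns, interrupts the others,
	// waits for them to exit, and returns the first actor's error.
	fmt.Println("exit:", g.Run())
}
```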
diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore
new file mode 100644
index 0000000..1363720
--- /dev/null
+++ b/vendor/github.com/posener/complete/.gitignore
@@ -0,0 +1,2 @@
1.idea
2coverage.txt
diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml
new file mode 100644
index 0000000..c2798f8
--- /dev/null
+++ b/vendor/github.com/posener/complete/.travis.yml
@@ -0,0 +1,17 @@
1language: go
2sudo: false
3go:
4 - 1.9
5 - 1.8
6
7before_install:
8 - go get -u -t ./...
9 - go get -u gopkg.in/alecthomas/gometalinter.v1
10 - gometalinter.v1 --install
11
12script:
13 - gometalinter.v1 --config metalinter.json ./...
14 - ./test.sh
15
16after_success:
17 - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt
new file mode 100644
index 0000000..16249b4
--- /dev/null
+++ b/vendor/github.com/posener/complete/LICENSE.txt
@@ -0,0 +1,21 @@
1The MIT License
2
3Copyright (c) 2017 Eyal Posener
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go
new file mode 100644
index 0000000..1ba4d69
--- /dev/null
+++ b/vendor/github.com/posener/complete/args.go
@@ -0,0 +1,102 @@
1package complete
2
3import (
4 "os"
5 "path/filepath"
6 "strings"
7 "unicode"
8)
9
10// Args describes command line arguments
11type Args struct {
12 // All is a list of all arguments in the command line (not including the command itself).
13 All []string
14 // Completed is a list of all completed arguments in the command line.
15 // If the last one is still being typed - no space after it -
16 // it won't appear in this list of arguments.
17 Completed []string
18 // Last is the argument being typed. If the last character in the
19 // command line is a space, this argument will be empty;
20 // otherwise it is the last word.
21 Last string
22 // LastCompleted is the last argument that was fully typed.
23 // If the last character in the command line is space, this would be the
24 // last word, otherwise, it would be the word before that.
25 LastCompleted string
26}
27
28// Directory gives the directory of the last argument currently being
29// written, if it represents a file name being written.
30// In case it does not, we fall back to the current directory.
31func (a Args) Directory() string {
32 if info, err := os.Stat(a.Last); err == nil && info.IsDir() {
33 return fixPathForm(a.Last, a.Last)
34 }
35 dir := filepath.Dir(a.Last)
36 if info, err := os.Stat(dir); err != nil || !info.IsDir() {
37 return "./"
38 }
39 return fixPathForm(a.Last, dir)
40}
41
42func newArgs(line string) Args {
43 var (
44 all []string
45 completed []string
46 )
47 parts := splitFields(line)
48 if len(parts) > 0 {
49 all = parts[1:]
50 completed = removeLast(parts[1:])
51 }
52 return Args{
53 All: all,
54 Completed: completed,
55 Last: last(parts),
56 LastCompleted: last(completed),
57 }
58}
59
60func splitFields(line string) []string {
61 parts := strings.Fields(line)
62 if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) {
63 parts = append(parts, "")
64 }
65 parts = splitLastEqual(parts)
66 return parts
67}
68
69func splitLastEqual(line []string) []string {
70 if len(line) == 0 {
71 return line
72 }
73 parts := strings.Split(line[len(line)-1], "=")
74 return append(line[:len(line)-1], parts...)
75}
76
77func (a Args) from(i int) Args {
78 if i > len(a.All) {
79 i = len(a.All)
80 }
81 a.All = a.All[i:]
82
83 if i > len(a.Completed) {
84 i = len(a.Completed)
85 }
86 a.Completed = a.Completed[i:]
87 return a
88}
89
90func removeLast(a []string) []string {
91 if len(a) > 0 {
92 return a[:len(a)-1]
93 }
94 return a
95}
96
97func last(args []string) string {
98 if len(args) == 0 {
99 return ""
100 }
101 return args[len(args)-1]
102}
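Since newArgs is unexported, the split it performs is easiest to illustrate as a hypothetical test inside the package; the values in the comments follow from the code above and assume COMP_LINE was `run build -cpus ` (note the trailing space):

```go
package complete

import "testing"

// TestArgsSketch is a hypothetical illustration (not part of the vendored
// package) of how a completion line is split into the Args fields.
func TestArgsSketch(t *testing.T) {
	a := newArgs("run build -cpus ")

	// The command itself ("run") is dropped; the trailing space means the
	// argument currently being typed is empty.
	// a.All           -> ["build", "-cpus", ""]
	// a.Completed     -> ["build", "-cpus"]
	// a.Last          -> ""
	// a.LastCompleted -> "-cpus"
	if a.LastCompleted != "-cpus" || a.Last != "" {
		t.Fatalf("unexpected split: %+v", a)
	}
}
```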
diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go
new file mode 100644
index 0000000..7137dee
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/cmd.go
@@ -0,0 +1,128 @@
1// Package cmd is used for command line options for the complete tool.
2package cmd
3
4import (
5 "errors"
6 "flag"
7 "fmt"
8 "os"
9 "strings"
10
11 "github.com/posener/complete/cmd/install"
12)
13
14// CLI for command line
15type CLI struct {
16 Name string
17 InstallName string
18 UninstallName string
19
20 install bool
21 uninstall bool
22 yes bool
23}
24
25const (
26 defaultInstallName = "install"
27 defaultUninstallName = "uninstall"
28)
29
30// Run is used when running complete in command line mode.
31// This is used when complete is not completing words, but rather
32// installing or uninstalling itself.
33func (f *CLI) Run() bool {
34 err := f.validate()
35 if err != nil {
36 os.Stderr.WriteString(err.Error() + "\n")
37 os.Exit(1)
38 }
39
40 switch {
41 case f.install:
42 f.prompt()
43 err = install.Install(f.Name)
44 case f.uninstall:
45 f.prompt()
46 err = install.Uninstall(f.Name)
47 default:
48 // none of the action flags matched,
49 // returning false should make the real program execute
50 return false
51 }
52
53 if err != nil {
54 fmt.Printf("%s failed! %s\n", f.action(), err)
55 os.Exit(3)
56 }
57 fmt.Println("Done!")
58 return true
59}
60
61// prompt asks the user for approval and
62// exits if approval was not given.
63func (f *CLI) prompt() {
64 defer fmt.Println(f.action() + "ing...")
65 if f.yes {
66 return
67 }
68 fmt.Printf("%s completion for %s? ", f.action(), f.Name)
69 var answer string
70 fmt.Scanln(&answer)
71
72 switch strings.ToLower(answer) {
73 case "y", "yes":
74 return
75 default:
76 fmt.Println("Cancelling...")
77 os.Exit(1)
78 }
79}
80
81// AddFlags adds the CLI flags to the flag set.
82// If flags is nil, the default command line flags will be taken.
83// Pass non-empty strings as installName and uninstallName to override the default
84// flag names.
85func (f *CLI) AddFlags(flags *flag.FlagSet) {
86 if flags == nil {
87 flags = flag.CommandLine
88 }
89
90 if f.InstallName == "" {
91 f.InstallName = defaultInstallName
92 }
93 if f.UninstallName == "" {
94 f.UninstallName = defaultUninstallName
95 }
96
97 if flags.Lookup(f.InstallName) == nil {
98 flags.BoolVar(&f.install, f.InstallName, false,
99 fmt.Sprintf("Install completion for %s command", f.Name))
100 }
101 if flags.Lookup(f.UninstallName) == nil {
102 flags.BoolVar(&f.uninstall, f.UninstallName, false,
103 fmt.Sprintf("Uninstall completion for %s command", f.Name))
104 }
105 if flags.Lookup("y") == nil {
106 flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes'")
107 }
108}
109
110// validate the CLI
111func (f *CLI) validate() error {
112 if f.install && f.uninstall {
113 return errors.New("Install and uninstall are mutually exclusive")
114 }
115 return nil
116}
117
118// action name according to the CLI values.
119func (f *CLI) action() string {
120 switch {
121 case f.install:
122 return "Install"
123 case f.uninstall:
124 return "Uninstall"
125 default:
126 return "unknown"
127 }
128}
diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go
new file mode 100644
index 0000000..a287f99
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/bash.go
@@ -0,0 +1,32 @@
1package install
2
3import "fmt"
4
5// (un)install in bash
6// basically adds/removes from .bashrc:
7//
8// complete -C </path/to/completion/command> <command>
9type bash struct {
10 rc string
11}
12
13func (b bash) Install(cmd, bin string) error {
14 completeCmd := b.cmd(cmd, bin)
15 if lineInFile(b.rc, completeCmd) {
16 return fmt.Errorf("already installed in %s", b.rc)
17 }
18 return appendToFile(b.rc, completeCmd)
19}
20
21func (b bash) Uninstall(cmd, bin string) error {
22 completeCmd := b.cmd(cmd, bin)
23 if !lineInFile(b.rc, completeCmd) {
24 return fmt.Errorf("not installed in %s", b.rc)
25 }
26
27 return removeFromFile(b.rc, completeCmd)
28}
29
30func (bash) cmd(cmd, bin string) string {
31 return fmt.Sprintf("complete -C %s %s", bin, cmd)
32}
diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go
new file mode 100644
index 0000000..082a226
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/install.go
@@ -0,0 +1,92 @@
1package install
2
3import (
4 "errors"
5 "os"
6 "os/user"
7 "path/filepath"
8
9 "github.com/hashicorp/go-multierror"
10)
11
12type installer interface {
13 Install(cmd, bin string) error
14 Uninstall(cmd, bin string) error
15}
16
17// Install complete command given:
18// cmd: is the command name
19func Install(cmd string) error {
20 is := installers()
21 if len(is) == 0 {
22 return errors.New("Did not find any shells to install")
23 }
24 bin, err := getBinaryPath()
25 if err != nil {
26 return err
27 }
28
29 for _, i := range is {
30 errI := i.Install(cmd, bin)
31 if errI != nil {
32 err = multierror.Append(err, errI)
33 }
34 }
35
36 return err
37}
38
39// Uninstall complete command given:
40// cmd: is the command name
41func Uninstall(cmd string) error {
42 is := installers()
43 if len(is) == 0 {
44 return errors.New("Did not find any shells to uninstall")
45 }
46 bin, err := getBinaryPath()
47 if err != nil {
48 return err
49 }
50
51 for _, i := range is {
52 errI := i.Uninstall(cmd, bin)
53 if errI != nil {
54 err = multierror.Append(err, errI)
55 }
56 }
57
58 return err
59}
60
61func installers() (i []installer) {
62 for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} {
63 if f := rcFile(rc); f != "" {
64 i = append(i, bash{f})
65 break
66 }
67 }
68 if f := rcFile(".zshrc"); f != "" {
69 i = append(i, zsh{f})
70 }
71 return
72}
73
74func getBinaryPath() (string, error) {
75 bin, err := os.Executable()
76 if err != nil {
77 return "", err
78 }
79 return filepath.Abs(bin)
80}
81
82func rcFile(name string) string {
83 u, err := user.Current()
84 if err != nil {
85 return ""
86 }
87 path := filepath.Join(u.HomeDir, name)
88 if _, err := os.Stat(path); err != nil {
89 return ""
90 }
91 return path
92}
diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go
new file mode 100644
index 0000000..2c8b44c
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/utils.go
@@ -0,0 +1,118 @@
1package install
2
3import (
4 "bufio"
5 "fmt"
6 "io"
7 "io/ioutil"
8 "os"
9)
10
11func lineInFile(name string, lookFor string) bool {
12 f, err := os.Open(name)
13 if err != nil {
14 return false
15 }
16 defer f.Close()
17 r := bufio.NewReader(f)
18 prefix := []byte{}
19 for {
20 line, isPrefix, err := r.ReadLine()
21 if err == io.EOF {
22 return false
23 }
24 if err != nil {
25 return false
26 }
27 if isPrefix {
28 prefix = append(prefix, line...)
29 continue
30 }
31 line = append(prefix, line...)
32 if string(line) == lookFor {
33 return true
34 }
35 prefix = prefix[:0]
36 }
37}
38
39func appendToFile(name string, content string) error {
40 f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0)
41 if err != nil {
42 return err
43 }
44 defer f.Close()
45 _, err = f.WriteString(fmt.Sprintf("\n%s\n", content))
46 return err
47}
48
49func removeFromFile(name string, content string) error {
50 backup := name + ".bck"
51 err := copyFile(name, backup)
52 if err != nil {
53 return err
54 }
55 temp, err := removeContentToTempFile(name, content)
56 if err != nil {
57 return err
58 }
59
60 err = copyFile(temp, name)
61 if err != nil {
62 return err
63 }
64
65 return os.Remove(backup)
66}
67
68func removeContentToTempFile(name, content string) (string, error) {
69 rf, err := os.Open(name)
70 if err != nil {
71 return "", err
72 }
73 defer rf.Close()
74 wf, err := ioutil.TempFile("/tmp", "complete-")
75 if err != nil {
76 return "", err
77 }
78 defer wf.Close()
79
80 r := bufio.NewReader(rf)
81 prefix := []byte{}
82 for {
83 line, isPrefix, err := r.ReadLine()
84 if err == io.EOF {
85 break
86 }
87 if err != nil {
88 return "", err
89 }
90 if isPrefix {
91 prefix = append(prefix, line...)
92 continue
93 }
94 line = append(prefix, line...)
95 str := string(line)
96 if str == content {
97 continue
98 }
99 wf.WriteString(str + "\n")
100 prefix = prefix[:0]
101 }
102 return wf.Name(), nil
103}
104
105func copyFile(src string, dst string) error {
106 in, err := os.Open(src)
107 if err != nil {
108 return err
109 }
110 defer in.Close()
111 out, err := os.Create(dst)
112 if err != nil {
113 return err
114 }
115 defer out.Close()
116 _, err = io.Copy(out, in)
117 return err
118}
diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go
new file mode 100644
index 0000000..a625f53
--- /dev/null
+++ b/vendor/github.com/posener/complete/cmd/install/zsh.go
@@ -0,0 +1,39 @@
1package install
2
3import "fmt"
4
5// (un)install in zsh
6// basically adds/removes from .zshrc:
7//
8// autoload -U +X bashcompinit && bashcompinit
9// complete -C </path/to/completion/command> <command>
10type zsh struct {
11 rc string
12}
13
14func (z zsh) Install(cmd, bin string) error {
15 completeCmd := z.cmd(cmd, bin)
16 if lineInFile(z.rc, completeCmd) {
17 return fmt.Errorf("already installed in %s", z.rc)
18 }
19
20 bashCompInit := "autoload -U +X bashcompinit && bashcompinit"
21 if !lineInFile(z.rc, bashCompInit) {
22 completeCmd = bashCompInit + "\n" + completeCmd
23 }
24
25 return appendToFile(z.rc, completeCmd)
26}
27
28func (z zsh) Uninstall(cmd, bin string) error {
29 completeCmd := z.cmd(cmd, bin)
30 if !lineInFile(z.rc, completeCmd) {
31 return fmt.Errorf("not installed in %s", z.rc)
32 }
33
34 return removeFromFile(z.rc, completeCmd)
35}
36
37func (zsh) cmd(cmd, bin string) string {
38 return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd)
39}
diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go
new file mode 100644
index 0000000..82d37d5
--- /dev/null
+++ b/vendor/github.com/posener/complete/command.go
@@ -0,0 +1,111 @@
1package complete
2
3// Command represents a command line.
4// It holds the data that enables auto completion of the command line.
5// Command can also be a sub command.
6type Command struct {
7 // Sub is a map of sub commands of the current command.
8 // The key refers to the sub command name, and the value is its
9 // Command descriptive struct.
10 Sub Commands
11
12 // Flags is a map of flags that the command accepts.
13 // The key is the flag name, and the value is its predictions.
14 Flags Flags
15
16 // GlobalFlags is a map of flags that the command accepts.
17 // Global flags can also appear after a sub command.
18 GlobalFlags Flags
19
20 // Args are extra arguments that the command accepts, those that are
21 // given without any flag before them.
22 Args Predictor
23}
24
25// Predict returns all possible predictions for args according to the command struct
26func (c *Command) Predict(a Args) []string {
27 options, _ := c.predict(a)
28 return options
29}
30
31// Commands is the type of Sub member, it maps a command name to a command struct
32type Commands map[string]Command
33
34// Predict completion of sub command names according to command line arguments
35func (c Commands) Predict(a Args) (prediction []string) {
36 for sub := range c {
37 prediction = append(prediction, sub)
38 }
39 return
40}
41
42// Flags is the type of the Flags member; it maps a flag name to the flag's predictions.
43type Flags map[string]Predictor
44
45// Predict completion of flag names according to command line arguments
46func (f Flags) Predict(a Args) (prediction []string) {
47 for flag := range f {
48 // If the flag starts with a hyphen, we avoid emitting the prediction
49 // unless the last typed arg contains a hyphen as well.
50 flagHyphenStart := len(flag) != 0 && flag[0] == '-'
51 lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-'
52 if flagHyphenStart && !lastHyphenStart {
53 continue
54 }
55 prediction = append(prediction, flag)
56 }
57 return
58}
59
60// predict options.
61// only is set to true if no more options are allowed to be returned,
62// which happens in the case of a special flag that has specific completion arguments,
63// so that other flags or sub commands can't come after it.
64func (c *Command) predict(a Args) (options []string, only bool) {
65
66 // search sub commands for predictions first
67 subCommandFound := false
68 for i, arg := range a.Completed {
69 if cmd, ok := c.Sub[arg]; ok {
70 subCommandFound = true
71
72 // recursive call for sub command
73 options, only = cmd.predict(a.from(i))
74 if only {
75 return
76 }
77
78 // We matched so stop searching. Continuing to search can accidentally
79 // match a subcommand with current set of commands, see issue #46.
80 break
81 }
82 }
83
84 // if last completed word is a global flag that we need to complete
85 if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil {
86 Log("Predicting according to global flag %s", a.LastCompleted)
87 return predictor.Predict(a), true
88 }
89
90 options = append(options, c.GlobalFlags.Predict(a)...)
91
92 // if a sub command was entered, we won't add the parent command
93 // completions and we return here.
94 if subCommandFound {
95 return
96 }
97
98 // if last completed word is a command flag that we need to complete
99 if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil {
100 Log("Predicting according to flag %s", a.LastCompleted)
101 return predictor.Predict(a), true
102 }
103
104 options = append(options, c.Sub.Predict(a)...)
105 options = append(options, c.Flags.Predict(a)...)
106 if c.Args != nil {
107 options = append(options, c.Args.Predict(a)...)
108 }
109
110 return
111}
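A small sketch of how Predict behaves on a hand-built Args value (hypothetical tool name and flags; prefix filtering happens later, in Complete, not here):

```go
package main

import (
	"fmt"

	"github.com/posener/complete"
)

func main() {
	cmd := complete.Command{
		Sub: complete.Commands{
			"build": {Flags: complete.Flags{"-cpus": complete.PredictAnything}},
		},
		GlobalFlags: complete.Flags{"-h": complete.PredictNothing},
	}

	// Hypothetical Args value, as if the completion line were "mytool bu".
	a := complete.Args{
		All:       []string{"bu"},
		Completed: []string{},
		Last:      "bu",
	}

	// Prints the raw predictions, including "build"; matching against the
	// typed prefix is done by Complete(), not by Predict().
	fmt.Println(cmd.Predict(a))
}
```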
diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go
new file mode 100644
index 0000000..185d1e8
--- /dev/null
+++ b/vendor/github.com/posener/complete/complete.go
@@ -0,0 +1,95 @@
1// Package complete provides a tool for writing bash completion scripts in Go.
2//
3// Writing bash completion scripts is hard work. This package provides an easy way
4// to create bash completion scripts for any command, and also an easy way to install/uninstall
5// the completion of the command.
6package complete
7
8import (
9 "flag"
10 "fmt"
11 "io"
12 "os"
13
14 "github.com/posener/complete/cmd"
15 "github.com/posener/complete/match"
16)
17
18const (
19 envComplete = "COMP_LINE"
20 envDebug = "COMP_DEBUG"
21)
22
23// Complete structs define completion for a command with CLI options
24type Complete struct {
25 Command Command
26 cmd.CLI
27 Out io.Writer
28}
29
30// New creates a new complete command.
31// name is the name of the command we want to auto complete.
32// IMPORTANT: it must be the same name - if the auto complete
33// completes the 'go' command, name must be equal to "go".
34// command is the struct of the command completion.
35func New(name string, command Command) *Complete {
36 return &Complete{
37 Command: command,
38 CLI: cmd.CLI{Name: name},
39 Out: os.Stdout,
40 }
41}
42
43// Run runs the completion and add installation flags beforehand.
44// The flags are added to the main flag CommandLine variable.
45func (c *Complete) Run() bool {
46 c.AddFlags(nil)
47 flag.Parse()
48 return c.Complete()
49}
50
51// Complete a command from the completion line in the environment variable,
52// and print out the completion options.
53// It returns true if the completion ran or if the cli matched
54// any of the given flags, and false otherwise.
55// For installation: it assumes that flags were added and parsed before
56// it was called.
57func (c *Complete) Complete() bool {
58 line, ok := getLine()
59 if !ok {
60 // make sure flags parsed,
61 // in case they were not added in the main program
62 return c.CLI.Run()
63 }
64 Log("Completing line: %s", line)
65 a := newArgs(line)
66 Log("Completing last field: %s", a.Last)
67 options := c.Command.Predict(a)
68 Log("Options: %s", options)
69
70 // filter only options that match the last argument
71 matches := []string{}
72 for _, option := range options {
73 if match.Prefix(option, a.Last) {
74 matches = append(matches, option)
75 }
76 }
77 Log("Matches: %s", matches)
78 c.output(matches)
79 return true
80}
81
82func getLine() (string, bool) {
83 line := os.Getenv(envComplete)
84 if line == "" {
85 return "", false
86 }
87 return line, true
88}
89
90func (c *Complete) output(options []string) {
91 // stdout of program defines the complete options
92 for _, option := range options {
93 fmt.Fprintln(c.Out, option)
94 }
95}
diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go
new file mode 100644
index 0000000..797a80c
--- /dev/null
+++ b/vendor/github.com/posener/complete/log.go
@@ -0,0 +1,23 @@
1package complete
2
3import (
4 "io"
5 "io/ioutil"
6 "log"
7 "os"
8)
9
10// Log is used for debugging purposes.
11// Since complete runs on tab completion, it is nice to
12// have logs written to stderr (when writing your own completer).
13// To write logs, set the COMP_DEBUG environment variable and
14// use complete.Log in the complete program.
15var Log = getLogger()
16
17func getLogger() func(format string, args ...interface{}) {
18 var logfile io.Writer = ioutil.Discard
19 if os.Getenv(envDebug) != "" {
20 logfile = os.Stderr
21 }
22 return log.New(logfile, "complete ", log.Flags()).Printf
23}
diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go
new file mode 100644
index 0000000..051171e
--- /dev/null
+++ b/vendor/github.com/posener/complete/match/file.go
@@ -0,0 +1,19 @@
1package match
2
3import "strings"
4
5// File returns true if prefix can match the file
6func File(file, prefix string) bool {
7 // special case for current directory completion
8 if file == "./" && (prefix == "." || prefix == "") {
9 return true
10 }
11 if prefix == "." && strings.HasPrefix(file, ".") {
12 return true
13 }
14
15 file = strings.TrimPrefix(file, "./")
16 prefix = strings.TrimPrefix(prefix, "./")
17
18 return strings.HasPrefix(file, prefix)
19}
diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go
new file mode 100644
index 0000000..812fcac
--- /dev/null
+++ b/vendor/github.com/posener/complete/match/match.go
@@ -0,0 +1,6 @@
1package match
2
3// Match matches two strings.
4// It is used for comparing a term to the last typed
5// word, the prefix, to see if it is a possible auto complete option.
6type Match func(term, prefix string) bool
diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go
new file mode 100644
index 0000000..9a01ba6
--- /dev/null
+++ b/vendor/github.com/posener/complete/match/prefix.go
@@ -0,0 +1,9 @@
1package match
2
3import "strings"
4
5// Prefix is a simple matcher: there is a match if prefix is a prefix of the word.
6// Prefix returns true if long has prefix as its prefix.
7func Prefix(long, prefix string) bool {
8 return strings.HasPrefix(long, prefix)
9}
diff --git a/vendor/github.com/posener/complete/metalinter.json b/vendor/github.com/posener/complete/metalinter.json
new file mode 100644
index 0000000..799c1d0
--- /dev/null
+++ b/vendor/github.com/posener/complete/metalinter.json
@@ -0,0 +1,21 @@
1{
2 "Vendor": true,
3 "DisableAll": true,
4 "Enable": [
5 "gofmt",
6 "goimports",
7 "interfacer",
8 "goconst",
9 "misspell",
10 "unconvert",
11 "gosimple",
12 "golint",
13 "structcheck",
14 "deadcode",
15 "vet"
16 ],
17 "Exclude": [
18 "initTests is unused"
19 ],
20 "Deadline": "2m"
21}
diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go
new file mode 100644
index 0000000..8207063
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict.go
@@ -0,0 +1,41 @@
1package complete
2
3// Predictor implements a predict method, in which given
4// command line arguments returns a list of options it predicts.
5type Predictor interface {
6 Predict(Args) []string
7}
8
9// PredictOr unions the given predictors, so that the resulting predictor
10// returns the union of their predictions.
11func PredictOr(predictors ...Predictor) Predictor {
12 return PredictFunc(func(a Args) (prediction []string) {
13 for _, p := range predictors {
14 if p == nil {
15 continue
16 }
17 prediction = append(prediction, p.Predict(a)...)
18 }
19 return
20 })
21}
22
23// PredictFunc determines what terms can follow a command or a flag.
24// It is used for auto completion: given the last word already typed in
25// the command line, it returns the words that can complete it.
26type PredictFunc func(Args) []string
27
28// Predict invokes the predict function and implements the Predictor interface
29func (p PredictFunc) Predict(a Args) []string {
30 if p == nil {
31 return nil
32 }
33 return p(a)
34}
35
36// PredictNothing does not expect anything after.
37var PredictNothing Predictor
38
39// PredictAnything expects something, but nothing particular, such as a number
40// or arbitrary name.
41var PredictAnything = PredictFunc(func(Args) []string { return nil })
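As an illustration, PredictOr can combine a fixed set of values with file completion for a single flag; this sketch uses hypothetical flag and command names and relies on PredictSet and PredictFiles, which appear further down in this diff:

```go
package main

import "github.com/posener/complete"

func main() {
	// Hypothetical wiring: "-output" completes to either a fixed set of
	// format names or to *.out files, whichever matches what was typed.
	cmd := complete.Command{
		Flags: complete.Flags{
			"-output": complete.PredictOr(
				complete.PredictSet("json", "yaml", "table"),
				complete.PredictFiles("*.out"),
			),
		},
	}
	complete.New("mytool", cmd).Run()
}
```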
diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go
new file mode 100644
index 0000000..c8adf7e
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict_files.go
@@ -0,0 +1,108 @@
1package complete
2
3import (
4 "io/ioutil"
5 "os"
6 "path/filepath"
7 "strings"
8
9 "github.com/posener/complete/match"
10)
11
12// PredictDirs will search for directories in the partially typed
13// path; if no path has been typed yet, it will complete to directories
14// in the current working directory.
15func PredictDirs(pattern string) Predictor {
16 return files(pattern, false)
17}
18
19// PredictFiles will search for files matching the given pattern in the
20// partially typed path; if no path has been typed yet, it will complete to files that
21// match the pattern in the current working directory.
22// To match any file, use "*" as pattern. To match go files use "*.go", and so on.
23func PredictFiles(pattern string) Predictor {
24 return files(pattern, true)
25}
26
27func files(pattern string, allowFiles bool) PredictFunc {
28
29 // search for files according to arguments,
30 // if only one directory has matched the result, search recursively into
31 // this directory to give more results.
32 return func(a Args) (prediction []string) {
33 prediction = predictFiles(a, pattern, allowFiles)
34
35 // if the number of prediction is not 1, we either have many results or
36 // have no results, so we return it.
37 if len(prediction) != 1 {
38 return
39 }
40
41 // only try deeper, if the one item is a directory
42 if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() {
43 return
44 }
45
46 a.Last = prediction[0]
47 return predictFiles(a, pattern, allowFiles)
48 }
49}
50
51func predictFiles(a Args, pattern string, allowFiles bool) []string {
52 if strings.HasSuffix(a.Last, "/..") {
53 return nil
54 }
55
56 dir := a.Directory()
57 files := listFiles(dir, pattern, allowFiles)
58
59 // add dir if match
60 files = append(files, dir)
61
62 return PredictFilesSet(files).Predict(a)
63}
64
65// PredictFilesSet predict according to file rules to a given set of file names
66func PredictFilesSet(files []string) PredictFunc {
67 return func(a Args) (prediction []string) {
68 // add all matching files to prediction
69 for _, f := range files {
70 f = fixPathForm(a.Last, f)
71
72 // test matching of file to the argument
73 if match.File(f, a.Last) {
74 prediction = append(prediction, f)
75 }
76 }
77 return
78 }
79}
80
81func listFiles(dir, pattern string, allowFiles bool) []string {
82 // set of all file names
83 m := map[string]bool{}
84
85 // list files
86 if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil {
87 for _, f := range files {
88 if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles {
89 m[f] = true
90 }
91 }
92 }
93
94 // list directories
95 if dirs, err := ioutil.ReadDir(dir); err == nil {
96 for _, d := range dirs {
97 if d.IsDir() {
98 m[filepath.Join(dir, d.Name())] = true
99 }
100 }
101 }
102
103 list := make([]string, 0, len(m))
104 for k := range m {
105 list = append(list, k)
106 }
107 return list
108}
diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go
new file mode 100644
index 0000000..fa4a34a
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict_set.go
@@ -0,0 +1,12 @@
1package complete
2
3// PredictSet expects specific set of terms, given in the options argument.
4func PredictSet(options ...string) Predictor {
5 return predictSet(options)
6}
7
8type predictSet []string
9
10func (p predictSet) Predict(a Args) []string {
11 return p
12}
diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md
new file mode 100644
index 0000000..74077e3
--- /dev/null
+++ b/vendor/github.com/posener/complete/readme.md
@@ -0,0 +1,116 @@
1# complete
2
3[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete)
4[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete)
5[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete)
6[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete)
7
8A tool for writing bash completion scripts in Go.
9
10Writing bash completion scripts is hard work. This package provides an easy way
11to create bash completion scripts for any command, and also an easy way to install/uninstall
12the completion of the command.
13
14## go command bash completion
15
16In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line.
17
18This is an example that uses the `complete` package on the `go` command - the `complete` package
19can also be used to implement any completions, see [Usage](#usage).
20
21### Install
22
231. Type in your shell:
24```
25go get -u github.com/posener/complete/gocomplete
26gocomplete -install
27```
28
292. Restart your shell
30
31Uninstall by `gocomplete -uninstall`
32
33### Features
34
35- Complete `go` command, including sub commands and all flags.
36- Complete packages names or `.go` files when necessary.
37- Complete test names after `-run` flag.
38
39## complete package
40
41Supported shells:
42
43- [x] bash
44- [x] zsh
45
46### Usage
47
48Assuming you have a program called `run` and you want bash completion
49for it, meaning that if you type `run`, then a space, then press the `Tab` key,
50the shell will suggest relevant completion options.
51
52In that case, we will create a Go program called `runcomplete`, with a
53`func main()`, that implements the completion for the `run` program.
54Once `runcomplete` is built into a binary, we can run
55`runcomplete -install`, which adds all the bash completion
56options for `run` to our shell.
57
58So here it is:
59
60```go
61import "github.com/posener/complete"
62
63func main() {
64
65 // create a Command object, that represents the command we want
66 // to complete.
67 run := complete.Command{
68
69 // Sub defines a list of sub commands of the program,
70 // this is recursive, since every command is of type command also.
71 Sub: complete.Commands{
72
73 // add a build sub command
74 "build": complete.Command {
75
76 // define flags of the build sub command
77 Flags: complete.Flags{
78 // build sub command has a flag '-cpus', which
79 // expects number of cpus after it. in that case
80 // anything could complete this flag.
81 "-cpus": complete.PredictAnything,
82 },
83 },
84 },
85
86 // define flags of the 'run' main command
87 Flags: complete.Flags{
88 // a flag -o, which expects a file ending with .out after
89 // it, the tab completion will auto complete for files matching
90 // the given pattern.
91 "-o": complete.PredictFiles("*.out"),
92 },
93
94 // define global flags of the 'run' main command
95 // those will show up also when a sub command was entered in the
96 // command line
97 GlobalFlags: complete.Flags{
98
99 // a flag '-h' which does not expect anything after it
100 "-h": complete.PredictNothing,
101 },
102 }
103
104 // run the command completion, as part of the main() function.
105 // this triggers the autocompletion when needed.
106 // name must be exactly as the binary that we want to complete.
107 complete.New("run", run).Run()
108}
109```
110
111### Self completing program
112
113In case the program that we want to complete is written in Go, we can
114make it self completing.
115
116Here is an [example](./example/self/main.go)
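A minimal sketch of such a self-completing program, following the same pattern as the linked example (the command name `greet` and the `-name` flag are hypothetical):

```go
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/posener/complete"
)

func main() {
	// The program's own flags.
	name := flag.String("name", "", "name to greet")

	// Describe completion for this very binary.
	cmp := complete.New("greet", complete.Command{
		Flags: complete.Flags{"-name": complete.PredictAnything},
	})

	// Add the -install/-uninstall/-y flags and parse everything together.
	cmp.AddFlags(nil)
	flag.Parse()

	// If we were invoked for completion, or to (un)install it, stop here.
	if cmp.Complete() {
		os.Exit(0)
	}

	fmt.Println("Hello,", *name)
}
```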
diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh
new file mode 100644
index 0000000..56bfcf1
--- /dev/null
+++ b/vendor/github.com/posener/complete/test.sh
@@ -0,0 +1,12 @@
1#!/usr/bin/env bash
2
3set -e
4echo "" > coverage.txt
5
6for d in $(go list ./... | grep -v vendor); do
7 go test -v -race -coverprofile=profile.out -covermode=atomic $d
8 if [ -f profile.out ]; then
9 cat profile.out >> coverage.txt
10 rm profile.out
11 fi
12done \ No newline at end of file
diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go
new file mode 100644
index 0000000..58b8b79
--- /dev/null
+++ b/vendor/github.com/posener/complete/utils.go
@@ -0,0 +1,46 @@
1package complete
2
3import (
4 "os"
5 "path/filepath"
6 "strings"
7)
8
9// fixPathForm changes a file name to a relative name
10func fixPathForm(last string, file string) string {
11 // get wording directory for relative name
12 workDir, err := os.Getwd()
13 if err != nil {
14 return file
15 }
16
17 abs, err := filepath.Abs(file)
18 if err != nil {
19 return file
20 }
21
22 // if last is absolute, return path as absolute
23 if filepath.IsAbs(last) {
24 return fixDirPath(abs)
25 }
26
27 rel, err := filepath.Rel(workDir, abs)
28 if err != nil {
29 return file
30 }
31
32 // fix ./ prefix of path
33 if rel != "." && strings.HasPrefix(last, ".") {
34 rel = "./" + rel
35 }
36
37 return fixDirPath(rel)
38}
39
40func fixDirPath(path string) string {
41 info, err := os.Stat(path)
42 if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") {
43 path += "/"
44 }
45 return path
46}
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
deleted file mode 100644
index bf90ad5..0000000
--- a/vendor/github.com/satori/go.uuid/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
1language: go
2sudo: false
3go:
4 - 1.2
5 - 1.3
6 - 1.4
7 - 1.5
8 - 1.6
9 - 1.7
10 - tip
11matrix:
12 allow_failures:
13 - go: tip
14 fast_finish: true
15before_install:
16 - go get github.com/mattn/goveralls
17 - go get golang.org/x/tools/cmd/cover
18script:
19 - $HOME/gopath/bin/goveralls -service=travis-ci
20notifications:
21 email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
deleted file mode 100644
index 488357b..0000000
--- a/vendor/github.com/satori/go.uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
1Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>
2
3Permission is hereby granted, free of charge, to any person obtaining
4a copy of this software and associated documentation files (the
5"Software"), to deal in the Software without restriction, including
6without limitation the rights to use, copy, modify, merge, publish,
7distribute, sublicense, and/or sell copies of the Software, and to
8permit persons to whom the Software is furnished to do so, subject to
9the following conditions:
10
11The above copyright notice and this permission notice shall be
12included in all copies or substantial portions of the Software.
13
14THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
deleted file mode 100644
index b6aad1c..0000000
--- a/vendor/github.com/satori/go.uuid/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
1# UUID package for Go language
2
3[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
4[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
5[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
6
7This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both creation and parsing of UUIDs are supported.
8
9It comes with 100% test coverage and benchmarks out of the box.
10
11Supported versions:
12* Version 1, based on timestamp and MAC address (RFC 4122)
13* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
14* Version 3, based on MD5 hashing (RFC 4122)
15* Version 4, based on random numbers (RFC 4122)
16* Version 5, based on SHA-1 hashing (RFC 4122)
17
18## Installation
19
20Use the `go` command:
21
22 $ go get github.com/satori/go.uuid
23
24## Requirements
25
26UUID package requires Go >= 1.2.
27
28## Example
29
30```go
31package main
32
33import (
34 "fmt"
35 "github.com/satori/go.uuid"
36)
37
38func main() {
39 // Creating UUID Version 4
40 u1 := uuid.NewV4()
41 fmt.Printf("UUIDv4: %s\n", u1)
42
43 // Parsing UUID from string input
44 u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
45 if err != nil {
46 fmt.Printf("Something gone wrong: %s", err)
47 }
48 fmt.Printf("Successfully parsed: %s", u2)
49}
50```
51
52## Documentation
53
54[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
55
56## Links
57* [RFC 4122](http://tools.ietf.org/html/rfc4122)
58* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
59
60## Copyright
61
62Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>.
63
64UUID package released under MIT License.
65See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
deleted file mode 100644
index 295f3fc..0000000
--- a/vendor/github.com/satori/go.uuid/uuid.go
+++ /dev/null
@@ -1,481 +0,0 @@
1// Copyright (C) 2013-2015 by Maxim Bublis <b@codemonkey.ru>
2//
3// Permission is hereby granted, free of charge, to any person obtaining
4// a copy of this software and associated documentation files (the
5// "Software"), to deal in the Software without restriction, including
6// without limitation the rights to use, copy, modify, merge, publish,
7// distribute, sublicense, and/or sell copies of the Software, and to
8// permit persons to whom the Software is furnished to do so, subject to
9// the following conditions:
10//
11// The above copyright notice and this permission notice shall be
12// included in all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22// Package uuid provides implementation of Universally Unique Identifier (UUID).
23// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
24// version 2 (as specified in DCE 1.1).
25package uuid
26
27import (
28 "bytes"
29 "crypto/md5"
30 "crypto/rand"
31 "crypto/sha1"
32 "database/sql/driver"
33 "encoding/binary"
34 "encoding/hex"
35 "fmt"
36 "hash"
37 "net"
38 "os"
39 "sync"
40 "time"
41)
42
43// UUID layout variants.
44const (
45 VariantNCS = iota
46 VariantRFC4122
47 VariantMicrosoft
48 VariantFuture
49)
50
51// UUID DCE domains.
52const (
53 DomainPerson = iota
54 DomainGroup
55 DomainOrg
56)
57
58// Difference in 100-nanosecond intervals between
59// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
60const epochStart = 122192928000000000
61
62// Used in string method conversion
63const dash byte = '-'
64
65// UUID v1/v2 storage.
66var (
67 storageMutex sync.Mutex
68 storageOnce sync.Once
69 epochFunc = unixTimeFunc
70 clockSequence uint16
71 lastTime uint64
72 hardwareAddr [6]byte
73 posixUID = uint32(os.Getuid())
74 posixGID = uint32(os.Getgid())
75)
76
77// String parse helpers.
78var (
79 urnPrefix = []byte("urn:uuid:")
80 byteGroups = []int{8, 4, 4, 4, 12}
81)
82
83func initClockSequence() {
84 buf := make([]byte, 2)
85 safeRandom(buf)
86 clockSequence = binary.BigEndian.Uint16(buf)
87}
88
89func initHardwareAddr() {
90 interfaces, err := net.Interfaces()
91 if err == nil {
92 for _, iface := range interfaces {
93 if len(iface.HardwareAddr) >= 6 {
94 copy(hardwareAddr[:], iface.HardwareAddr)
95 return
96 }
97 }
98 }
99
100 // Initialize hardwareAddr randomly in case
101 // of real network interfaces absence
102 safeRandom(hardwareAddr[:])
103
104 // Set multicast bit as recommended in RFC 4122
105 hardwareAddr[0] |= 0x01
106}
107
108func initStorage() {
109 initClockSequence()
110 initHardwareAddr()
111}
112
113func safeRandom(dest []byte) {
114 if _, err := rand.Read(dest); err != nil {
115 panic(err)
116 }
117}
118
119// Returns difference in 100-nanosecond intervals between
120// UUID epoch (October 15, 1582) and current time.
121// This is default epoch calculation function.
122func unixTimeFunc() uint64 {
123 return epochStart + uint64(time.Now().UnixNano()/100)
124}
125
126// UUID representation compliant with specification
127// described in RFC 4122.
128type UUID [16]byte
129
130// NullUUID can be used with the standard sql package to represent a
131// UUID value that can be NULL in the database
132type NullUUID struct {
133 UUID UUID
134 Valid bool
135}
136
137// The nil UUID is special form of UUID that is specified to have all
138// 128 bits set to zero.
139var Nil = UUID{}
140
141// Predefined namespace UUIDs.
142var (
143 NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
144 NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
145 NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
146 NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
147)
148
149// And returns result of binary AND of two UUIDs.
150func And(u1 UUID, u2 UUID) UUID {
151 u := UUID{}
152 for i := 0; i < 16; i++ {
153 u[i] = u1[i] & u2[i]
154 }
155 return u
156}
157
158// Or returns result of binary OR of two UUIDs.
159func Or(u1 UUID, u2 UUID) UUID {
160 u := UUID{}
161 for i := 0; i < 16; i++ {
162 u[i] = u1[i] | u2[i]
163 }
164 return u
165}
166
167// Equal returns true if u1 and u2 equals, otherwise returns false.
168func Equal(u1 UUID, u2 UUID) bool {
169 return bytes.Equal(u1[:], u2[:])
170}
171
172// Version returns algorithm version used to generate UUID.
173func (u UUID) Version() uint {
174 return uint(u[6] >> 4)
175}
176
177// Variant returns UUID layout variant.
178func (u UUID) Variant() uint {
179 switch {
180 case (u[8] & 0x80) == 0x00:
181 return VariantNCS
182 case (u[8]&0xc0)|0x80 == 0x80:
183 return VariantRFC4122
184 case (u[8]&0xe0)|0xc0 == 0xc0:
185 return VariantMicrosoft
186 }
187 return VariantFuture
188}
189
190// Bytes returns bytes slice representation of UUID.
191func (u UUID) Bytes() []byte {
192 return u[:]
193}
194
195// Returns canonical string representation of UUID:
196// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
197func (u UUID) String() string {
198 buf := make([]byte, 36)
199
200 hex.Encode(buf[0:8], u[0:4])
201 buf[8] = dash
202 hex.Encode(buf[9:13], u[4:6])
203 buf[13] = dash
204 hex.Encode(buf[14:18], u[6:8])
205 buf[18] = dash
206 hex.Encode(buf[19:23], u[8:10])
207 buf[23] = dash
208 hex.Encode(buf[24:], u[10:])
209
210 return string(buf)
211}
212
213// SetVersion sets version bits.
214func (u *UUID) SetVersion(v byte) {
215 u[6] = (u[6] & 0x0f) | (v << 4)
216}
217
218// SetVariant sets variant bits as described in RFC 4122.
219func (u *UUID) SetVariant() {
220 u[8] = (u[8] & 0xbf) | 0x80
221}
222
223// MarshalText implements the encoding.TextMarshaler interface.
224// The encoding is the same as returned by String.
225func (u UUID) MarshalText() (text []byte, err error) {
226 text = []byte(u.String())
227 return
228}
229
230// UnmarshalText implements the encoding.TextUnmarshaler interface.
231// Following formats are supported:
232// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
233// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
234// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
235func (u *UUID) UnmarshalText(text []byte) (err error) {
236 if len(text) < 32 {
237 err = fmt.Errorf("uuid: UUID string too short: %s", text)
238 return
239 }
240
241 t := text[:]
242 braced := false
243
244 if bytes.Equal(t[:9], urnPrefix) {
245 t = t[9:]
246 } else if t[0] == '{' {
247 braced = true
248 t = t[1:]
249 }
250
251 b := u[:]
252
253 for i, byteGroup := range byteGroups {
254 if i > 0 {
255 if t[0] != '-' {
256 err = fmt.Errorf("uuid: invalid string format")
257 return
258 }
259 t = t[1:]
260 }
261
262 if len(t) < byteGroup {
263 err = fmt.Errorf("uuid: UUID string too short: %s", text)
264 return
265 }
266
267 if i == 4 && len(t) > byteGroup &&
268 ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
269 err = fmt.Errorf("uuid: UUID string too long: %s", text)
270 return
271 }
272
273 _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
274 if err != nil {
275 return
276 }
277
278 t = t[byteGroup:]
279 b = b[byteGroup/2:]
280 }
281
282 return
283}
284
285// MarshalBinary implements the encoding.BinaryMarshaler interface.
286func (u UUID) MarshalBinary() (data []byte, err error) {
287 data = u.Bytes()
288 return
289}
290
291// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
292// It will return error if the slice isn't 16 bytes long.
293func (u *UUID) UnmarshalBinary(data []byte) (err error) {
294 if len(data) != 16 {
295 err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
296 return
297 }
298 copy(u[:], data)
299
300 return
301}
302
303// Value implements the driver.Valuer interface.
304func (u UUID) Value() (driver.Value, error) {
305 return u.String(), nil
306}
307
308// Scan implements the sql.Scanner interface.
309// A 16-byte slice is handled by UnmarshalBinary, while
310// a longer byte slice or a string is handled by UnmarshalText.
311func (u *UUID) Scan(src interface{}) error {
312 switch src := src.(type) {
313 case []byte:
314 if len(src) == 16 {
315 return u.UnmarshalBinary(src)
316 }
317 return u.UnmarshalText(src)
318
319 case string:
320 return u.UnmarshalText([]byte(src))
321 }
322
323 return fmt.Errorf("uuid: cannot convert %T to UUID", src)
324}
325
326// Value implements the driver.Valuer interface.
327func (u NullUUID) Value() (driver.Value, error) {
328 if !u.Valid {
329 return nil, nil
330 }
331 // Delegate to UUID Value function
332 return u.UUID.Value()
333}
334
335// Scan implements the sql.Scanner interface.
336func (u *NullUUID) Scan(src interface{}) error {
337 if src == nil {
338 u.UUID, u.Valid = Nil, false
339 return nil
340 }
341
342 // Delegate to UUID Scan function
343 u.Valid = true
344 return u.UUID.Scan(src)
345}
346
347// FromBytes returns UUID converted from raw byte slice input.
348// It will return error if the slice isn't 16 bytes long.
349func FromBytes(input []byte) (u UUID, err error) {
350 err = u.UnmarshalBinary(input)
351 return
352}
353
354// FromBytesOrNil returns UUID converted from raw byte slice input.
355// Same behavior as FromBytes, but returns a Nil UUID on error.
356func FromBytesOrNil(input []byte) UUID {
357 uuid, err := FromBytes(input)
358 if err != nil {
359 return Nil
360 }
361 return uuid
362}
363
364// FromString returns UUID parsed from string input.
365// Input is expected in a form accepted by UnmarshalText.
366func FromString(input string) (u UUID, err error) {
367 err = u.UnmarshalText([]byte(input))
368 return
369}
370
371// FromStringOrNil returns UUID parsed from string input.
372// Same behavior as FromString, but returns a Nil UUID on error.
373func FromStringOrNil(input string) UUID {
374 uuid, err := FromString(input)
375 if err != nil {
376 return Nil
377 }
378 return uuid
379}
380
381// Returns UUID v1/v2 storage state.
382// Returns epoch timestamp, clock sequence, and hardware address.
383func getStorage() (uint64, uint16, []byte) {
384 storageOnce.Do(initStorage)
385
386 storageMutex.Lock()
387 defer storageMutex.Unlock()
388
389 timeNow := epochFunc()
390 // Clock changed backwards since last UUID generation.
391 // Should increase clock sequence.
392 if timeNow <= lastTime {
393 clockSequence++
394 }
395 lastTime = timeNow
396
397 return timeNow, clockSequence, hardwareAddr[:]
398}
399
400// NewV1 returns UUID based on current timestamp and MAC address.
401func NewV1() UUID {
402 u := UUID{}
403
404 timeNow, clockSeq, hardwareAddr := getStorage()
405
406 binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
407 binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
408 binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
409 binary.BigEndian.PutUint16(u[8:], clockSeq)
410
411 copy(u[10:], hardwareAddr)
412
413 u.SetVersion(1)
414 u.SetVariant()
415
416 return u
417}
418
419// NewV2 returns DCE Security UUID based on POSIX UID/GID.
420func NewV2(domain byte) UUID {
421 u := UUID{}
422
423 timeNow, clockSeq, hardwareAddr := getStorage()
424
425 switch domain {
426 case DomainPerson:
427 binary.BigEndian.PutUint32(u[0:], posixUID)
428 case DomainGroup:
429 binary.BigEndian.PutUint32(u[0:], posixGID)
430 }
431
432 binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
433 binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
434 binary.BigEndian.PutUint16(u[8:], clockSeq)
435 u[9] = domain
436
437 copy(u[10:], hardwareAddr)
438
439 u.SetVersion(2)
440 u.SetVariant()
441
442 return u
443}
444
445// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
446func NewV3(ns UUID, name string) UUID {
447 u := newFromHash(md5.New(), ns, name)
448 u.SetVersion(3)
449 u.SetVariant()
450
451 return u
452}
453
454// NewV4 returns random generated UUID.
455func NewV4() UUID {
456 u := UUID{}
457 safeRandom(u[:])
458 u.SetVersion(4)
459 u.SetVariant()
460
461 return u
462}
463
464// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
465func NewV5(ns UUID, name string) UUID {
466 u := newFromHash(sha1.New(), ns, name)
467 u.SetVersion(5)
468 u.SetVariant()
469
470 return u
471}
472
473// Returns UUID based on hashing of namespace UUID and name.
474func newFromHash(h hash.Hash, ns UUID, name string) UUID {
475 u := UUID{}
476 h.Write(ns[:])
477 h.Write([]byte(name))
478 copy(u[:], h.Sum(nil))
479
480 return u
481}
diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore
new file mode 100644
index 0000000..e3c2fc2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/.gitignore
@@ -0,0 +1,25 @@
1# .gitignore
2
3TODO.html
4README.html
5
6lzma/writer.txt
7lzma/reader.txt
8
9cmd/gxz/gxz
10cmd/xb/xb
11
12# test executables
13*.test
14
15# profile files
16*.out
17
18# vim swap file
19.*.swp
20
21# executables on windows
22*.exe
23
24# default compression test file
25enwik8*
diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE
new file mode 100644
index 0000000..58ebdc1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/LICENSE
@@ -0,0 +1,26 @@
1Copyright (c) 2014-2016 Ulrich Kunitz
2All rights reserved.
3
4Redistribution and use in source and binary forms, with or without
5modification, are permitted provided that the following conditions are met:
6
7* Redistributions of source code must retain the above copyright notice, this
8 list of conditions and the following disclaimer.
9
10* Redistributions in binary form must reproduce the above copyright notice,
11 this list of conditions and the following disclaimer in the documentation
12 and/or other materials provided with the distribution.
13
14* My name, Ulrich Kunitz, may not be used to endorse or promote products
15 derived from this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
21FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md
new file mode 100644
index 0000000..969ae7a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/README.md
@@ -0,0 +1,71 @@
1# Package xz
2
3This Go language package supports the reading and writing of xz
4compressed streams. It also includes a gxz command for compressing and
5decompressing data. The package is written completely in Go and doesn't
6depend on any C code.
7
8The package is currently under development. There might be bugs, and the APIs
9are not considered stable. At this time the package cannot compete with
10the xz tool regarding compression speed and size: the algorithms there
11have been developed over a long time and are highly optimized. However,
12a number of improvements are planned, and I'm very optimistic about
13parallel compression and decompression. Stay tuned!
14
15# Using the API
16
17The following example program shows how to use the API.
18
19 package main
20
21 import (
22 "bytes"
23 "io"
24 "log"
25 "os"
26
27 "github.com/ulikunitz/xz"
28 )
29
30 func main() {
31 const text = "The quick brown fox jumps over the lazy dog.\n"
32 var buf bytes.Buffer
33 // compress text
34 w, err := xz.NewWriter(&buf)
35 if err != nil {
36 log.Fatalf("xz.NewWriter error %s", err)
37 }
38 if _, err := io.WriteString(w, text); err != nil {
39 log.Fatalf("WriteString error %s", err)
40 }
41 if err := w.Close(); err != nil {
42 log.Fatalf("w.Close error %s", err)
43 }
44 // decompress buffer and write output to stdout
45 r, err := xz.NewReader(&buf)
46 if err != nil {
47 log.Fatalf("NewReader error %s", err)
48 }
49 if _, err = io.Copy(os.Stdout, r); err != nil {
50 log.Fatalf("io.Copy error %s", err)
51 }
52 }
53
54# Using the gxz compression tool
55
56The package includes a gxz command line utility for compression and
57decompression.
58
59Use the following command for installation:
60
61 $ go get github.com/ulikunitz/xz/cmd/gxz
62
63To test it, call the following command.
64
65 $ gxz bigfile
66
67After some time a much smaller file, bigfile.xz, will replace bigfile.
68To decompress it, use the following command.
69
70 $ gxz -d bigfile.xz
71
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 0000000..7b34c0c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,315 @@
1# TODO list
2
3## Release v0.6
4
51. Review encoder and check for lzma improvements under xz.
62. Fix binary tree matcher.
73. Compare compression ratio with xz tool using comparable parameters
8 and optimize parameters
94. Do some optimizations
10 - rename operation action and make it a simple type of size 8
11 - make maxMatches, wordSize parameters
12 - stop searching after a certain length is found (parameter sweetLen)
13
14## Release v0.7
15
161. Optimize code
172. Do statistical analysis to get linear presets.
183. Test sync.Pool compatibility for xz and lzma Writer and Reader
194. Fuzz optimized code.
20
21## Release v0.8
22
231. Support parallel goroutines for writing and reading xz files.
242. Support a ReaderAt interface for xz files with small block sizes.
253. Improve compatibility between gxz and xz
264. Provide manual page for gxz
27
28## Release v0.9
29
301. Improve documentation
312. Fuzz again
32
33## Release v1.0
34
351. Full functioning gxz
362. Add godoc URL to README.md (godoc.org)
373. Resolve all issues.
384. Define release candidates.
395. Public announcement.
40
41## Package lzma
42
43### Release v0.6
44
45- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
46 including
47 + simple scan at the dictionary head for the same byte
48 + use the killer byte (requiring matches to get longer, the first
49 test should be the byte that would make the match longer)
50
51
52## Optimizations
53
54- There may be a lot of false sharing in lzma.State; check whether this
55 can be improved by reorganizing the internal structure of it.
56- Check whether batching encoding and decoding improves speed.
57
58### DAG optimizations
59
60- Use full buffer to create minimal bit-length above range encoder.
61- Might be too slow (see v0.4)
62
63### Different match finders
64
65- hashes with 2, 3 characters additional to 4 characters
66- binary trees with 2-7 characters (uint64 as key, use uint32 as
67 pointers into an array)
68- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
69 into an array with bit-stealing for the colors)
70
71## Release Procedure
72
73- execute goch -l for all packages; probably with lower param like 0.5.
74- check orthography with gospell
75- Write release notes in doc/relnotes.
76- Update README.md
77- xb copyright . in xz directory to ensure all new files have Copyright
78 header
79- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
80 version files
81- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
82- Update TODO.md - write short log entry
83- git checkout master && git merge dev
84- git tag -a <version>
85- git push
86
87## Log
88
89### 2017-06-05
90
91Release v0.5.4 fixes issue #15, another problem with the padding size
92check for the xz block header. I removed the check completely.
93
94### 2017-02-15
95
96Release v0.5.3 fixes issue #12 regarding the decompression of an empty
97XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
98
99### 2016-12-02
100
101Release v0.5.2 became necessary to allow the decoding of xz files with
1024-byte padding in the block header. Many thanks to Greg, who reported
103the issue.
104
105### 2016-07-23
106
107Release v0.5.1 became necessary to fix problems with 32-bit platforms.
108Many thanks to Bruno Brigas, who reported the issue.
109
110### 2016-07-04
111
112Release v0.5 provides improvements to the compressor and provides support for
113the decompression of xz files with multiple xz streams.
114
115### 2016-01-31
116
117Another compression rate increase by checking the byte at length of the
118best match first, before checking the whole prefix. This makes the
119compressor even faster. We now have a large time budget to beat the
120compression ratio of the xz tool. For enwik8 we now have over 40 seconds
121to reduce the compressed file size by another 7 MiB.
122
123### 2016-01-30
124
125I simplified the encoder. Speed and compression rate increased
126dramatically. A high compression rate also affects the decompression
127speed. The approach with the buffer and optimizing for operation
128compression rate has not been successful. Going for the maximum length
129appears to be the best approach.
130
131### 2016-01-28
132
133The release v0.4 is ready. It provides a working xz implementation,
134which is rather slow, but works and is interoperable with the xz tool.
135It is an important milestone.
136
137### 2016-01-10
138
139I have the first working implementation of an xz reader and writer. I'm
140happy about reaching this milestone.
141
142### 2015-12-02
143
144I'm now ready to implement xz because I have a working LZMA2
145implementation. I decided today that v0.4 will use the slow encoder
146with the operations buffer, to be able to go back if I intend to do so.
147
148### 2015-10-21
149
150I have restarted the work on the library. While trying to implement
151LZMA2, I discovered that I need to simplify the encoder and decoder
152functions again. The option approach is too complicated. Using a limited byte
153writer, not caring about written bytes at all, and not trying to handle
154uncompressed data simplifies the LZMA encoder and decoder a lot.
155Processing uncompressed data and handling limits is a feature of the
156LZMA2 format, not of LZMA.
157
158I learned an interesting method from the LZO format. If the last copy is
159too far away, they move the head by 2 bytes instead of 1 byte to
160reduce processing times.
161
162### 2015-08-26
163
164I have now reimplemented the lzma package. The code is reasonably fast,
165but can still be optimized. The next step is to implement LZMA2 and then
166xz.
167
168### 2015-07-05
169
170Created release v0.3. The version is the foundation for a full xz
171implementation that is the target of v0.4.
172
173### 2015-06-11
174
175The gflag package has been developed because I couldn't use flag and
176pflag for fully compatible support of gzip's and lzma's options. It
177now seems to work quite nicely.
178
179### 2015-06-05
180
181The overflow issue was interesting to research; however, Henry S. Warren
182Jr.'s Hacker's Delight book was very helpful as usual and explained the issue
183perfectly. Fefe's information on his website was based on the
184C FAQ and quite bad, because it didn't address the issue of -MININT ==
185MININT.
186
187### 2015-06-04
188
189It has been a productive day. I improved the interface of lzma.Reader
190and lzma.Writer and fixed the error handling.
191
192### 2015-06-01
193
194By computing the bit length of the LZMA operations I was able to
195improve the greedy algorithm implementation. Using an 8 MByte buffer,
196the compression rate was not as good as for xz but already better than
197the gzip default.
198
199Compression is currently slow, but this is something we will be able to
200improve over time.
201
202### 2015-05-26
203
204Checked the license of ogier/pflag. The lzmago binary should
205include the license terms for the pflag library.
206
207I added the endorsement clause, as used by Google for the Go sources, to the
208LICENSE file.
209
210### 2015-05-22
211
212The package lzb contains now the basic implementation for creating or
213reading LZMA byte streams. It allows the support for the implementation
214of the DAG-shortest-path algorithm for the compression function.
215
216### 2015-04-23
217
218Yesterday I completed the lzbase classes. I'm a little bit concerned that
219using the components may require too much code, but on the other hand
220there is a lot of flexibility.
221
222### 2015-04-22
223
224Implemented Reader and Writer during the Bayern game against Porto. The
225second half gave me enough time.
226
227### 2015-04-21
228
229While showering this morning I discovered that the design for OpEncoder
230and OpDecoder doesn't work, because encoding/decoding might depend on
231the current status of the dictionary. This is not exactly the right way
232to start the day.
233
234Therefore we need to keep the Reader and Writer design. This time around
235we simplify it by ignoring size limits. These can be added by wrappers
236around the Reader and Writer interfaces. The Parameters type isn't
237needed anymore.
238
239However I will implement a ReaderState and WriterState type to use
240static typing to ensure the right State object is combined with the
241right lzbase.Reader and lzbase.Writer.
242
243As a start I have implemented ReaderState and WriterState to ensure
244that the state for reading is only used by readers and WriterState only
245used by Writers.
246
247### 2015-04-20
248
249Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
250
251### 2015-04-08
252
253Came up with a new simplified design for lzbase. I implemented already
254the type State that replaces OpCodec.
255
256### 2015-04-06
257
258The new lzma package is now fully usable and lzmago is using it now. The
259old lzma package has been completely removed.
260
261### 2015-04-05
262
263Implemented lzma.Reader and tested it.
264
265### 2015-04-04
266
267Implemented baseReader by adapting code from lzma.Reader.
268
269### 2015-04-03
270
271The opCodec has been copied yesterday to lzma2. opCodec has a high
272number of dependencies on other files in lzma2. Therefore I had to copy
273almost all files from lzma.
274
275### 2015-03-31
276
277Removed only a TODO item.
278
279However, in Francesco Campoy's presentation "Go for Javaneros
280(Javaïstes?)" there is the idea that by using an embedded field E, all the
281methods of E will be defined on T. If E is an interface, T satisfies E.
282
283https://talks.golang.org/2014/go4java.slide#51
284
285I have never used this, but it seems to be a cool idea.
286
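A minimal sketch of that embedding idea (the types here are hypothetical and not part of this package):

```go
package main

import "fmt"

// E is an interface.
type E interface {
	Hello() string
}

// greeter is a concrete implementation of E.
type greeter struct{}

func (greeter) Hello() string { return "hello" }

// T embeds E; the methods of E are promoted to T, so T itself
// satisfies the interface E.
type T struct {
	E
}

func main() {
	t := T{E: greeter{}}
	var e E = t // T satisfies E through the embedded field
	fmt.Println(e.Hello())
}
```
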
287### 2015-03-30
288
289Finished the type writerDict and wrote a simple test.
290
291### 2015-03-25
292
293I started to implement the writerDict.
294
295### 2015-03-24
296
297After thinking long about the LZMA2 code and several false starts, I
298have now a plan to create a self-sufficient lzma2 package that supports
299the classic LZMA format as well as LZMA2. The core idea is to support a
300baseReader and baseWriter type that support the basic LZMA stream
301without any headers. Both types must support the reuse of dictionaries
302and the opCodec.
303
304### 2015-01-10
305
3061. Implemented simple lzmago tool
3072. Tested tool against large 4.4G file
308 - compression worked correctly; tested decompression with lzma
309 - decompression hits a full buffer condition
3103. Fixed a bug in the compressor and wrote a test for it
3114. Executed full cycle for 4.4 GB file; performance can be improved ;-)
312
313### 2015-01-11
314
315- Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 0000000..fadc1a5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,74 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "errors"
9 "io"
10)
11
12// putUint32LE puts the little-endian representation of x into the first
13// four bytes of p.
14func putUint32LE(p []byte, x uint32) {
15 p[0] = byte(x)
16 p[1] = byte(x >> 8)
17 p[2] = byte(x >> 16)
18 p[3] = byte(x >> 24)
19}
20
21// putUint64LE puts the little-endian representation of x into the first
22// eight bytes of p.
23func putUint64LE(p []byte, x uint64) {
24 p[0] = byte(x)
25 p[1] = byte(x >> 8)
26 p[2] = byte(x >> 16)
27 p[3] = byte(x >> 24)
28 p[4] = byte(x >> 32)
29 p[5] = byte(x >> 40)
30 p[6] = byte(x >> 48)
31 p[7] = byte(x >> 56)
32}
33
34// uint32LE converts a little endian representation to an uint32 value.
35func uint32LE(p []byte) uint32 {
36 return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
37 uint32(p[3])<<24
38}
39
40// putUvarint puts a uvarint representation of x into the byte slice.
41func putUvarint(p []byte, x uint64) int {
42 i := 0
43 for x >= 0x80 {
44 p[i] = byte(x) | 0x80
45 x >>= 7
46 i++
47 }
48 p[i] = byte(x)
49 return i + 1
50}
51
52// errOverflowU64 indicates an overflow of the 64-bit unsigned integer.
53var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
54
55// readUvarint reads a uvarint from the given byte reader.
56func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
57 var s uint
58 i := 0
59 for {
60 b, err := r.ReadByte()
61 if err != nil {
62 return x, i, err
63 }
64 i++
65 if b < 0x80 {
66 if i > 10 || i == 10 && b > 1 {
67 return x, i, errOverflowU64
68 }
69 return x | uint64(b)<<s, i, nil
70 }
71 x |= uint64(b&0x7f) << s
72 s += 7
73 }
74}
diff --git a/vendor/github.com/ulikunitz/xz/crc.go b/vendor/github.com/ulikunitz/xz/crc.go
new file mode 100644
index 0000000..b44dca9
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/crc.go
@@ -0,0 +1,54 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "hash"
9 "hash/crc32"
10 "hash/crc64"
11)
12
13// crc32Hash implements the hash.Hash32 interface with Sum returning the
14// crc32 value in little-endian encoding.
15type crc32Hash struct {
16 hash.Hash32
17}
18
19// Sum returns the crc32 value as little endian.
20func (h crc32Hash) Sum(b []byte) []byte {
21 p := make([]byte, 4)
22 putUint32LE(p, h.Hash32.Sum32())
23 b = append(b, p...)
24 return b
25}
26
27// newCRC32 returns a CRC-32 hash that returns the 32-bit value in
28// little-endian encoding using the IEEE polynomial.
29func newCRC32() hash.Hash {
30 return crc32Hash{Hash32: crc32.NewIEEE()}
31}
32
33// crc64Hash implements the Hash64 interface with Sum returning the
34// CRC-64 value in little-endian encoding.
35type crc64Hash struct {
36 hash.Hash64
37}
38
39// Sum returns the CRC-64 value in little-endian encoding.
40func (h crc64Hash) Sum(b []byte) []byte {
41 p := make([]byte, 8)
42 putUint64LE(p, h.Hash64.Sum64())
43 b = append(b, p...)
44 return b
45}
46
47// crc64Table is used to create a CRC-64 hash.
48var crc64Table = crc64.MakeTable(crc64.ECMA)
49
50// newCRC64 returns a CRC-64 hash that returns the 64-bit value in
51// little-endian encoding using the ECMA polynomial.
52func newCRC64() hash.Hash {
53 return crc64Hash{Hash64: crc64.New(crc64Table)}
54}
diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go
new file mode 100644
index 0000000..855e60a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/example.go
@@ -0,0 +1,40 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build ignore
6
7package main
8
9import (
10 "bytes"
11 "io"
12 "log"
13 "os"
14
15 "github.com/ulikunitz/xz"
16)
17
18func main() {
19 const text = "The quick brown fox jumps over the lazy dog.\n"
20 var buf bytes.Buffer
21 // compress text
22 w, err := xz.NewWriter(&buf)
23 if err != nil {
24 log.Fatalf("xz.NewWriter error %s", err)
25 }
26 if _, err := io.WriteString(w, text); err != nil {
27 log.Fatalf("WriteString error %s", err)
28 }
29 if err := w.Close(); err != nil {
30 log.Fatalf("w.Close error %s", err)
31 }
32 // decompress buffer and write output to stdout
33 r, err := xz.NewReader(&buf)
34 if err != nil {
35 log.Fatalf("NewReader error %s", err)
36 }
37 if _, err = io.Copy(os.Stdout, r); err != nil {
38 log.Fatalf("io.Copy error %s", err)
39 }
40}
diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go
new file mode 100644
index 0000000..798159c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/format.go
@@ -0,0 +1,728 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "bytes"
9 "crypto/sha256"
10 "errors"
11 "fmt"
12 "hash"
13 "hash/crc32"
14 "io"
15
16 "github.com/ulikunitz/xz/lzma"
17)
18
19// allZeros checks whether a given byte slice has only zeros.
20func allZeros(p []byte) bool {
21 for _, c := range p {
22 if c != 0 {
23 return false
24 }
25 }
26 return true
27}
28
29// padLen returns the length of the padding required to align the given
30// size to a multiple of four bytes.
31func padLen(n int64) int {
32 k := int(n % 4)
33 if k > 0 {
34 k = 4 - k
35 }
36 return k
37}
38
39/*** Header ***/
40
41// headerMagic stores the magic bytes for the header
42var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
43
44// HeaderLen provides the length of the xz file header.
45const HeaderLen = 12
46
47// Constants for the checksum methods supported by xz.
48const (
49 CRC32 byte = 0x1
50 CRC64 = 0x4
51 SHA256 = 0xa
52)
53
54// errInvalidFlags indicates that flags are invalid.
55var errInvalidFlags = errors.New("xz: invalid flags")
56
57// verifyFlags returns the error errInvalidFlags if the value is
58// invalid.
59func verifyFlags(flags byte) error {
60 switch flags {
61 case CRC32, CRC64, SHA256:
62 return nil
63 default:
64 return errInvalidFlags
65 }
66}
67
68// flagstrings maps flag values to strings.
69var flagstrings = map[byte]string{
70 CRC32: "CRC-32",
71 CRC64: "CRC-64",
72 SHA256: "SHA-256",
73}
74
75// flagString returns the string representation for the given flags.
76func flagString(flags byte) string {
77 s, ok := flagstrings[flags]
78 if !ok {
79 return "invalid"
80 }
81 return s
82}
83
84// newHashFunc returns a function that creates hash instances for the
85// hash method encoded in flags.
86func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
87 switch flags {
88 case CRC32:
89 newHash = newCRC32
90 case CRC64:
91 newHash = newCRC64
92 case SHA256:
93 newHash = sha256.New
94 default:
95 err = errInvalidFlags
96 }
97 return
98}
99
100// header provides the actual content of the xz file header: the flags.
101type header struct {
102 flags byte
103}
104
105// Errors returned by readHeader.
106var errHeaderMagic = errors.New("xz: invalid header magic bytes")
107
108// ValidHeader checks whether data is a correct xz file header. The
109// length of data must be HeaderLen.
110func ValidHeader(data []byte) bool {
111 var h header
112 err := h.UnmarshalBinary(data)
113 return err == nil
114}
115
116// String returns a string representation of the flags.
117func (h header) String() string {
118 return flagString(h.flags)
119}
120
121// UnmarshalBinary reads header from the provided data slice.
122func (h *header) UnmarshalBinary(data []byte) error {
123 // header length
124 if len(data) != HeaderLen {
125 return errors.New("xz: wrong file header length")
126 }
127
128 // magic header
129 if !bytes.Equal(headerMagic, data[:6]) {
130 return errHeaderMagic
131 }
132
133 // checksum
134 crc := crc32.NewIEEE()
135 crc.Write(data[6:8])
136 if uint32LE(data[8:]) != crc.Sum32() {
137 return errors.New("xz: invalid checksum for file header")
138 }
139
140 // stream flags
141 if data[6] != 0 {
142 return errInvalidFlags
143 }
144 flags := data[7]
145 if err := verifyFlags(flags); err != nil {
146 return err
147 }
148
149 h.flags = flags
150 return nil
151}
152
153// MarshalBinary generates the xz file header.
154func (h *header) MarshalBinary() (data []byte, err error) {
155 if err = verifyFlags(h.flags); err != nil {
156 return nil, err
157 }
158
159 data = make([]byte, 12)
160 copy(data, headerMagic)
161 data[7] = h.flags
162
163 crc := crc32.NewIEEE()
164 crc.Write(data[6:8])
165 putUint32LE(data[8:], crc.Sum32())
166
167 return data, nil
168}
169
170/*** Footer ***/
171
172// footerLen defines the length of the footer.
173const footerLen = 12
174
175// footerMagic contains the footer magic bytes.
176var footerMagic = []byte{'Y', 'Z'}
177
178// footer represents the content of the xz file footer.
179type footer struct {
180 indexSize int64
181 flags byte
182}
183
184// String prints a string representation of the footer structure.
185func (f footer) String() string {
186 return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize)
187}
188
189// Minimum and maximum for the size of the index (backward size).
190const (
191 minIndexSize = 4
192 maxIndexSize = (1 << 32) * 4
193)
194
195// MarshalBinary converts footer values into an xz file footer. Note
196// that the footer value is checked for correctness.
197func (f *footer) MarshalBinary() (data []byte, err error) {
198 if err = verifyFlags(f.flags); err != nil {
199 return nil, err
200 }
201 if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) {
202 return nil, errors.New("xz: index size out of range")
203 }
204 if f.indexSize%4 != 0 {
205 return nil, errors.New(
206 "xz: index size not aligned to four bytes")
207 }
208
209 data = make([]byte, footerLen)
210
211 // backward size (index size)
212 s := (f.indexSize / 4) - 1
213 putUint32LE(data[4:], uint32(s))
214 // flags
215 data[9] = f.flags
216 // footer magic
217 copy(data[10:], footerMagic)
218
219 // CRC-32
220 crc := crc32.NewIEEE()
221 crc.Write(data[4:10])
222 putUint32LE(data, crc.Sum32())
223
224 return data, nil
225}
226
227// UnmarshalBinary sets the footer value by unmarshalling an xz file
228// footer.
229func (f *footer) UnmarshalBinary(data []byte) error {
230 if len(data) != footerLen {
231 return errors.New("xz: wrong footer length")
232 }
233
234 // magic bytes
235 if !bytes.Equal(data[10:], footerMagic) {
236 return errors.New("xz: footer magic invalid")
237 }
238
239 // CRC-32
240 crc := crc32.NewIEEE()
241 crc.Write(data[4:10])
242 if uint32LE(data) != crc.Sum32() {
243 return errors.New("xz: footer checksum error")
244 }
245
246 var g footer
247 // backward size (index size)
248 g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4
249
250 // flags
251 if data[8] != 0 {
252 return errInvalidFlags
253 }
254 g.flags = data[9]
255 if err := verifyFlags(g.flags); err != nil {
256 return err
257 }
258
259 *f = g
260 return nil
261}
262
263/*** Block Header ***/
264
265// blockHeader represents the content of an xz block header.
266type blockHeader struct {
267 compressedSize int64
268 uncompressedSize int64
269 filters []filter
270}
271
272// String converts the block header into a string.
273func (h blockHeader) String() string {
274 var buf bytes.Buffer
275 first := true
276 if h.compressedSize >= 0 {
277 fmt.Fprintf(&buf, "compressed size %d", h.compressedSize)
278 first = false
279 }
280 if h.uncompressedSize >= 0 {
281 if !first {
282 buf.WriteString(" ")
283 }
284 fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize)
285 first = false
286 }
287 for _, f := range h.filters {
288 if !first {
289 buf.WriteString(" ")
290 }
291 fmt.Fprintf(&buf, "filter %s", f)
292 first = false
293 }
294 return buf.String()
295}
296
297// Masks for the block flags.
298const (
299 filterCountMask = 0x03
300 compressedSizePresent = 0x40
301 uncompressedSizePresent = 0x80
302 reservedBlockFlags = 0x3C
303)
304
305// errIndexIndicator signals that an index indicator (0x00) has been found
306// instead of an expected block header indicator.
307var errIndexIndicator = errors.New("xz: found index indicator")
308
309// readBlockHeader reads the block header.
310func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) {
311 var buf bytes.Buffer
312 buf.Grow(20)
313
314 // block header size
315 z, err := io.CopyN(&buf, r, 1)
316 n = int(z)
317 if err != nil {
318 return nil, n, err
319 }
320 s := buf.Bytes()[0]
321 if s == 0 {
322 return nil, n, errIndexIndicator
323 }
324
325 // read complete header
326 headerLen := (int(s) + 1) * 4
327 buf.Grow(headerLen - 1)
328 z, err = io.CopyN(&buf, r, int64(headerLen-1))
329 n += int(z)
330 if err != nil {
331 return nil, n, err
332 }
333
334 // unmarshal block header
335 h = new(blockHeader)
336 if err = h.UnmarshalBinary(buf.Bytes()); err != nil {
337 return nil, n, err
338 }
339
340 return h, n, nil
341}
342
343// readSizeInBlockHeader reads the uncompressed or compressed size
344// fields in the block header. The present value informs the function
345// whether the respective field is actually present in the header.
346func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) {
347 if !present {
348 return -1, nil
349 }
350 x, _, err := readUvarint(r)
351 if err != nil {
352 return 0, err
353 }
354 if x >= 1<<63 {
355 return 0, errors.New("xz: size overflow in block header")
356 }
357 return int64(x), nil
358}
359
360// UnmarshalBinary unmarshals the block header.
361func (h *blockHeader) UnmarshalBinary(data []byte) error {
362 // Check header length
363 s := data[0]
364 if data[0] == 0 {
365 return errIndexIndicator
366 }
367 headerLen := (int(s) + 1) * 4
368 if len(data) != headerLen {
369 return fmt.Errorf("xz: data length %d; want %d", len(data),
370 headerLen)
371 }
372 n := headerLen - 4
373
374 // Check CRC-32
375 crc := crc32.NewIEEE()
376 crc.Write(data[:n])
377 if crc.Sum32() != uint32LE(data[n:]) {
378 return errors.New("xz: checksum error for block header")
379 }
380
381 // Block header flags
382 flags := data[1]
383 if flags&reservedBlockFlags != 0 {
384 return errors.New("xz: reserved block header flags set")
385 }
386
387 r := bytes.NewReader(data[2:n])
388
389 // Compressed size
390 var err error
391 h.compressedSize, err = readSizeInBlockHeader(
392 r, flags&compressedSizePresent != 0)
393 if err != nil {
394 return err
395 }
396
397 // Uncompressed size
398 h.uncompressedSize, err = readSizeInBlockHeader(
399 r, flags&uncompressedSizePresent != 0)
400 if err != nil {
401 return err
402 }
403
404 h.filters, err = readFilters(r, int(flags&filterCountMask)+1)
405 if err != nil {
406 return err
407 }
408
409 // Check padding
410 // Since headerLen is a multiple of 4 we don't need to check
411 // alignment.
412 k := r.Len()
413 // The standard spec says that the padding should be no more
414 // than 3 bytes. However, we found paddings of 4 or 5 in the
415 // wild. See https://github.com/ulikunitz/xz/pull/11 and
416 // https://github.com/ulikunitz/xz/issues/15
417 //
418 // The only reasonable approach seems to be to ignore the
419 // padding size. We still check that all padding bytes are zero.
420 if !allZeros(data[n-k : n]) {
421 return errPadding
422 }
423 return nil
424}
425
426// MarshalBinary marshals the binary header.
427func (h *blockHeader) MarshalBinary() (data []byte, err error) {
428 if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) {
429 return nil, errors.New("xz: filter count wrong")
430 }
431 for i, f := range h.filters {
432 if i < len(h.filters)-1 {
433 if f.id() == lzmaFilterID {
434 return nil, errors.New(
435 "xz: LZMA2 filter is not the last")
436 }
437 } else {
438 // last filter
439 if f.id() != lzmaFilterID {
440 return nil, errors.New("xz: " +
441 "last filter must be the LZMA2 filter")
442 }
443 }
444 }
445
446 var buf bytes.Buffer
447 // header size must be set at the end
448 buf.WriteByte(0)
449
450 // flags
451 flags := byte(len(h.filters) - 1)
452 if h.compressedSize >= 0 {
453 flags |= compressedSizePresent
454 }
455 if h.uncompressedSize >= 0 {
456 flags |= uncompressedSizePresent
457 }
458 buf.WriteByte(flags)
459
460 p := make([]byte, 10)
461 if h.compressedSize >= 0 {
462 k := putUvarint(p, uint64(h.compressedSize))
463 buf.Write(p[:k])
464 }
465 if h.uncompressedSize >= 0 {
466 k := putUvarint(p, uint64(h.uncompressedSize))
467 buf.Write(p[:k])
468 }
469
470 for _, f := range h.filters {
471 fp, err := f.MarshalBinary()
472 if err != nil {
473 return nil, err
474 }
475 buf.Write(fp)
476 }
477
478 // padding
479 for i := padLen(int64(buf.Len())); i > 0; i-- {
480 buf.WriteByte(0)
481 }
482
483 // CRC-32 placeholder
484 buf.Write(p[:4])
485
486 data = buf.Bytes()
487 if len(data)%4 != 0 {
488 panic("data length not aligned")
489 }
490 s := len(data)/4 - 1
491 if !(1 < s && s <= 255) {
492 panic("wrong block header size")
493 }
494 data[0] = byte(s)
495
496 crc := crc32.NewIEEE()
497 crc.Write(data[:len(data)-4])
498 putUint32LE(data[len(data)-4:], crc.Sum32())
499
500 return data, nil
501}
502
503// Constants used for marshalling and unmarshalling filters in the xz
504// block header.
505const (
506 minFilters = 1
507 maxFilters = 4
508 minReservedID = 1 << 62
509)
510
511// filter represents a filter in the block header.
512type filter interface {
513 id() uint64
514 UnmarshalBinary(data []byte) error
515 MarshalBinary() (data []byte, err error)
516 reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error)
517 writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error)
518 // filter must be last filter
519 last() bool
520}
521
522// readFilter reads a block filter from the block header. At this point
523// in time only the LZMA2 filter is supported.
524func readFilter(r io.Reader) (f filter, err error) {
525 br := lzma.ByteReader(r)
526
527 // index
528 id, _, err := readUvarint(br)
529 if err != nil {
530 return nil, err
531 }
532
533 var data []byte
534 switch id {
535 case lzmaFilterID:
536 data = make([]byte, lzmaFilterLen)
537 data[0] = lzmaFilterID
538 if _, err = io.ReadFull(r, data[1:]); err != nil {
539 return nil, err
540 }
541 f = new(lzmaFilter)
542 default:
543 if id >= minReservedID {
544 return nil, errors.New(
545 "xz: reserved filter id in block stream header")
546 }
547 return nil, errors.New("xz: invalid filter id")
548 }
549 if err = f.UnmarshalBinary(data); err != nil {
550 return nil, err
551 }
552 return f, err
553}
554
555// readFilters reads count filters. At this point in time only the count
556// 1 is supported.
557func readFilters(r io.Reader, count int) (filters []filter, err error) {
558 if count != 1 {
559 return nil, errors.New("xz: unsupported filter count")
560 }
561 f, err := readFilter(r)
562 if err != nil {
563 return nil, err
564 }
565 return []filter{f}, err
566}
567
568// writeFilters writes the filters.
569func writeFilters(w io.Writer, filters []filter) (n int, err error) {
570 for _, f := range filters {
571 p, err := f.MarshalBinary()
572 if err != nil {
573 return n, err
574 }
575 k, err := w.Write(p)
576 n += k
577 if err != nil {
578 return n, err
579 }
580 }
581 return n, nil
582}
583
584/*** Index ***/
585
586// record describes a block in the xz file index.
587type record struct {
588 unpaddedSize int64
589 uncompressedSize int64
590}
591
592// readRecord reads an index record.
593func readRecord(r io.ByteReader) (rec record, n int, err error) {
594 u, k, err := readUvarint(r)
595 n += k
596 if err != nil {
597 return rec, n, err
598 }
599 rec.unpaddedSize = int64(u)
600 if rec.unpaddedSize < 0 {
601 return rec, n, errors.New("xz: unpadded size negative")
602 }
603
604 u, k, err = readUvarint(r)
605 n += k
606 if err != nil {
607 return rec, n, err
608 }
609 rec.uncompressedSize = int64(u)
610 if rec.uncompressedSize < 0 {
611 return rec, n, errors.New("xz: uncompressed size negative")
612 }
613
614 return rec, n, nil
615}
616
617// MarshalBinary converts an index record into its binary encoding.
618func (rec *record) MarshalBinary() (data []byte, err error) {
619 // maximum length of a uvarint is 10
620 p := make([]byte, 20)
621 n := putUvarint(p, uint64(rec.unpaddedSize))
622 n += putUvarint(p[n:], uint64(rec.uncompressedSize))
623 return p[:n], nil
624}
625
626// writeIndex writes the index, a sequence of records.
627func writeIndex(w io.Writer, index []record) (n int64, err error) {
628 crc := crc32.NewIEEE()
629 mw := io.MultiWriter(w, crc)
630
631 // index indicator
632 k, err := mw.Write([]byte{0})
633 n += int64(k)
634 if err != nil {
635 return n, err
636 }
637
638 // number of records
639 p := make([]byte, 10)
640 k = putUvarint(p, uint64(len(index)))
641 k, err = mw.Write(p[:k])
642 n += int64(k)
643 if err != nil {
644 return n, err
645 }
646
647 // list of records
648 for _, rec := range index {
649 p, err := rec.MarshalBinary()
650 if err != nil {
651 return n, err
652 }
653 k, err = mw.Write(p)
654 n += int64(k)
655 if err != nil {
656 return n, err
657 }
658 }
659
660 // index padding
661 k, err = mw.Write(make([]byte, padLen(int64(n))))
662 n += int64(k)
663 if err != nil {
664 return n, err
665 }
666
667 // crc32 checksum
668 putUint32LE(p, crc.Sum32())
669 k, err = w.Write(p[:4])
670 n += int64(k)
671
672 return n, err
673}
674
675// readIndexBody reads the index from the reader. It assumes that the
676// index indicator has already been read.
677func readIndexBody(r io.Reader) (records []record, n int64, err error) {
678 crc := crc32.NewIEEE()
679 // index indicator
680 crc.Write([]byte{0})
681
682 br := lzma.ByteReader(io.TeeReader(r, crc))
683
684 // number of records
685 u, k, err := readUvarint(br)
686 n += int64(k)
687 if err != nil {
688 return nil, n, err
689 }
690 recLen := int(u)
691 if recLen < 0 || uint64(recLen) != u {
692 return nil, n, errors.New("xz: record number overflow")
693 }
694
695 // list of records
696 records = make([]record, recLen)
697 for i := range records {
698 records[i], k, err = readRecord(br)
699 n += int64(k)
700 if err != nil {
701 return nil, n, err
702 }
703 }
704
705 p := make([]byte, padLen(int64(n+1)), 4)
706 k, err = io.ReadFull(br.(io.Reader), p)
707 n += int64(k)
708 if err != nil {
709 return nil, n, err
710 }
711 if !allZeros(p) {
712 return nil, n, errors.New("xz: non-zero byte in index padding")
713 }
714
715 // crc32
716 s := crc.Sum32()
717 p = p[:4]
718 k, err = io.ReadFull(br.(io.Reader), p)
719 n += int64(k)
720 if err != nil {
721 return records, n, err
722 }
723 if uint32LE(p) != s {
724 return nil, n, errors.New("xz: wrong checksum for index")
725 }
726
727 return records, n, nil
728}
diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz
new file mode 100644
index 0000000..4b820bd
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/fox.xz
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
new file mode 100644
index 0000000..a328878
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
@@ -0,0 +1,181 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package hash
6
7// CyclicPoly provides a cyclic polynomial rolling hash.
8type CyclicPoly struct {
9 h uint64
10 p []uint64
11 i int
12}
13
14// ror rotates the unsigned 64-bit integer to the right. The argument s must be
15// less than 64.
16func ror(x uint64, s uint) uint64 {
17 return (x >> s) | (x << (64 - s))
18}
19
20// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
21// argument n gives the number of bytes for which a hash will be executed.
22// This number must be positive; the method panics if this isn't the case.
23func NewCyclicPoly(n int) *CyclicPoly {
24 if n < 1 {
25 panic("argument n must be positive")
26 }
27 return &CyclicPoly{p: make([]uint64, 0, n)}
28}
29
30// Len returns the length of the byte sequence for which a hash is generated.
31func (r *CyclicPoly) Len() int {
32 return cap(r.p)
33}
34
35// RollByte hashes the next byte and returns a hash value. The complete hash
36// value becomes available after at least Len() bytes have been hashed.
37func (r *CyclicPoly) RollByte(x byte) uint64 {
38 y := hash[x]
39 if len(r.p) < cap(r.p) {
40 r.h = ror(r.h, 1) ^ y
41 r.p = append(r.p, y)
42 } else {
43 r.h ^= ror(r.p[r.i], uint(cap(r.p)-1))
44 r.h = ror(r.h, 1) ^ y
45 r.p[r.i] = y
46 r.i = (r.i + 1) % cap(r.p)
47 }
48 return r.h
49}
50
51// hash stores the hash values for the individual bytes.
52var hash = [256]uint64{
53 0x2e4fc3f904065142, 0xc790984cfbc99527,
54 0x879f95eb8c62f187, 0x3b61be86b5021ef2,
55 0x65a896a04196f0a5, 0xc5b307b80470b59e,
56 0xd3bff376a70df14b, 0xc332f04f0b3f1701,
57 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53,
58 0x1906a10c2c1c0208, 0xfb0c712a03421c0d,
59 0x38be311a65c9552b, 0xfee7ee4ca6445c7e,
60 0x71aadeded184f21e, 0xd73426fccda23b2d,
61 0x29773fb5fb9600b5, 0xce410261cd32981a,
62 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c,
63 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc,
64 0xa5f10b3910482cea, 0x2945d59be02dfaad,
65 0x06ee334ff70571b5, 0xbabf9d8070f44380,
66 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7,
67 0x26183cb9f7b1664c, 0xea71dac7da068f21,
68 0xea92eca5bd1d0bb7, 0x415595862defcd75,
69 0x248a386023c60648, 0x9cf021ab284b3c8a,
70 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc,
71 0x73e799d139dc6975, 0x7b15ae312486363c,
72 0xb70e5454a2239c80, 0x208e3fb31d3b2263,
73 0x01f563cabb930f44, 0x2ac4533d2a3240d8,
74 0x84231ed1064f6f7c, 0xa9f020977c2a6d19,
75 0x213c227271c20122, 0x09fe8a9a0a03d07a,
76 0x4236dc75bcaf910c, 0x460a8b2bead8f17e,
77 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e,
78 0x70adb010543bea12, 0xcdae938f7ea6f579,
79 0x3f3d870208672f4d, 0x8e6ccbce9d349536,
80 0xe4c0871a389095ae, 0xf5f2a49152bca080,
81 0x9a43f9b97269934e, 0xc17b3753cb6f475c,
82 0xd56d941e8e206bd4, 0xac0a4f3e525eda00,
83 0xa06d5a011912a550, 0x5537ed19537ad1df,
84 0xa32fe713d611449d, 0x2a1d05b47c3b579f,
85 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0,
86 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97,
87 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44,
88 0x0b63d5d801708420, 0x8f227ca8f37ffaec,
89 0x0256278670887c24, 0x107e14877dbf540b,
90 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61,
91 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001,
92 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7,
93 0xd99264421147eb03, 0x535a2d6d38aefcfe,
94 0x6ba8b4454a916237, 0xfa39366eaae4719c,
95 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4,
96 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8,
97 0xd61c2503fe639144, 0x30ce625441eb92d3,
98 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5,
99 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf,
100 0xc7ea4872c96b83ae, 0x6dd5d376f4392382,
101 0x1be88681aaa9792f, 0xfef465ee1b6c10d9,
102 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9,
103 0x7808e902b3857d0b, 0x171c9c4ea4607972,
104 0x58d66274850146df, 0x42b311c10d3981d1,
105 0x647fa8c621c41a4c, 0xf472771c66ddfedc,
106 0x338d27e3f847b46b, 0x6402ce3da97545ce,
107 0x5162db616fc38638, 0x9c83be97bc22a50e,
108 0x2d3d7478a78d5e72, 0xe621a9b938fd5397,
109 0x9454614eb0f81c45, 0x395fb6e742ed39b6,
110 0x77dd9179d06037bf, 0xc478d0fee4d2656d,
111 0x35d9d6cb772007af, 0x83a56e92c883f0f6,
112 0x27937453250c00a1, 0x27bd6ebc3a46a97d,
113 0x9f543bf784342d51, 0xd158f38c48b0ed52,
114 0x8dd8537c045f66b4, 0x846a57230226f6d5,
115 0x6b13939e0c4e7cdf, 0xfca25425d8176758,
116 0x92e5fc6cd52788e6, 0x9992e13d7a739170,
117 0x518246f7a199e8ea, 0xf104c2a71b9979c7,
118 0x86b3ffaabea4768f, 0x6388061cf3e351ad,
119 0x09d9b5295de5bbb5, 0x38bf1638c2599e92,
120 0x1d759846499e148d, 0x4c0ff015e5f96ef4,
121 0xa41a94cfa270f565, 0x42d76f9cb2326c0b,
122 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a,
123 0x337523aabbe6cf8d, 0x646bb14001d42b12,
124 0xc178729d138adc74, 0xf900ef4491f24086,
125 0xee1a90d334bb5ac4, 0x9755c92247301a50,
126 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9,
127 0x0fa8084cf91ac6ff, 0x10d226cf136e6189,
128 0xd302057a07d4fb21, 0x5f03800e20a0fcc3,
129 0x80118d4ae46bd210, 0x58ab61a522843733,
130 0x51edd575c5432a4b, 0x94ee6ff67f9197f7,
131 0x765669e0e5e8157b, 0xa5347830737132f0,
132 0x3ba485a69f01510c, 0x0b247d7b957a01c3,
133 0x1b3d63449fd807dc, 0x0fdc4721c30ad743,
134 0x8b535ed3829b2b14, 0xee41d0cad65d232c,
135 0xe6a99ed97a6a982f, 0x65ac6194c202003d,
136 0x692accf3a70573eb, 0xcc3c02c3e200d5af,
137 0x0d419e8b325914a3, 0x320f160f42c25e40,
138 0x00710d647a51fe7a, 0x3c947692330aed60,
139 0x9288aa280d355a7a, 0xa1806a9b791d1696,
140 0x5d60e38496763da1, 0x6c69e22e613fd0f4,
141 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
142 0x460c17992cbaece1, 0xf7822c5444d3297f,
143 0x344a9790c69b74aa, 0xb80a42e6cae09dce,
144 0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
145 0x88e0b7be347627cc, 0x45246009b7a99490,
146 0x8011c6dd3fe50472, 0xc341d682bffb99d7,
147 0x2511be93808e2d15, 0xd5bc13d7fd739840,
148 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
149 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
150 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
151 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
152 0xa559cce0d9199aac, 0xde39d47ef3723380,
153 0xe5b69d848ce42e35, 0xefa24296f8e79f52,
154 0x70190b59db9a5afc, 0x26f166cdb211e7bf,
155 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
156 0xb9059b05e9420d90, 0x2f0da855c9388754,
157 0x611d5e9ab77949cc, 0x2912038ac01163f4,
158 0x0231df50402b2fba, 0x45660fc4f3245f58,
159 0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
160 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
161 0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
162 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
163 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
164 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
165 0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
166 0x6d1b3c1149dda943, 0x372c943a518c1093,
167 0xad27af45e77c09c4, 0x3b6f92b646044604,
168 0xac2917909f5fcf4f, 0x2069a60e977e5557,
169 0x353a469e71014de5, 0x24be356281f55c15,
170 0x2b6d710ba8e9adea, 0x404ad1751c749c29,
171 0xed7311bf23d7f185, 0xba4f6976b4acc43e,
172 0x32d7198d2bc39000, 0xee667019014d6e01,
173 0x494ef3e128d14c83, 0x1f95a152baecd6be,
174 0x201648dff1f483a5, 0x68c28550c8384af6,
175 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
176 0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
177 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
178 0xf8f6b97f5585080a, 0x74236084be57b95b,
179 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
180 0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
181}
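A usage sketch for the rolling contract above, assuming it lives in a _test.go file inside the hash package itself (the package sits under internal/ and cannot be imported from outside the module): the hash value only covers a full window once Len() bytes have been rolled.

package hash

import "fmt"

func ExampleCyclicPoly() {
	h := NewCyclicPoly(4) // hash 4-byte windows
	data := []byte("abcdefgh")
	for i, b := range data {
		v := h.RollByte(b)
		if i >= h.Len()-1 {
			// v now covers the window data[i-3 : i+1]
			fmt.Printf("%q %#016x\n", data[i-3:i+1], v)
		}
	}
}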
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
new file mode 100644
index 0000000..f99ec22
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
@@ -0,0 +1,14 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5/*
6Package hash provides rolling hashes.
7
8Rolling hashes are used to maintain the positions of n-byte sequences in the
9dictionary buffer.
10
11The package currently provides the Rabin-Karp rolling hash and a Cyclic
12Polynomial hash. Both satisfy the Roller interface used by the Hashes function.
13*/
14package hash
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
new file mode 100644
index 0000000..58635b1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
@@ -0,0 +1,66 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package hash
6
7// A is the default constant for the Rabin-Karp rolling hash. This is a random
8// prime.
9const A = 0x97b548add41d5da1
10
11// RabinKarp supports the computation of a rolling hash.
12type RabinKarp struct {
13 A uint64
14 // a^n
15 aOldest uint64
16 h uint64
17 p []byte
18 i int
19}
20
21// NewRabinKarp creates a new RabinKarp value. The argument n defines the
22// length of the byte sequence to be hashed. The default constant A will be
23// used.
24func NewRabinKarp(n int) *RabinKarp {
25 return NewRabinKarpConst(n, A)
26}
27
28// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
29// length of the byte sequence to be hashed. The argument a provides the
30// constant used to compute the hash.
31func NewRabinKarpConst(n int, a uint64) *RabinKarp {
32 if n <= 0 {
33 panic("number of bytes n must be positive")
34 }
35 aOldest := uint64(1)
36 // There are faster methods. For the small n required by the LZMA
37 // compressor O(n) is sufficient.
38 for i := 0; i < n; i++ {
39 aOldest *= a
40 }
41 return &RabinKarp{
42 A: a, aOldest: aOldest,
43 p: make([]byte, 0, n),
44 }
45}
46
47// Len returns the length of the byte sequence.
48func (r *RabinKarp) Len() int {
49 return cap(r.p)
50}
51
52// RollByte computes the hash after x has been added.
53func (r *RabinKarp) RollByte(x byte) uint64 {
54 if len(r.p) < cap(r.p) {
55 r.h += uint64(x)
56 r.h *= r.A
57 r.p = append(r.p, x)
58 } else {
59 r.h -= uint64(r.p[r.i]) * r.aOldest
60 r.h += uint64(x)
61 r.h *= r.A
62 r.p[r.i] = x
63 r.i = (r.i + 1) % cap(r.p)
64 }
65 return r.h
66}
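A quick consistency sketch for the rolling update above, again assumed to sit in a _test.go file inside the hash package: rolling a byte out of the window (subtracting p[i]*aOldest) and a new byte in must give the same value as hashing the window from scratch, since all arithmetic is modulo 2^64.

package hash

import "testing"

func TestRabinKarpRollEqualsRecompute(t *testing.T) {
	const n = 4
	data := []byte("the quick brown fox")
	r := NewRabinKarp(n)
	for i, b := range data {
		got := r.RollByte(b)
		if i < n-1 {
			continue
		}
		// Recompute the hash of the window data[i-n+1 : i+1] from scratch.
		f := NewRabinKarp(n)
		var want uint64
		for _, c := range data[i-n+1 : i+1] {
			want = f.RollByte(c)
		}
		if got != want {
			t.Fatalf("window ending at %d: got %#x, want %#x", i, got, want)
		}
	}
}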
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
new file mode 100644
index 0000000..ab6a19c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
@@ -0,0 +1,29 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package hash
6
7// Roller provides an interface for rolling hashes. The hash value becomes
8// valid after RollByte has been called Len times.
9type Roller interface {
10 Len() int
11 RollByte(x byte) uint64
12}
13
14// Hashes computes all hash values for the byte slice p. Note that the state of the
15// roller is changed.
16func Hashes(r Roller, p []byte) []uint64 {
17 n := r.Len()
18 if len(p) < n {
19 return nil
20 }
21 h := make([]uint64, len(p)-n+1)
22 for i := 0; i < n-1; i++ {
23 r.RollByte(p[i])
24 }
25 for i := range h {
26 h[i] = r.RollByte(p[i+n-1])
27 }
28 return h
29}
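A small sketch of the Hashes helper, under the same in-package assumption: it produces one hash per n-byte window, so the result has len(p)-Len()+1 entries.

package hash

import "fmt"

func ExampleHashes() {
	p := []byte("abracadabra")
	r := NewCyclicPoly(4)
	h := Hashes(r, p)
	fmt.Println(len(p), r.Len(), len(h)) // 11 4 8
}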
diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
new file mode 100644
index 0000000..0ba45e8
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
@@ -0,0 +1,457 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package xlog provides a simple logging package that allows disabling
6// certain message categories. It defines a type, Logger, with multiple
7// methods for formatting output. The package also has a predefined
8// 'standard' Logger accessible through the helper functions Print[f|ln],
9// Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln], which are easier
10// to use than creating a Logger manually. That logger writes to standard
11// error and prints the date and time of each logged message; this can be
12// configured using the function SetFlags.
13//
14// The Fatal functions call os.Exit(1) after writing the log message even
15// if the output is suppressed by the flags. The Panic functions likewise
16// call panic, whether or not the output is suppressed.
17package xlog
18
19import (
20 "fmt"
21 "io"
22 "os"
23 "runtime"
24 "sync"
25 "time"
26)
27
28// The flags define what information is prefixed to each log entry
29// generated by the Logger. The Lno* versions allow the suppression of
30// specific output. The bits are or'ed together to control what will be
31// printed. There is no control over the order of the items printed and
32// the format. The full format is:
33//
34// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
35//
36const (
37 Ldate = 1 << iota // the date: 2009-01-23
38 Ltime // the time: 01:23:23
39 Lmicroseconds // microsecond resolution: 01:23:23.123123
40 Llongfile // full file name and line number: /a/b/c/d.go:23
41 Lshortfile // final file name element and line number: d.go:23
42 Lnopanic // suppresses output from Panic[f|ln] but not the panic call
43 Lnofatal // suppresses output from Fatal[f|ln] but not the exit
44 Lnowarn // suppresses output from Warn[f|ln]
45 Lnoprint // suppresses output from Print[f|ln]
46 Lnodebug // suppresses output from Debug[f|ln]
47 // initial values for the standard logger
48 Lstdflags = Ldate | Ltime | Lnodebug
49)
50
51// A Logger represents an active logging object that generates lines of
52// output to an io.Writer. Each logging operation if not suppressed
53// makes a single call to the Writer's Write method. A Logger can be
54// used simultaneously from multiple goroutines; it guarantees to
55// serialize access to the Writer.
56type Logger struct {
57 mu sync.Mutex // ensures atomic writes; and protects the following
58 // fields
59 prefix string // prefix to write at beginning of each line
60 flag int // properties
61 out io.Writer // destination for output
62 buf []byte // for accumulating text to write
63}
64
65// New creates a new Logger. The out argument sets the destination to
66// which the log output will be written. The prefix appears at the
67// beginning of each log line. The flag argument defines the logging
68// properties.
69func New(out io.Writer, prefix string, flag int) *Logger {
70 return &Logger{out: out, prefix: prefix, flag: flag}
71}
72
73// std is the standard logger used by the package scope functions.
74var std = New(os.Stderr, "", Lstdflags)
75
76// itoa converts the integer to ASCII. A negative width will avoid
77// zero-padding. The function supports only non-negative integers.
78func itoa(buf *[]byte, i int, wid int) {
79 var u = uint(i)
80 if u == 0 && wid <= 1 {
81 *buf = append(*buf, '0')
82 return
83 }
84 var b [32]byte
85 bp := len(b)
86 for ; u > 0 || wid > 0; u /= 10 {
87 bp--
88 wid--
89 b[bp] = byte(u%10) + '0'
90 }
91 *buf = append(*buf, b[bp:]...)
92}
93
94// formatHeader puts the header into the buf field of the buffer.
95func (l *Logger) formatHeader(t time.Time, file string, line int) {
96 l.buf = append(l.buf, l.prefix...)
97 if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
98 if l.flag&Ldate != 0 {
99 year, month, day := t.Date()
100 itoa(&l.buf, year, 4)
101 l.buf = append(l.buf, '-')
102 itoa(&l.buf, int(month), 2)
103 l.buf = append(l.buf, '-')
104 itoa(&l.buf, day, 2)
105 l.buf = append(l.buf, ' ')
106 }
107 if l.flag&(Ltime|Lmicroseconds) != 0 {
108 hour, min, sec := t.Clock()
109 itoa(&l.buf, hour, 2)
110 l.buf = append(l.buf, ':')
111 itoa(&l.buf, min, 2)
112 l.buf = append(l.buf, ':')
113 itoa(&l.buf, sec, 2)
114 if l.flag&Lmicroseconds != 0 {
115 l.buf = append(l.buf, '.')
116 itoa(&l.buf, t.Nanosecond()/1e3, 6)
117 }
118 l.buf = append(l.buf, ' ')
119 }
120 }
121 if l.flag&(Lshortfile|Llongfile) != 0 {
122 if l.flag&Lshortfile != 0 {
123 short := file
124 for i := len(file) - 1; i > 0; i-- {
125 if file[i] == '/' {
126 short = file[i+1:]
127 break
128 }
129 }
130 file = short
131 }
132 l.buf = append(l.buf, file...)
133 l.buf = append(l.buf, ':')
134 itoa(&l.buf, line, -1)
135 l.buf = append(l.buf, ": "...)
136 }
137}
138
139func (l *Logger) output(calldepth int, now time.Time, s string) error {
140 var file string
141 var line int
142 if l.flag&(Lshortfile|Llongfile) != 0 {
143 l.mu.Unlock()
144 var ok bool
145 _, file, line, ok = runtime.Caller(calldepth)
146 if !ok {
147 file = "???"
148 line = 0
149 }
150 l.mu.Lock()
151 }
152 l.buf = l.buf[:0]
153 l.formatHeader(now, file, line)
154 l.buf = append(l.buf, s...)
155 if len(s) == 0 || s[len(s)-1] != '\n' {
156 l.buf = append(l.buf, '\n')
157 }
158 _, err := l.out.Write(l.buf)
159 return err
160}
161
162// Output formats the arguments like fmt.Print and writes the result with the
163// header controlled by the flags to the l.out writer. A newline is appended if
164// the message doesn't end in one. Calldepth is used to recover the PC, although
165// all current calls of Output use the call depth 2. Access to the function is serialized.
166func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error {
167 now := time.Now()
168 l.mu.Lock()
169 defer l.mu.Unlock()
170 if l.flag&noflag != 0 {
171 return nil
172 }
173 s := fmt.Sprint(v...)
174 return l.output(calldepth+1, now, s)
175}
176
177// Outputf works like Output but formats the message like fmt.Printf.
178func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error {
179 now := time.Now()
180 l.mu.Lock()
181 defer l.mu.Unlock()
182 if l.flag&noflag != 0 {
183 return nil
184 }
185 s := fmt.Sprintf(format, v...)
186 return l.output(calldepth+1, now, s)
187}
188
189// Outputln works like Output but formats the message like fmt.Println.
190func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error {
191 now := time.Now()
192 l.mu.Lock()
193 defer l.mu.Unlock()
194 if l.flag&noflag != 0 {
195 return nil
196 }
197 s := fmt.Sprintln(v...)
198 return l.output(calldepth+1, now, s)
199}
200
201// Panic prints the message like Print and calls panic. The printing
202// might be suppressed by the flag Lnopanic.
203func (l *Logger) Panic(v ...interface{}) {
204 l.Output(2, Lnopanic, v...)
205 s := fmt.Sprint(v...)
206 panic(s)
207}
208
209// Panic prints the message like Print and calls panic. The printing
210// might be suppressed by the flag Lnopanic.
211func Panic(v ...interface{}) {
212 std.Output(2, Lnopanic, v...)
213 s := fmt.Sprint(v...)
214 panic(s)
215}
216
217// Panicf prints the message like Printf and calls panic. The printing
218// might be suppressed by the flag Lnopanic.
219func (l *Logger) Panicf(format string, v ...interface{}) {
220 l.Outputf(2, Lnopanic, format, v...)
221 s := fmt.Sprintf(format, v...)
222 panic(s)
223}
224
225// Panicf prints the message like Printf and calls panic. The printing
226// might be suppressed by the flag Lnopanic.
227func Panicf(format string, v ...interface{}) {
228 std.Outputf(2, Lnopanic, format, v...)
229 s := fmt.Sprintf(format, v...)
230 panic(s)
231}
232
233// Panicln prints the message like Println and calls panic. The printing
234// might be suppressed by the flag Lnopanic.
235func (l *Logger) Panicln(v ...interface{}) {
236 l.Outputln(2, Lnopanic, v...)
237 s := fmt.Sprintln(v...)
238 panic(s)
239}
240
241// Panicln prints the message like Println and calls panic. The printing
242// might be suppressed by the flag Lnopanic.
243func Panicln(v ...interface{}) {
244 std.Outputln(2, Lnopanic, v...)
245 s := fmt.Sprintln(v...)
246 panic(s)
247}
248
249// Fatal prints the message like Print and calls os.Exit(1). The
250// printing might be suppressed by the flag Lnofatal.
251func (l *Logger) Fatal(v ...interface{}) {
252 l.Output(2, Lnofatal, v...)
253 os.Exit(1)
254}
255
256// Fatal prints the message like Print and calls os.Exit(1). The
257// printing might be suppressed by the flag Lnofatal.
258func Fatal(v ...interface{}) {
259 std.Output(2, Lnofatal, v...)
260 os.Exit(1)
261}
262
263// Fatalf prints the message like Printf and calls os.Exit(1). The
264// printing might be suppressed by the flag Lnofatal.
265func (l *Logger) Fatalf(format string, v ...interface{}) {
266 l.Outputf(2, Lnofatal, format, v...)
267 os.Exit(1)
268}
269
270// Fatalf prints the message like Printf and calls os.Exit(1). The
271// printing might be suppressed by the flag Lnofatal.
272func Fatalf(format string, v ...interface{}) {
273 std.Outputf(2, Lnofatal, format, v...)
274 os.Exit(1)
275}
276
277// Fatalln prints the message like Println and calls os.Exit(1). The
278// printing might be suppressed by the flag Lnofatal.
279func (l *Logger) Fatalln(v ...interface{}) {
280 l.Outputln(2, Lnofatal, v...)
281 os.Exit(1)
282}
283
284// Fatalln prints the message like Println and calls os.Exit(1). The
285// printing might be suppressed by the flag Lnofatal.
286func Fatalln(v ...interface{}) {
287 std.Outputln(2, Lnofatal, v...)
288 os.Exit(1)
289}
290
291// Warn prints the message like Print. The printing might be suppressed
292// by the flag Lnowarn.
293func (l *Logger) Warn(v ...interface{}) {
294 l.Output(2, Lnowarn, v...)
295}
296
297// Warn prints the message like Print. The printing might be suppressed
298// by the flag Lnowarn.
299func Warn(v ...interface{}) {
300 std.Output(2, Lnowarn, v...)
301}
302
303// Warnf prints the message like Printf. The printing might be suppressed
304// by the flag Lnowarn.
305func (l *Logger) Warnf(format string, v ...interface{}) {
306 l.Outputf(2, Lnowarn, format, v...)
307}
308
309// Warnf prints the message like Printf. The printing might be suppressed
310// by the flag Lnowarn.
311func Warnf(format string, v ...interface{}) {
312 std.Outputf(2, Lnowarn, format, v...)
313}
314
315// Warnln prints the message like Println. The printing might be suppressed
316// by the flag Lnowarn.
317func (l *Logger) Warnln(v ...interface{}) {
318 l.Outputln(2, Lnowarn, v...)
319}
320
321// Warnln prints the message like Println. The printing might be suppressed
322// by the flag Lnowarn.
323func Warnln(v ...interface{}) {
324 std.Outputln(2, Lnowarn, v...)
325}
326
327// Print prints the message like fmt.Print. The printing might be suppressed
328// by the flag Lnoprint.
329func (l *Logger) Print(v ...interface{}) {
330 l.Output(2, Lnoprint, v...)
331}
332
333// Print prints the message like fmt.Print. The printing might be suppressed
334// by the flag Lnoprint.
335func Print(v ...interface{}) {
336 std.Output(2, Lnoprint, v...)
337}
338
339// Printf prints the message like fmt.Printf. The printing might be suppressed
340// by the flag Lnoprint.
341func (l *Logger) Printf(format string, v ...interface{}) {
342 l.Outputf(2, Lnoprint, format, v...)
343}
344
345// Printf prints the message like fmt.Printf. The printing might be suppressed
346// by the flag Lnoprint.
347func Printf(format string, v ...interface{}) {
348 std.Outputf(2, Lnoprint, format, v...)
349}
350
351// Println prints the message like fmt.Println. The printing might be
352// suppressed by the flag Lnoprint.
353func (l *Logger) Println(v ...interface{}) {
354 l.Outputln(2, Lnoprint, v...)
355}
356
357// Println prints the message like fmt.Println. The printing might be
358// suppressed by the flag Lnoprint.
359func Println(v ...interface{}) {
360 std.Outputln(2, Lnoprint, v...)
361}
362
363// Debug prints the message like Print. The printing might be suppressed
364// by the flag Lnodebug.
365func (l *Logger) Debug(v ...interface{}) {
366 l.Output(2, Lnodebug, v...)
367}
368
369// Debug prints the message like Print. The printing might be suppressed
370// by the flag Lnodebug.
371func Debug(v ...interface{}) {
372 std.Output(2, Lnodebug, v...)
373}
374
375// Debugf prints the message like Printf. The printing might be suppressed
376// by the flag Lnodebug.
377func (l *Logger) Debugf(format string, v ...interface{}) {
378 l.Outputf(2, Lnodebug, format, v...)
379}
380
381// Debugf prints the message like Printf. The printing might be suppressed
382// by the flag Lnodebug.
383func Debugf(format string, v ...interface{}) {
384 std.Outputf(2, Lnodebug, format, v...)
385}
386
387// Debugln prints the message like Println. The printing might be suppressed
388// by the flag Lnodebug.
389func (l *Logger) Debugln(v ...interface{}) {
390 l.Outputln(2, Lnodebug, v...)
391}
392
393// Debugln prints the message like Println. The printing might be suppressed
394// by the flag Lnodebug.
395func Debugln(v ...interface{}) {
396 std.Outputln(2, Lnodebug, v...)
397}
398
399// Flags returns the current flags used by the logger.
400func (l *Logger) Flags() int {
401 l.mu.Lock()
402 defer l.mu.Unlock()
403 return l.flag
404}
405
406// Flags returns the current flags used by the standard logger.
407func Flags() int {
408 return std.Flags()
409}
410
411// SetFlags sets the flags of the logger.
412func (l *Logger) SetFlags(flag int) {
413 l.mu.Lock()
414 defer l.mu.Unlock()
415 l.flag = flag
416}
417
418// SetFlags sets the flags for the standard logger.
419func SetFlags(flag int) {
420 std.SetFlags(flag)
421}
422
423// Prefix returns the prefix used by the logger.
424func (l *Logger) Prefix() string {
425 l.mu.Lock()
426 defer l.mu.Unlock()
427 return l.prefix
428}
429
430// Prefix returns the prefix used by the standard logger of the package.
431func Prefix() string {
432 return std.Prefix()
433}
434
435// SetPrefix sets the prefix for the logger.
436func (l *Logger) SetPrefix(prefix string) {
437 l.mu.Lock()
438 defer l.mu.Unlock()
439 l.prefix = prefix
440}
441
442// SetPrefix sets the prefix of the standard logger of the package.
443func SetPrefix(prefix string) {
444 std.SetPrefix(prefix)
445}
446
447// SetOutput sets the output of the logger.
448func (l *Logger) SetOutput(w io.Writer) {
449 l.mu.Lock()
450 defer l.mu.Unlock()
451 l.out = w
452}
453
454// SetOutput sets the output for the standard logger of the package.
455func SetOutput(w io.Writer) {
456 std.SetOutput(w)
457}
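A usage sketch for the logger above (xlog is an internal package, so this only compiles from inside the xz module): Lstdflags already contains Lnodebug, so Debug output is swallowed while warnings still print.

package main

import (
	"os"

	"github.com/ulikunitz/xz/internal/xlog"
)

func main() {
	l := xlog.New(os.Stderr, "xz: ", xlog.Lstdflags|xlog.Lshortfile)
	l.Debugln("not printed: Lnodebug is part of Lstdflags")
	l.Warnf("dictionary capacity %d exceeds recommendation", 1<<30)

	// The package-level functions use the predefined standard logger.
	xlog.SetFlags(xlog.Ldate | xlog.Ltime | xlog.Lmicroseconds)
	xlog.Print("standard logger message")
}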
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go
new file mode 100644
index 0000000..a781bd1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go
@@ -0,0 +1,523 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bufio"
9 "errors"
10 "fmt"
11 "io"
12 "unicode"
13)
14
15// node represents a node in the binary tree.
16type node struct {
17 // x is the search value
18 x uint32
19 // p parent node
20 p uint32
21 // l left child
22 l uint32
23 // r right child
24 r uint32
25}
26
27// wordLen is the number of bytes represented by the x field of a node.
28const wordLen = 4
29
30// binTree supports the identification of the next operation based on a
31// binary tree.
32//
33// Nodes will be identified by their index into the ring buffer.
34type binTree struct {
35 dict *encoderDict
36 // ring buffer of nodes
37 node []node
38	// absolute offset of the entry for the next node; the write position
39	// is wordLen (4) bytes larger.
40 hoff int64
41 // front position in the node ring buffer
42 front uint32
43 // index of the root node
44 root uint32
45 // current x value
46 x uint32
47 // preallocated array
48 data []byte
49}
50
51// null represents the nonexistent index. We can't use zero because it
52// would always exist or we would need to decrease the index for each
53// reference.
54const null uint32 = 1<<32 - 1
55
56// newBinTree initializes the binTree structure. The capacity defines
57// the size of the buffer and defines the maximum distance for which
58// matches will be found.
59func newBinTree(capacity int) (t *binTree, err error) {
60 if capacity < 1 {
61 return nil, errors.New(
62 "newBinTree: capacity must be larger than zero")
63 }
64 if int64(capacity) >= int64(null) {
65 return nil, errors.New(
66			"newBinTree: capacity must be less than 2^{32}-1")
67 }
68 t = &binTree{
69 node: make([]node, capacity),
70 hoff: -int64(wordLen),
71 root: null,
72 data: make([]byte, maxMatchLen),
73 }
74 return t, nil
75}
76
77func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
78
79// WriteByte writes a single byte into the binary tree.
80func (t *binTree) WriteByte(c byte) error {
81 t.x = (t.x << 8) | uint32(c)
82 t.hoff++
83 if t.hoff < 0 {
84 return nil
85 }
86 v := t.front
87 if int64(v) < t.hoff {
88 // We are overwriting old nodes stored in the tree.
89 t.remove(v)
90 }
91 t.node[v].x = t.x
92 t.add(v)
93 t.front++
94 if int64(t.front) >= int64(len(t.node)) {
95 t.front = 0
96 }
97 return nil
98}
99
100// Write writes a sequence of bytes into the binTree structure.
101func (t *binTree) Write(p []byte) (n int, err error) {
102 for _, c := range p {
103 t.WriteByte(c)
104 }
105 return len(p), nil
106}
107
108// add puts the node v into the tree. The node must not be part of the
109// tree before.
110func (t *binTree) add(v uint32) {
111 vn := &t.node[v]
112 // Set left and right to null indices.
113 vn.l, vn.r = null, null
114 // If the binary tree is empty make v the root.
115 if t.root == null {
116 t.root = v
117 vn.p = null
118 return
119 }
120 x := vn.x
121 p := t.root
122	// Search for the right leaf link and add the new node.
123 for {
124 pn := &t.node[p]
125 if x <= pn.x {
126 if pn.l == null {
127 pn.l = v
128 vn.p = p
129 return
130 }
131 p = pn.l
132 } else {
133 if pn.r == null {
134 pn.r = v
135 vn.p = p
136 return
137 }
138 p = pn.r
139 }
140 }
141}
142
143// parent returns the parent node index of v and the pointer to the link
144// referencing v in the parent.
145func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
146 if t.root == v {
147 return null, &t.root
148 }
149 p = t.node[v].p
150 if t.node[p].l == v {
151 ptr = &t.node[p].l
152 } else {
153 ptr = &t.node[p].r
154 }
155 return
156}
157
158// Remove node v.
159func (t *binTree) remove(v uint32) {
160 vn := &t.node[v]
161 p, ptr := t.parent(v)
162 l, r := vn.l, vn.r
163 if l == null {
164 // Move the right child up.
165 *ptr = r
166 if r != null {
167 t.node[r].p = p
168 }
169 return
170 }
171 if r == null {
172 // Move the left child up.
173 *ptr = l
174 t.node[l].p = p
175 return
176 }
177
178 // Search the in-order predecessor u.
179 un := &t.node[l]
180 ur := un.r
181 if ur == null {
182 // In order predecessor is l. Move it up.
183 un.r = r
184 t.node[r].p = l
185 un.p = p
186 *ptr = l
187 return
188 }
189 var u uint32
190 for {
191 // Look for the max value in the tree where l is root.
192 u = ur
193 ur = t.node[u].r
194 if ur == null {
195 break
196 }
197 }
198 // replace u with ul
199 un = &t.node[u]
200 ul := un.l
201 up := un.p
202 t.node[up].r = ul
203 if ul != null {
204 t.node[ul].p = up
205 }
206
207 // replace v by u
208 un.l, un.r = l, r
209 t.node[l].p = u
210 t.node[r].p = u
211 *ptr = u
212 un.p = p
213}
214
215// search looks for the node that has the value x or for the nodes that
216// brace it. The node highest in the tree with the value x will be
217// returned. All other nodes with the same value live in the left subtree
218// of the returned node.
219func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
220 a, b = null, null
221 if v == null {
222 return
223 }
224 for {
225 vn := &t.node[v]
226 if x <= vn.x {
227 if x == vn.x {
228 return v, v
229 }
230 b = v
231 if vn.l == null {
232 return
233 }
234 v = vn.l
235 } else {
236 a = v
237 if vn.r == null {
238 return
239 }
240 v = vn.r
241 }
242 }
243}
244
245// max returns the node with maximum value in the subtree with v as
246// root.
247func (t *binTree) max(v uint32) uint32 {
248 if v == null {
249 return null
250 }
251 for {
252 r := t.node[v].r
253 if r == null {
254 return v
255 }
256 v = r
257 }
258}
259
260// min returns the node with the minimum value in the subtree with v as
261// root.
262func (t *binTree) min(v uint32) uint32 {
263 if v == null {
264 return null
265 }
266 for {
267 l := t.node[v].l
268 if l == null {
269 return v
270 }
271 v = l
272 }
273}
274
275// pred returns the in-order predecessor of node v.
276func (t *binTree) pred(v uint32) uint32 {
277 if v == null {
278 return null
279 }
280 u := t.max(t.node[v].l)
281 if u != null {
282 return u
283 }
284 for {
285 p := t.node[v].p
286 if p == null {
287 return null
288 }
289 if t.node[p].r == v {
290 return p
291 }
292 v = p
293 }
294}
295
296// succ returns the in-order successor of node v.
297func (t *binTree) succ(v uint32) uint32 {
298 if v == null {
299 return null
300 }
301 u := t.min(t.node[v].r)
302 if u != null {
303 return u
304 }
305 for {
306 p := t.node[v].p
307 if p == null {
308 return null
309 }
310 if t.node[p].l == v {
311 return p
312 }
313 v = p
314 }
315}
316
317// xval converts the first four bytes of a into a 32-bit unsigned
318// integer in big-endian order.
319func xval(a []byte) uint32 {
320 var x uint32
321 switch len(a) {
322 default:
323 x |= uint32(a[3])
324 fallthrough
325 case 3:
326 x |= uint32(a[2]) << 8
327 fallthrough
328 case 2:
329 x |= uint32(a[1]) << 16
330 fallthrough
331 case 1:
332 x |= uint32(a[0]) << 24
333 case 0:
334 }
335 return x
336}
337
338// dumpX converts value x into a four-letter string.
339func dumpX(x uint32) string {
340 a := make([]byte, 4)
341 for i := 0; i < 4; i++ {
342 c := byte(x >> uint((3-i)*8))
343 if unicode.IsGraphic(rune(c)) {
344 a[i] = c
345 } else {
346 a[i] = '.'
347 }
348 }
349 return string(a)
350}
351
352// dumpNode writes a representation of the node v into the io.Writer.
353func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
354 if v == null {
355 return
356 }
357
358 vn := &t.node[v]
359
360 t.dumpNode(w, vn.r, indent+2)
361
362 for i := 0; i < indent; i++ {
363 fmt.Fprint(w, " ")
364 }
365 if vn.p == null {
366 fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
367 } else {
368 fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
369 }
370
371 t.dumpNode(w, vn.l, indent+2)
372}
373
374// dump prints a representation of the binary tree into the writer.
375func (t *binTree) dump(w io.Writer) error {
376 bw := bufio.NewWriter(w)
377 t.dumpNode(bw, t.root, 0)
378 return bw.Flush()
379}
380
381func (t *binTree) distance(v uint32) int {
382 dist := int(t.front) - int(v)
383 if dist <= 0 {
384 dist += len(t.node)
385 }
386 return dist
387}
388
389type matchParams struct {
390 rep [4]uint32
391 // length when match will be accepted
392 nAccept int
393 // nodes to check
394 check int
395	// finish if the length gets shorter
396 stopShorter bool
397}
398
399func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
400) (r match, checked int, accepted bool) {
401 buf := &t.dict.buf
402 for {
403 if checked >= p.check {
404 return m, checked, true
405 }
406 dist, ok := distIter()
407 if !ok {
408 return m, checked, false
409 }
410 checked++
411 if m.n > 0 {
412 i := buf.rear - dist + m.n - 1
413 if i < 0 {
414 i += len(buf.data)
415 } else if i >= len(buf.data) {
416 i -= len(buf.data)
417 }
418 if buf.data[i] != t.data[m.n-1] {
419 if p.stopShorter {
420 return m, checked, false
421 }
422 continue
423 }
424 }
425 n := buf.matchLen(dist, t.data)
426 switch n {
427 case 0:
428 if p.stopShorter {
429 return m, checked, false
430 }
431 continue
432 case 1:
433 if uint32(dist-minDistance) != p.rep[0] {
434 continue
435 }
436 }
437 if n < m.n || (n == m.n && int64(dist) >= m.distance) {
438 continue
439 }
440 m = match{int64(dist), n}
441 if n >= p.nAccept {
442 return m, checked, true
443 }
444 }
445}
446
447func (t *binTree) NextOp(rep [4]uint32) operation {
448 // retrieve maxMatchLen data
449 n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
450 if n == 0 {
451 panic("no data in buffer")
452 }
453 t.data = t.data[:n]
454
455 var (
456 m match
457 x, u, v uint32
458 iterPred, iterSucc func() (int, bool)
459 )
460 p := matchParams{
461 rep: rep,
462 nAccept: maxMatchLen,
463 check: 32,
464 }
465 i := 4
466 iterSmall := func() (dist int, ok bool) {
467 i--
468 if i <= 0 {
469 return 0, false
470 }
471 return i, true
472 }
473 m, checked, accepted := t.match(m, iterSmall, p)
474 if accepted {
475 goto end
476 }
477 p.check -= checked
478 x = xval(t.data)
479 u, v = t.search(t.root, x)
480 if u == v && len(t.data) == 4 {
481 iter := func() (dist int, ok bool) {
482 if u == null {
483 return 0, false
484 }
485 dist = t.distance(u)
486 u, v = t.search(t.node[u].l, x)
487 if u != v {
488 u = null
489 }
490 return dist, true
491 }
492 m, _, _ = t.match(m, iter, p)
493 goto end
494 }
495 p.stopShorter = true
496 iterSucc = func() (dist int, ok bool) {
497 if v == null {
498 return 0, false
499 }
500 dist = t.distance(v)
501 v = t.succ(v)
502 return dist, true
503 }
504 m, checked, accepted = t.match(m, iterSucc, p)
505 if accepted {
506 goto end
507 }
508 p.check -= checked
509 iterPred = func() (dist int, ok bool) {
510 if u == null {
511 return 0, false
512 }
513 dist = t.distance(u)
514 u = t.pred(u)
515 return dist, true
516 }
517 m, _, _ = t.match(m, iterPred, p)
518end:
519 if m.n == 0 {
520 return lit{t.data[0]}
521 }
522 return m
523}
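One small piece of the tree above that is easy to check in isolation: for slices of at least four bytes, xval is just the big-endian interpretation of the first four bytes (shorter slices fill only the high-order bytes). A standalone sketch:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	a := []byte{0x12, 0x34, 0x56, 0x78, 0xff}
	// Equivalent to xval(a) for len(a) >= 4.
	fmt.Printf("%#08x\n", binary.BigEndian.Uint32(a[:4])) // 0x12345678
}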
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go
new file mode 100644
index 0000000..e9bab01
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go
@@ -0,0 +1,45 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7/* Naming conventions follows the CodeReviewComments in the Go Wiki. */
8
9// ntz32Const is used by the functions NTZ and NLZ.
10const ntz32Const = 0x04d7651f
11
12// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé.
13// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26.
14var ntz32Table = [32]int8{
15 0, 1, 2, 24, 3, 19, 6, 25,
16 22, 4, 20, 10, 16, 7, 12, 26,
17 31, 23, 18, 5, 21, 9, 15, 11,
18 30, 17, 8, 14, 29, 13, 28, 27,
19}
20
21// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
22func ntz32(x uint32) int {
23 if x == 0 {
24 return 32
25 }
26 x = (x & -x) * ntz32Const
27 return int(ntz32Table[x>>27])
28}
29
30// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
31func nlz32(x uint32) int {
32	// Smear the leftmost bit to the right.
33 x |= x >> 1
34 x |= x >> 2
35 x |= x >> 4
36 x |= x >> 8
37 x |= x >> 16
38 // Use ntz mechanism to calculate nlz.
39 x++
40 if x == 0 {
41 return 0
42 }
43 x *= ntz32Const
44 return 32 - int(ntz32Table[x>>27])
45}
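The two functions above use the classic de Bruijn multiplication trick. On current Go the standard library offers equivalent intrinsics, which make a convenient cross-check; a minimal sketch:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// bits.TrailingZeros32 and bits.LeadingZeros32 both return 32 for 0,
	// matching ntz32(0) and nlz32(0) above.
	for _, x := range []uint32{0, 1, 0x80, 0x80000000, 0xdeadbeef} {
		fmt.Println(x, bits.TrailingZeros32(x), bits.LeadingZeros32(x))
	}
}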
diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go
new file mode 100644
index 0000000..5350d81
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go
@@ -0,0 +1,39 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
12// breader provides the ReadByte function for a Reader. It doesn't read
13// more data from the reader than absolutely necessary.
14type breader struct {
15 io.Reader
16 // helper slice to save allocations
17 p []byte
18}
19
20// ByteReader converts an io.Reader into an io.ByteReader.
21func ByteReader(r io.Reader) io.ByteReader {
22 br, ok := r.(io.ByteReader)
23 if !ok {
24 return &breader{r, make([]byte, 1)}
25 }
26 return br
27}
28
29// ReadByte reads a single byte from the underlying reader.
30func (r *breader) ReadByte() (c byte, err error) {
31 n, err := r.Reader.Read(r.p)
32 if n < 1 {
33 if err == nil {
34 err = errors.New("breader.ReadByte: no data")
35 }
36 return 0, err
37 }
38 return r.p[0], nil
39}
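A usage sketch for ByteReader: when the reader already implements io.ByteReader (as *bytes.Reader does) it is returned unchanged, otherwise it is wrapped in the one-byte breader above.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var r io.Reader = bytes.NewReader([]byte("xyz"))
	br := lzma.ByteReader(r)
	for {
		c, err := br.ReadByte()
		if err != nil {
			break
		}
		fmt.Printf("%c", c)
	}
	fmt.Println()
}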
diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
new file mode 100644
index 0000000..50e0b6d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
@@ -0,0 +1,171 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9)
10
11// buffer provides a circular buffer of bytes. If the front index equals
12// the rear index the buffer is empty. As a consequence front cannot equal
13// rear for a full buffer. So a full buffer has a length that is one byte
14// less than the length of the data slice.
15type buffer struct {
16 data []byte
17 front int
18 rear int
19}
20
21// newBuffer creates a buffer with the given size.
22func newBuffer(size int) *buffer {
23 return &buffer{data: make([]byte, size+1)}
24}
25
26// Cap returns the capacity of the buffer.
27func (b *buffer) Cap() int {
28 return len(b.data) - 1
29}
30
31// Reset resets the buffer. The front and rear index are set to zero.
32func (b *buffer) Reset() {
33 b.front = 0
34 b.rear = 0
35}
36
37// Buffered returns the number of bytes buffered.
38func (b *buffer) Buffered() int {
39 delta := b.front - b.rear
40 if delta < 0 {
41 delta += len(b.data)
42 }
43 return delta
44}
45
46// Available returns the number of bytes available for writing.
47func (b *buffer) Available() int {
48 delta := b.rear - 1 - b.front
49 if delta < 0 {
50 delta += len(b.data)
51 }
52 return delta
53}
54
55// addIndex adds a non-negative integer to the index i and returns the
56// resulting index. The function takes care of wrapping the index as
57// well as potential overflow situations.
58func (b *buffer) addIndex(i int, n int) int {
59 // subtraction of len(b.data) prevents overflow
60 i += n - len(b.data)
61 if i < 0 {
62 i += len(b.data)
63 }
64 return i
65}
66
67// Read reads bytes from the buffer into p and returns the number of
68// bytes read. The function never returns an error but might return less
69// data than requested.
70func (b *buffer) Read(p []byte) (n int, err error) {
71 n, err = b.Peek(p)
72 b.rear = b.addIndex(b.rear, n)
73 return n, err
74}
75
76// Peek reads bytes from the buffer into p without changing the buffer.
77// Peek will never return an error but might return less data than
78// requested.
79func (b *buffer) Peek(p []byte) (n int, err error) {
80 m := b.Buffered()
81 n = len(p)
82 if m < n {
83 n = m
84 p = p[:n]
85 }
86 k := copy(p, b.data[b.rear:])
87 if k < n {
88 copy(p[k:], b.data)
89 }
90 return n, nil
91}
92
93// Discard skips the next n bytes to read from the buffer, returning the
94// number of bytes discarded.
95//
96// If Discard skips fewer than n bytes, it returns an error.
97func (b *buffer) Discard(n int) (discarded int, err error) {
98 if n < 0 {
99 return 0, errors.New("buffer.Discard: negative argument")
100 }
101 m := b.Buffered()
102 if m < n {
103 n = m
104 err = errors.New(
105			"buffer.Discard: discarded fewer bytes than requested")
106 }
107 b.rear = b.addIndex(b.rear, n)
108 return n, err
109}
110
111// ErrNoSpace indicates that there is insufficient space for the Write
112// operation.
113var ErrNoSpace = errors.New("insufficient space")
114
115// Write puts data into the buffer. If fewer bytes are written than
116// requested ErrNoSpace is returned.
117func (b *buffer) Write(p []byte) (n int, err error) {
118 m := b.Available()
119 n = len(p)
120 if m < n {
121 n = m
122 p = p[:m]
123 err = ErrNoSpace
124 }
125 k := copy(b.data[b.front:], p)
126 if k < n {
127 copy(b.data, p[k:])
128 }
129 b.front = b.addIndex(b.front, n)
130 return n, err
131}
132
133// WriteByte writes a single byte into the buffer. The error ErrNoSpace
134// is returned if there is no space left in the buffer.
135func (b *buffer) WriteByte(c byte) error {
136 if b.Available() < 1 {
137 return ErrNoSpace
138 }
139 b.data[b.front] = c
140 b.front = b.addIndex(b.front, 1)
141 return nil
142}
143
144// prefixLen returns the length of the common prefix of a and b.
145func prefixLen(a, b []byte) int {
146 if len(a) > len(b) {
147 a, b = b, a
148 }
149 for i, c := range a {
150 if b[i] != c {
151 return i
152 }
153 }
154 return len(a)
155}
156
157// matchLen returns the length of the common prefix between the byte
158// slice p and the buffered data at the given distance from the rear.
159func (b *buffer) matchLen(distance int, p []byte) int {
160 var n int
161 i := b.rear - distance
162 if i < 0 {
163 if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
164 return n
165 }
166 p = p[n:]
167 i = 0
168 }
169 n += prefixLen(p, b.data[i:])
170 return n
171}
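The only subtle piece in the buffer above is the index arithmetic in addIndex: adding n and subtracting len(data) first keeps the intermediate value from overflowing, and a negative result is corrected by adding len(data) back. A standalone sketch of that wrap-around:

package main

import "fmt"

// addIndex mirrors buffer.addIndex above for a ring of the given size.
func addIndex(i, n, size int) int {
	i += n - size
	if i < 0 {
		i += size
	}
	return i
}

func main() {
	const size = 8 // len(b.data)
	fmt.Println(addIndex(6, 3, size)) // 1: wrapped around
	fmt.Println(addIndex(2, 3, size)) // 5: no wrap
}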
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 0000000..a3696ba
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
12// ErrLimit indicates that the limit of the LimitedByteWriter has been
13// reached.
14var ErrLimit = errors.New("limit reached")
15
16// LimitedByteWriter provides a byte writer that can be written to until
17// a limit is reached. The field N provides the number of remaining
18// bytes.
19type LimitedByteWriter struct {
20 BW io.ByteWriter
21 N int64
22}
23
24// WriteByte writes a single byte to the limited byte writer. It returns
25// ErrLimit if the limit has been reached. If the byte is successfully
26// written the field N of the LimitedByteWriter will be decremented by
27// one.
28func (l *LimitedByteWriter) WriteByte(c byte) error {
29 if l.N <= 0 {
30 return ErrLimit
31 }
32 if err := l.BW.WriteByte(c); err != nil {
33 return err
34 }
35 l.N--
36 return nil
37}
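A usage sketch for LimitedByteWriter, which is exported by the lzma package; bytes.Buffer satisfies io.ByteWriter, so it can serve as the underlying writer:

package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer
	w := &lzma.LimitedByteWriter{BW: &buf, N: 3}
	for _, c := range []byte("hello") {
		if err := w.WriteByte(c); err != nil {
			fmt.Println("stopped:", err) // stopped: limit reached
			break
		}
	}
	fmt.Printf("%q\n", buf.String()) // "hel"
}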
diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go
new file mode 100644
index 0000000..16e14db
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go
@@ -0,0 +1,277 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
13// decoder decodes a raw LZMA stream without any header.
14type decoder struct {
15 // dictionary; the rear pointer of the buffer will be used for
16 // reading the data.
17 Dict *decoderDict
18 // decoder state
19 State *state
20 // range decoder
21 rd *rangeDecoder
22 // start stores the head value of the dictionary for the LZMA
23 // stream
24 start int64
25 // size of uncompressed data
26 size int64
27 // end-of-stream encountered
28 eos bool
29 // EOS marker found
30 eosMarker bool
31}
32
33// newDecoder creates a new decoder instance. The parameter size provides
34// the expected byte size of the decompressed data. If the size is
35// unknown use a negative value. In that case the decoder will look for
36// a terminating end-of-stream marker.
37func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {
38 rd, err := newRangeDecoder(br)
39 if err != nil {
40 return nil, err
41 }
42 d = &decoder{
43 State: state,
44 Dict: dict,
45 rd: rd,
46 size: size,
47 start: dict.pos(),
48 }
49 return d, nil
50}
51
52// Reopen restarts the decoder with a new byte reader and a new size. Reopen
53// resets the Decompressed counter to zero.
54func (d *decoder) Reopen(br io.ByteReader, size int64) error {
55 var err error
56 if d.rd, err = newRangeDecoder(br); err != nil {
57 return err
58 }
59 d.start = d.Dict.pos()
60 d.size = size
61 d.eos = false
62 return nil
63}
64
65// decodeLiteral decodes a single literal from the LZMA stream.
66func (d *decoder) decodeLiteral() (op operation, err error) {
67 litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head)
68 match := d.Dict.byteAt(int(d.State.rep[0]) + 1)
69 s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState)
70 if err != nil {
71 return nil, err
72 }
73 return lit{s}, nil
74}
75
76// errEOS indicates that an EOS marker has been found.
77var errEOS = errors.New("EOS marker found")
78
79// readOp decodes the next operation from the compressed stream. It
80// returns the operation. If an explicit end-of-stream marker is
81// identified errEOS is returned.
82func (d *decoder) readOp() (op operation, err error) {
83 // Value of the end of stream (EOS) marker
84 const eosDist = 1<<32 - 1
85
86 state, state2, posState := d.State.states(d.Dict.head)
87
88 b, err := d.State.isMatch[state2].Decode(d.rd)
89 if err != nil {
90 return nil, err
91 }
92 if b == 0 {
93 // literal
94 op, err := d.decodeLiteral()
95 if err != nil {
96 return nil, err
97 }
98 d.State.updateStateLiteral()
99 return op, nil
100 }
101 b, err = d.State.isRep[state].Decode(d.rd)
102 if err != nil {
103 return nil, err
104 }
105 if b == 0 {
106 // simple match
107 d.State.rep[3], d.State.rep[2], d.State.rep[1] =
108 d.State.rep[2], d.State.rep[1], d.State.rep[0]
109
110 d.State.updateStateMatch()
111 // The length decoder returns the length offset.
112 n, err := d.State.lenCodec.Decode(d.rd, posState)
113 if err != nil {
114 return nil, err
115 }
116 // The dist decoder returns the distance offset. The actual
117 // distance is 1 higher.
118 d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n)
119 if err != nil {
120 return nil, err
121 }
122 if d.State.rep[0] == eosDist {
123 d.eosMarker = true
124 return nil, errEOS
125 }
126 op = match{n: int(n) + minMatchLen,
127 distance: int64(d.State.rep[0]) + minDistance}
128 return op, nil
129 }
130 b, err = d.State.isRepG0[state].Decode(d.rd)
131 if err != nil {
132 return nil, err
133 }
134 dist := d.State.rep[0]
135 if b == 0 {
136 // rep match 0
137 b, err = d.State.isRepG0Long[state2].Decode(d.rd)
138 if err != nil {
139 return nil, err
140 }
141 if b == 0 {
142 d.State.updateStateShortRep()
143 op = match{n: 1, distance: int64(dist) + minDistance}
144 return op, nil
145 }
146 } else {
147 b, err = d.State.isRepG1[state].Decode(d.rd)
148 if err != nil {
149 return nil, err
150 }
151 if b == 0 {
152 dist = d.State.rep[1]
153 } else {
154 b, err = d.State.isRepG2[state].Decode(d.rd)
155 if err != nil {
156 return nil, err
157 }
158 if b == 0 {
159 dist = d.State.rep[2]
160 } else {
161 dist = d.State.rep[3]
162 d.State.rep[3] = d.State.rep[2]
163 }
164 d.State.rep[2] = d.State.rep[1]
165 }
166 d.State.rep[1] = d.State.rep[0]
167 d.State.rep[0] = dist
168 }
169 n, err := d.State.repLenCodec.Decode(d.rd, posState)
170 if err != nil {
171 return nil, err
172 }
173 d.State.updateStateRep()
174 op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance}
175 return op, nil
176}
177
178// apply takes the operation and transforms the decoder dictionary accordingly.
179func (d *decoder) apply(op operation) error {
180 var err error
181 switch x := op.(type) {
182 case match:
183 err = d.Dict.writeMatch(x.distance, x.n)
184 case lit:
185 err = d.Dict.WriteByte(x.b)
186 default:
187 panic("op is neither a match nor a literal")
188 }
189 return err
190}
191
192// decompress fills the dictionary until no space for new data is
193// available. If the end of the LZMA stream has been reached io.EOF will
194// be returned.
195func (d *decoder) decompress() error {
196 if d.eos {
197 return io.EOF
198 }
199 for d.Dict.Available() >= maxMatchLen {
200 op, err := d.readOp()
201 switch err {
202 case nil:
203 break
204 case errEOS:
205 d.eos = true
206 if !d.rd.possiblyAtEnd() {
207 return errDataAfterEOS
208 }
209 if d.size >= 0 && d.size != d.Decompressed() {
210 return errSize
211 }
212 return io.EOF
213 case io.EOF:
214 d.eos = true
215 return io.ErrUnexpectedEOF
216 default:
217 return err
218 }
219 if err = d.apply(op); err != nil {
220 return err
221 }
222 if d.size >= 0 && d.Decompressed() >= d.size {
223 d.eos = true
224 if d.Decompressed() > d.size {
225 return errSize
226 }
227 if !d.rd.possiblyAtEnd() {
228 switch _, err = d.readOp(); err {
229 case nil:
230 return errSize
231 case io.EOF:
232 return io.ErrUnexpectedEOF
233 case errEOS:
234 break
235 default:
236 return err
237 }
238 }
239 return io.EOF
240 }
241 }
242 return nil
243}
244
245// Errors that may be returned while decoding data.
246var (
247 errDataAfterEOS = errors.New("lzma: data after end of stream marker")
248 errSize = errors.New("lzma: wrong uncompressed data size")
249)
250
251// Read reads data from the buffer. If no more data is available io.EOF is
252// returned.
253func (d *decoder) Read(p []byte) (n int, err error) {
254 var k int
255 for {
256 // Read of decoder dict never returns an error.
257 k, err = d.Dict.Read(p[n:])
258 if err != nil {
259 panic(fmt.Errorf("dictionary read error %s", err))
260 }
261 if k == 0 && d.eos {
262 return n, io.EOF
263 }
264 n += k
265 if n >= len(p) {
266 return n, nil
267 }
268 if err = d.decompress(); err != nil && err != io.EOF {
269 return n, err
270 }
271 }
272}
273
274// Decompressed returns the number of bytes decompressed by the decoder.
275func (d *decoder) Decompressed() int64 {
276 return d.Dict.pos() - d.start
277}
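The rep-distance handling inside readOp above amounts to a four-entry move-to-front list of recently used distances. A standalone sketch of just that bookkeeping (the real decoder interleaves it with range-decoder bit reads, so this is an illustration, not the decoder's code path):

package main

import "fmt"

// useRep selects rep[k], moves it to the front and shifts the entries in
// between down by one, mirroring the assignments in readOp.
func useRep(rep *[4]uint32, k int) uint32 {
	dist := rep[k]
	switch k {
	case 3:
		rep[3] = rep[2]
		fallthrough
	case 2:
		rep[2] = rep[1]
		fallthrough
	case 1:
		rep[1] = rep[0]
		rep[0] = dist
	}
	return dist
}

func main() {
	rep := [4]uint32{10, 20, 30, 40}
	useRep(&rep, 2)
	fmt.Println(rep) // [30 10 20 40]
}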
diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
new file mode 100644
index 0000000..564a12b
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
@@ -0,0 +1,135 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
12// decoderDict provides the dictionary for the decoder. The whole
13// dictionary is used as reader buffer.
14type decoderDict struct {
15 buf buffer
16 head int64
17}
18
19// newDecoderDict creates a new decoder dictionary. The whole dictionary
20// will be used as reader buffer.
21func newDecoderDict(dictCap int) (d *decoderDict, err error) {
22 // lower limit supports easy test cases
23 if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
24 return nil, errors.New("lzma: dictCap out of range")
25 }
26 d = &decoderDict{buf: *newBuffer(dictCap)}
27 return d, nil
28}
29
30// Reset clears the dictionary. The read buffer is not changed, so the
31// buffered data can still be read.
32func (d *decoderDict) Reset() {
33 d.head = 0
34}
35
36// WriteByte writes a single byte into the dictionary. It is used to
37// write literals into the dictionary.
38func (d *decoderDict) WriteByte(c byte) error {
39 if err := d.buf.WriteByte(c); err != nil {
40 return err
41 }
42 d.head++
43 return nil
44}
45
46// pos returns the position of the dictionary head.
47func (d *decoderDict) pos() int64 { return d.head }
48
49// dictLen returns the actual length of the dictionary.
50func (d *decoderDict) dictLen() int {
51 capacity := d.buf.Cap()
52 if d.head >= int64(capacity) {
53 return capacity
54 }
55 return int(d.head)
56}
57
58// byteAt returns a byte stored in the dictionary. If the distance is
59// non-positive or exceeds the current length of the dictionary the zero
60// byte is returned.
61func (d *decoderDict) byteAt(dist int) byte {
62 if !(0 < dist && dist <= d.dictLen()) {
63 return 0
64 }
65 i := d.buf.front - dist
66 if i < 0 {
67 i += len(d.buf.data)
68 }
69 return d.buf.data[i]
70}
71
72// writeMatch writes the match at the top of the dictionary. The given
73// distance must point in the current dictionary and the length must not
74// exceed the maximum length 273 supported in LZMA.
75//
76// The error value ErrNoSpace indicates that no space is available in
77// the dictionary for writing. You need to read from the dictionary
78// first.
79func (d *decoderDict) writeMatch(dist int64, length int) error {
80 if !(0 < dist && dist <= int64(d.dictLen())) {
81 return errors.New("writeMatch: distance out of range")
82 }
83 if !(0 < length && length <= maxMatchLen) {
84 return errors.New("writeMatch: length out of range")
85 }
86 if length > d.buf.Available() {
87 return ErrNoSpace
88 }
89 d.head += int64(length)
90
91 i := d.buf.front - int(dist)
92 if i < 0 {
93 i += len(d.buf.data)
94 }
95 for length > 0 {
96 var p []byte
97 if i >= d.buf.front {
98 p = d.buf.data[i:]
99 i = 0
100 } else {
101 p = d.buf.data[i:d.buf.front]
102 i = d.buf.front
103 }
104 if len(p) > length {
105 p = p[:length]
106 }
107 if _, err := d.buf.Write(p); err != nil {
108 panic(fmt.Errorf("d.buf.Write returned error %s", err))
109 }
110 length -= len(p)
111 }
112 return nil
113}
114
115// Write writes the given bytes into the dictionary and advances the
116// head.
117func (d *decoderDict) Write(p []byte) (n int, err error) {
118 n, err = d.buf.Write(p)
119 d.head += int64(n)
120 return n, err
121}
122
123// Available returns the number of available bytes for writing into the
124// decoder dictionary.
125func (d *decoderDict) Available() int { return d.buf.Available() }
126
127// Read reads data from the buffer contained in the decoder dictionary.
128func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }
129
130// Buffered returns the number of bytes currently buffered in the
131// decoder dictionary.
132func (d *decoderDict) buffered() int { return d.buf.Buffered() }
133
134// Peek gets data from the buffer without advancing the rear index.
135func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
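
Both byteAt and writeMatch above rely on the same circular-buffer index arithmetic: subtract the distance from the current write position and wrap around when the result turns negative. The following is a minimal, self-contained sketch of that wraparound; the ring type and the sample data are illustrative stand-ins, not the package's buffer type.

package main

import "fmt"

// ring is a toy circular buffer; front is the next write position.
type ring struct {
	data  []byte
	front int
}

// byteAt returns the byte dist positions behind the write position,
// wrapping around the end of the buffer when necessary.
func (r *ring) byteAt(dist int) byte {
	i := r.front - dist
	if i < 0 {
		i += len(r.data)
	}
	return r.data[i]
}

func main() {
	// The writes wrapped around, so the logical history (oldest to
	// newest) is a b c d e.
	r := &ring{data: []byte{'d', 'e', 'a', 'b', 'c'}, front: 2}
	for dist := 1; dist <= 5; dist++ {
		fmt.Printf("dist=%d -> %c\n", dist, r.byteAt(dist))
	}
}
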
diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
new file mode 100644
index 0000000..e08eb98
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
@@ -0,0 +1,49 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "fmt"
8
9// directCodec allows the encoding and decoding of values with a fixed number
10// of bits. The number of bits must be in the range [1,32].
11type directCodec byte
12
13// makeDirectCodec creates a directCodec. The function panics if the number of
14// bits is not in the range [1,32].
15func makeDirectCodec(bits int) directCodec {
16 if !(1 <= bits && bits <= 32) {
17 panic(fmt.Errorf("bits=%d out of range", bits))
18 }
19 return directCodec(bits)
20}
21
22// Bits returns the number of bits supported by this codec.
23func (dc directCodec) Bits() int {
24 return int(dc)
25}
26
27// Encode uses the range encoder to encode a value with the fixed number of
28// bits. The most-significant bit is encoded first.
29func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
30 for i := int(dc) - 1; i >= 0; i-- {
31 if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
32 return err
33 }
34 }
35 return nil
36}
37
38// Decode uses the range decoder to decode a value with the given number
39// of bits. The most-significant bit is decoded first.
40func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
41 for i := int(dc) - 1; i >= 0; i-- {
42 x, err := d.DirectDecodeBit()
43 if err != nil {
44 return 0, err
45 }
46 v = (v << 1) | x
47 }
48 return v, nil
49}
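
directCodec.Encode emits the value bit by bit, most-significant bit first, and Decode rebuilds it by shifting bits back in. Below is a small sketch of that bit ordering, with a plain slice of bits standing in for the range coder that normally carries them; it is an illustration, not the package's code.

package main

import "fmt"

// encodeDirect emits the lowest `bits` bits of v, most-significant first,
// mirroring the loop in directCodec.Encode but collecting the bits into a
// slice instead of handing them to a range encoder.
func encodeDirect(v uint32, bits int) []uint32 {
	out := make([]uint32, 0, bits)
	for i := bits - 1; i >= 0; i-- {
		out = append(out, (v>>uint(i))&1)
	}
	return out
}

// decodeDirect rebuilds the value from the bit sequence, mirroring
// directCodec.Decode.
func decodeDirect(bits []uint32) uint32 {
	var v uint32
	for _, b := range bits {
		v = (v << 1) | b
	}
	return v
}

func main() {
	bits := encodeDirect(11, 6) // 11 = 0b1011
	fmt.Println(bits)           // [0 0 1 0 1 1]
	fmt.Println(decodeDirect(bits)) // 11
}
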
diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
new file mode 100644
index 0000000..b053a2d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
@@ -0,0 +1,156 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7// Constants used by the distance codec.
8const (
9 // minimum supported distance
10 minDistance = 1
11 // maximum supported distance, value is used for the eos marker.
12 maxDistance = 1 << 32
13 // number of the supported len states
14 lenStates = 4
15 // start for the position models
16 startPosModel = 4
17 // first index with align bits support
18 endPosModel = 14
19 // bits for the position slots
20 posSlotBits = 6
21 // number of align bits
22 alignBits = 4
23 // maximum position slot
24 maxPosSlot = 63
25)
26
27// distCodec provides encoding and decoding of distance values.
28type distCodec struct {
29 posSlotCodecs [lenStates]treeCodec
30 posModel [endPosModel - startPosModel]treeReverseCodec
31 alignCodec treeReverseCodec
32}
33
34// deepcopy initializes dc as deep copy of the source.
35func (dc *distCodec) deepcopy(src *distCodec) {
36 if dc == src {
37 return
38 }
39 for i := range dc.posSlotCodecs {
40 dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
41 }
42 for i := range dc.posModel {
43 dc.posModel[i].deepcopy(&src.posModel[i])
44 }
45 dc.alignCodec.deepcopy(&src.alignCodec)
46}
47
48// distBits returns the number of bits required to encode dist.
49func distBits(dist uint32) int {
50 if dist < startPosModel {
51 return 6
52 }
53 // slot s > 3, dist d
54 // s = 2(bits(d)-1) + bit(d, bits(d)-2)
55 // s>>1 = bits(d)-1
56 // bits(d) = 32-nlz32(d)
57 // s>>1=31-nlz32(d)
58 // n = 5 + (s>>1) = 36 - nlz32(d)
59 return 36 - nlz32(dist)
60}
61
62// init initializes the distance codec.
63func (dc *distCodec) init() {
64 for i := range dc.posSlotCodecs {
65 dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
66 }
67 for i := range dc.posModel {
68 posSlot := startPosModel + i
69 bits := (posSlot >> 1) - 1
70 dc.posModel[i] = makeTreeReverseCodec(bits)
71 }
72 dc.alignCodec = makeTreeReverseCodec(alignBits)
73}
74
75// lenState converts the value l to a supported lenState value.
76func lenState(l uint32) uint32 {
77 if l >= lenStates {
78 l = lenStates - 1
79 }
80 return l
81}
82
83// Encode encodes the distance using the parameter l. Dist can have values from
84// the full range of uint32 values. To get the distance offset the actual match
85// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
86// indicates the end of the stream.
87func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
88 // Compute the posSlot using nlz32
89 var posSlot uint32
90 var bits uint32
91 if dist < startPosModel {
92 posSlot = dist
93 } else {
94 bits = uint32(30 - nlz32(dist))
95 posSlot = startPosModel - 2 + (bits << 1)
96 posSlot += (dist >> uint(bits)) & 1
97 }
98
99 if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
100 return
101 }
102
103 switch {
104 case posSlot < startPosModel:
105 return nil
106 case posSlot < endPosModel:
107 tc := &dc.posModel[posSlot-startPosModel]
108 return tc.Encode(dist, e)
109 }
110 dic := directCodec(bits - alignBits)
111 if err = dic.Encode(e, dist>>alignBits); err != nil {
112 return
113 }
114 return dc.alignCodec.Encode(dist, e)
115}
116
117// Decode decodes the distance offset using the parameter l. The dist value
118// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
119// offset to get the actual match distance.
120func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) {
121 posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d)
122 if err != nil {
123 return
124 }
125
126 // posSlot equals distance
127 if posSlot < startPosModel {
128 return posSlot, nil
129 }
130
131 // posSlot uses the individual models
132 bits := (posSlot >> 1) - 1
133 dist = (2 | (posSlot & 1)) << bits
134 var u uint32
135 if posSlot < endPosModel {
136 tc := &dc.posModel[posSlot-startPosModel]
137 if u, err = tc.Decode(d); err != nil {
138 return 0, err
139 }
140 dist += u
141 return dist, nil
142 }
143
144 // posSlots use direct encoding and a single model for the four align
145 // bits.
146 dic := directCodec(bits - alignBits)
147 if u, err = dic.Decode(d); err != nil {
148 return 0, err
149 }
150 dist += u << alignBits
151 if u, err = dc.alignCodec.Decode(d); err != nil {
152 return 0, err
153 }
154 dist += u
155 return dist, nil
156}
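
The slot arithmetic in distBits and Encode can be checked in isolation: the position slot packs the exponent and the second-most-significant bit of the distance offset. The sketch below uses math/bits.LeadingZeros32 in place of the package's nlz32 helper; the literals 4 and 6 correspond to startPosModel and posSlotBits above.

package main

import (
	"fmt"
	"math/bits"
)

// posSlot computes the position slot for a distance offset, following the
// comment in distBits: s = 2*(bits(d)-1) + bit(d, bits(d)-2) for d >= 4,
// and s = d for the small distances below startPosModel.
func posSlot(dist uint32) uint32 {
	if dist < 4 { // startPosModel
		return dist
	}
	nb := uint32(30 - bits.LeadingZeros32(dist))
	return 2 + (nb << 1) + (dist>>nb)&1
}

// distBits mirrors the helper above: 6 slot bits plus the extra bits that
// follow the slot.
func distBits(dist uint32) int {
	if dist < 4 {
		return 6
	}
	return 36 - bits.LeadingZeros32(dist)
}

func main() {
	for _, d := range []uint32{0, 3, 4, 6, 8, 12, 16, 1 << 20} {
		fmt.Printf("dist=%7d slot=%2d bits=%2d\n", d, posSlot(d), distBits(d))
	}
}
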
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
new file mode 100644
index 0000000..18ce009
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
@@ -0,0 +1,268 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "fmt"
9 "io"
10)
11
12// opLenMargin provides the upper limit of the number of bytes required
13// to encode a single operation.
14const opLenMargin = 10
15
16// compressFlags control the compression process.
17type compressFlags uint32
18
19// Values for compressFlags.
20const (
21 // all data should be compressed, even if compression is not
22 // optimal.
23 all compressFlags = 1 << iota
24)
25
26// encoderFlags provide the flags for an encoder.
27type encoderFlags uint32
28
29// Flags for the encoder.
30const (
31 // eosMarker requests an EOS marker to be written.
32 eosMarker encoderFlags = 1 << iota
33)
34
35// Encoder compresses data buffered in the encoder dictionary and writes
36// it into a byte writer.
37type encoder struct {
38 dict *encoderDict
39 state *state
40 re *rangeEncoder
41 start int64
42 // generate eos marker
43 marker bool
44 limit bool
45 margin int
46}
47
48// newEncoder creates a new encoder. If the byte writer must be
49// limited, use the LimitedByteWriter provided by this package. The flags
50// argument supports the eosMarker flag, controlling whether a
51// terminating end-of-stream marker must be written.
52func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict,
53 flags encoderFlags) (e *encoder, err error) {
54
55 re, err := newRangeEncoder(bw)
56 if err != nil {
57 return nil, err
58 }
59 e = &encoder{
60 dict: dict,
61 state: state,
62 re: re,
63 marker: flags&eosMarker != 0,
64 start: dict.Pos(),
65 margin: opLenMargin,
66 }
67 if e.marker {
68 e.margin += 5
69 }
70 return e, nil
71}
72
73// Write writes the bytes from p into the dictionary. If not enough
74// space is available the data in the dictionary buffer will be
75// compressed to make additional space available. If the limit of the
76// underlying writer has been reached ErrLimit will be returned.
77func (e *encoder) Write(p []byte) (n int, err error) {
78 for {
79 k, err := e.dict.Write(p[n:])
80 n += k
81 if err == ErrNoSpace {
82 if err = e.compress(0); err != nil {
83 return n, err
84 }
85 continue
86 }
87 return n, err
88 }
89}
90
91// Reopen reopens the encoder with a new byte writer.
92func (e *encoder) Reopen(bw io.ByteWriter) error {
93 var err error
94 if e.re, err = newRangeEncoder(bw); err != nil {
95 return err
96 }
97 e.start = e.dict.Pos()
98 e.limit = false
99 return nil
100}
101
102// writeLiteral writes a literal into the LZMA stream
103func (e *encoder) writeLiteral(l lit) error {
104 var err error
105 state, state2, _ := e.state.states(e.dict.Pos())
106 if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
107 return err
108 }
109 litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
110 match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
111 err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
112 if err != nil {
113 return err
114 }
115 e.state.updateStateLiteral()
116 return nil
117}
118
119// iverson implements the Iverson bracket, the notation popularized by
120// Donald Knuth in his book Concrete Mathematics.
121func iverson(ok bool) uint32 {
122 if ok {
123 return 1
124 }
125 return 0
126}
127
128// writeMatch writes a match operation into the LZMA stream.
129func (e *encoder) writeMatch(m match) error {
130 var err error
131 if !(minDistance <= m.distance && m.distance <= maxDistance) {
132 panic(fmt.Errorf("match distance %d out of range", m.distance))
133 }
134 dist := uint32(m.distance - minDistance)
135 if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
136 !(dist == e.state.rep[0] && m.n == 1) {
137 panic(fmt.Errorf(
138 "match length %d out of range; dist %d rep[0] %d",
139 m.n, dist, e.state.rep[0]))
140 }
141 state, state2, posState := e.state.states(e.dict.Pos())
142 if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
143 return err
144 }
145 g := 0
146 for ; g < 4; g++ {
147 if e.state.rep[g] == dist {
148 break
149 }
150 }
151 b := iverson(g < 4)
152 if err = e.state.isRep[state].Encode(e.re, b); err != nil {
153 return err
154 }
155 n := uint32(m.n - minMatchLen)
156 if b == 0 {
157 // simple match
158 e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
159 e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
160 e.state.updateStateMatch()
161 if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
162 return err
163 }
164 return e.state.distCodec.Encode(e.re, dist, n)
165 }
166 b = iverson(g != 0)
167 if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
168 return err
169 }
170 if b == 0 {
171 // g == 0
172 b = iverson(m.n != 1)
173 if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
174 return err
175 }
176 if b == 0 {
177 e.state.updateStateShortRep()
178 return nil
179 }
180 } else {
181 // g in {1,2,3}
182 b = iverson(g != 1)
183 if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
184 return err
185 }
186 if b == 1 {
187 // g in {2,3}
188 b = iverson(g != 2)
189 err = e.state.isRepG2[state].Encode(e.re, b)
190 if err != nil {
191 return err
192 }
193 if b == 1 {
194 e.state.rep[3] = e.state.rep[2]
195 }
196 e.state.rep[2] = e.state.rep[1]
197 }
198 e.state.rep[1] = e.state.rep[0]
199 e.state.rep[0] = dist
200 }
201 e.state.updateStateRep()
202 return e.state.repLenCodec.Encode(e.re, n, posState)
203}
204
205// writeOp writes a single operation to the range encoder. The function
206// checks whether there is enough space available to close the LZMA
207// stream.
208func (e *encoder) writeOp(op operation) error {
209 if e.re.Available() < int64(e.margin) {
210 return ErrLimit
211 }
212 switch x := op.(type) {
213 case lit:
214 return e.writeLiteral(x)
215 case match:
216 return e.writeMatch(x)
217 default:
218 panic("unexpected operation")
219 }
220}
221
222// compress compresses data from the dictionary buffer. If the flag all
223// is set, all data in the dictionary buffer will be compressed. The
224// function returns ErrLimit if the underlying writer has reached its
225// limit.
226func (e *encoder) compress(flags compressFlags) error {
227 n := 0
228 if flags&all == 0 {
229 n = maxMatchLen - 1
230 }
231 d := e.dict
232 m := d.m
233 for d.Buffered() > n {
234 op := m.NextOp(e.state.rep)
235 if err := e.writeOp(op); err != nil {
236 return err
237 }
238 d.Discard(op.Len())
239 }
240 return nil
241}
242
243// eosMatch is a pseudo operation that indicates the end of the stream.
244var eosMatch = match{distance: maxDistance, n: minMatchLen}
245
246// Close terminates the LZMA stream. If requested the end-of-stream
247// marker will be written. If the byte writer limit has been or will be
248// reached during compression of the remaining data in the buffer the
249// LZMA stream will be closed and data will remain in the buffer.
250func (e *encoder) Close() error {
251 err := e.compress(all)
252 if err != nil && err != ErrLimit {
253 return err
254 }
255 if e.marker {
256 if err := e.writeMatch(eosMatch); err != nil {
257 return err
258 }
259 }
260 err = e.re.Close()
261 return err
262}
263
264// Compressed returns the number of bytes of the input data that have
265// been compressed.
266func (e *encoder) Compressed() int64 {
267 return e.dict.Pos() - e.start
268}
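
writeMatch distinguishes a new (simple) match from a repeated one by searching dist among the four most recently used distances and then reordering them. The branching on g above is equivalent to the small state update sketched below; the rep type and the use method are illustrative only, not the encoder's actual types.

package main

import "fmt"

// rep holds the four most recently used match distances, newest first,
// mirroring how encoder.writeMatch maintains state.rep.
type rep [4]uint32

// use records a match distance: a distance found at index g is moved to the
// front, a previously unseen distance pushes all entries back by one.
func (r *rep) use(dist uint32) {
	g := 0
	for ; g < 4; g++ {
		if r[g] == dist {
			break
		}
	}
	switch g {
	case 0:
		// rep0: the front distance is reused, nothing moves
	case 1:
		r[1], r[0] = r[0], dist
	case 2:
		r[2], r[1], r[0] = r[1], r[0], dist
	case 3:
		r[3], r[2], r[1], r[0] = r[2], r[1], r[0], dist
	default:
		// simple match with a new distance
		r[3], r[2], r[1], r[0] = r[2], r[1], r[0], dist
	}
}

func main() {
	var r rep
	for _, d := range []uint32{5, 9, 5, 13, 9} {
		r.use(d)
		fmt.Println(d, r)
	}
}
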
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 0000000..9d0fbc7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
13// matcher is an interface that supports the identification of the next
14// operation.
15type matcher interface {
16 io.Writer
17 SetDict(d *encoderDict)
18 NextOp(rep [4]uint32) operation
19}
20
21// encoderDict provides the dictionary of the encoder. It includes an
22// additional buffer on top of the actual dictionary.
23type encoderDict struct {
24 buf buffer
25 m matcher
26 head int64
27 capacity int
28 // preallocated array
29 data [maxMatchLen]byte
30}
31
32// newEncoderDict creates the encoder dictionary. The argument bufSize
33// defines the size of the additional buffer.
34func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
35 if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
36 return nil, errors.New(
37 "lzma: dictionary capacity out of range")
38 }
39 if bufSize < 1 {
40 return nil, errors.New(
41 "lzma: buffer size must be larger than zero")
42 }
43 d = &encoderDict{
44 buf: *newBuffer(dictCap + bufSize),
45 capacity: dictCap,
46 m: m,
47 }
48 m.SetDict(d)
49 return d, nil
50}
51
52// Discard discards n bytes. Note that n must not be larger than
53// maxMatchLen.
54func (d *encoderDict) Discard(n int) {
55 p := d.data[:n]
56 k, _ := d.buf.Read(p)
57 if k < n {
58 panic(fmt.Errorf("lzma: can't discard %d bytes", n))
59 }
60 d.head += int64(n)
61 d.m.Write(p)
62}
63
64// Len returns the data available in the encoder dictionary.
65func (d *encoderDict) Len() int {
66 n := d.buf.Available()
67 if int64(n) > d.head {
68 return int(d.head)
69 }
70 return n
71}
72
73// DictLen returns the actual length of data in the dictionary.
74func (d *encoderDict) DictLen() int {
75 if d.head < int64(d.capacity) {
76 return int(d.head)
77 }
78 return d.capacity
79}
80
81// Available returns the number of bytes that can be written by a
82// following Write call.
83func (d *encoderDict) Available() int {
84 return d.buf.Available() - d.DictLen()
85}
86
87// Write writes data into the dictionary buffer. Note that the position
88// of the dictionary head will not be moved. If there is not enough
89// space in the buffer ErrNoSpace will be returned.
90func (d *encoderDict) Write(p []byte) (n int, err error) {
91 m := d.Available()
92 if len(p) > m {
93 p = p[:m]
94 err = ErrNoSpace
95 }
96 var e error
97 if n, e = d.buf.Write(p); e != nil {
98 err = e
99 }
100 return n, err
101}
102
103// Pos returns the position of the head.
104func (d *encoderDict) Pos() int64 { return d.head }
105
106// ByteAt returns the byte at the given distance.
107func (d *encoderDict) ByteAt(distance int) byte {
108 if !(0 < distance && distance <= d.Len()) {
109 return 0
110 }
111 i := d.buf.rear - distance
112 if i < 0 {
113 i += len(d.buf.data)
114 }
115 return d.buf.data[i]
116}
117
118// CopyN copies the last n bytes from the dictionary into the provided
119// writer. This is used for copying uncompressed data into an
120// uncompressed segment.
121func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) {
122 if n <= 0 {
123 return 0, nil
124 }
125 m := d.Len()
126 if n > m {
127 n = m
128 err = ErrNoSpace
129 }
130 i := d.buf.rear - n
131 var e error
132 if i < 0 {
133 i += len(d.buf.data)
134 if written, e = w.Write(d.buf.data[i:]); e != nil {
135 return written, e
136 }
137 i = 0
138 }
139 var k int
140 k, e = w.Write(d.buf.data[i:d.buf.rear])
141 written += k
142 if e != nil {
143 err = e
144 }
145 return written, err
146}
147
148// Buffered returns the number of bytes in the buffer.
149func (d *encoderDict) Buffered() int { return d.buf.Buffered() }
diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
new file mode 100644
index 0000000..5edad63
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
new file mode 100644
index 0000000..d786a97
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
@@ -0,0 +1,309 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10
11 "github.com/ulikunitz/xz/internal/hash"
12)
13
14/* For compression we need to find byte sequences that match the byte
15 * sequence at the dictionary head. A hash table is a simple method to
16 * provide this capability.
17 */
18
19// maxMatches limits the number of matches requested from the Matches
20// function. This controls the speed of the overall encoding.
21const maxMatches = 16
22
23// shortDists defines the number of short distances supported by the
24// implementation.
25const shortDists = 8
26
27// The minimum is somewhat arbitrary but the maximum is limited by the
28// memory requirements of the hash table.
29const (
30 minTableExponent = 9
31 maxTableExponent = 20
32)
33
34// newRoller contains the function used to create an instance of the
35// hash.Roller.
36var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }
37
38// hashTable stores the hash table including the rolling hash method.
39//
40// We implement chained hashing with a circular buffer. Each entry in
41// the circular buffer stores the delta distance to the next position with a
42// word that has the same hash value.
43type hashTable struct {
44 dict *encoderDict
45 // actual hash table
46 t []int64
47 // circular list data with the offset to the next word
48 data []uint32
49 front int
50 // mask for computing the index for the hash table
51 mask uint64
52 // hash offset; initial value is -int64(wordLen)
53 hoff int64
54 // length of the hashed word
55 wordLen int
56 // hash roller for computing the hash values for the Write
57 // method
58 wr hash.Roller
59 // hash roller for computing arbitrary hashes
60 hr hash.Roller
61 // preallocated slices
62 p [maxMatches]int64
63 distances [maxMatches + shortDists]int
64}
65
66// hashTableExponent derives the hash table exponent from the dictionary
67// capacity.
68func hashTableExponent(n uint32) int {
69 e := 30 - nlz32(n)
70 switch {
71 case e < minTableExponent:
72 e = minTableExponent
73 case e > maxTableExponent:
74 e = maxTableExponent
75 }
76 return e
77}
78
79// newHashTable creates a new hash table for words of length wordLen
80func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
81 if !(0 < capacity) {
82 return nil, errors.New(
83 		"newHashTable: capacity must be larger than zero")
84 }
85 exp := hashTableExponent(uint32(capacity))
86 if !(1 <= wordLen && wordLen <= 4) {
87 return nil, errors.New("newHashTable: " +
88 "argument wordLen out of range")
89 }
90 n := 1 << uint(exp)
91 if n <= 0 {
92 panic("newHashTable: exponent is too large")
93 }
94 t = &hashTable{
95 t: make([]int64, n),
96 data: make([]uint32, capacity),
97 mask: (uint64(1) << uint(exp)) - 1,
98 hoff: -int64(wordLen),
99 wordLen: wordLen,
100 wr: newRoller(wordLen),
101 hr: newRoller(wordLen),
102 }
103 return t, nil
104}
105
106func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
107
108// buffered returns the number of bytes that are currently hashed.
109func (t *hashTable) buffered() int {
110 n := t.hoff + 1
111 switch {
112 case n <= 0:
113 return 0
114 case n >= int64(len(t.data)):
115 return len(t.data)
116 }
117 return int(n)
118}
119
120// addIndex adds n to an index ensuring that it stays inside the
121// circular buffer for the hash chain.
122func (t *hashTable) addIndex(i, n int) int {
123 i += n - len(t.data)
124 if i < 0 {
125 i += len(t.data)
126 }
127 return i
128}
129
130// putDelta puts the delta instance at the current front of the circular
131// chain buffer.
132func (t *hashTable) putDelta(delta uint32) {
133 t.data[t.front] = delta
134 t.front = t.addIndex(t.front, 1)
135}
136
137// putEntry puts a new entry into the hash table. If there is already a
138// value stored it is moved into the circular chain buffer.
139func (t *hashTable) putEntry(h uint64, pos int64) {
140 if pos < 0 {
141 return
142 }
143 i := h & t.mask
144 old := t.t[i] - 1
145 t.t[i] = pos + 1
146 var delta int64
147 if old >= 0 {
148 delta = pos - old
149 if delta > 1<<32-1 || delta > int64(t.buffered()) {
150 delta = 0
151 }
152 }
153 t.putDelta(uint32(delta))
154}
155
156// WriteByte converts a single byte into a hash and puts it into the hash
157// table.
158func (t *hashTable) WriteByte(b byte) error {
159 h := t.wr.RollByte(b)
160 t.hoff++
161 t.putEntry(h, t.hoff)
162 return nil
163}
164
165// Write converts the bytes provided into hashes and stores the
166// abbreviated offsets in the hash table. The method will never return an
167// error.
168func (t *hashTable) Write(p []byte) (n int, err error) {
169 for _, b := range p {
170 // WriteByte doesn't generate an error.
171 t.WriteByte(b)
172 }
173 return len(p), nil
174}
175
176// getMatches returns the matches for a specific hash. The function
177// returns the number of positions found.
178//
179// TODO: Make a getDistances function, because distances are what we are actually interested in.
180func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
181 if t.hoff < 0 || len(positions) == 0 {
182 return 0
183 }
184 buffered := t.buffered()
185 tailPos := t.hoff + 1 - int64(buffered)
186 rear := t.front - buffered
187 if rear >= 0 {
188 rear -= len(t.data)
189 }
190 // get the slot for the hash
191 pos := t.t[h&t.mask] - 1
192 delta := pos - tailPos
193 for {
194 if delta < 0 {
195 return n
196 }
197 positions[n] = tailPos + delta
198 n++
199 if n >= len(positions) {
200 return n
201 }
202 i := rear + int(delta)
203 if i < 0 {
204 i += len(t.data)
205 }
206 u := t.data[i]
207 if u == 0 {
208 return n
209 }
210 delta -= int64(u)
211 }
212}
213
214// hash computes the rolling hash for the word stored in p. For correct
215// results its length must be equal to t.wordLen.
216func (t *hashTable) hash(p []byte) uint64 {
217 var h uint64
218 for _, b := range p {
219 h = t.hr.RollByte(b)
220 }
221 return h
222}
223
224// Matches fills the positions slice with potential matches. The
225// function returns the number of positions filled into positions. The
226// byte slice p must have the word length of the hash table.
227func (t *hashTable) Matches(p []byte, positions []int64) int {
228 if len(p) != t.wordLen {
229 panic(fmt.Errorf(
230 "byte slice must have length %d", t.wordLen))
231 }
232 h := t.hash(p)
233 return t.getMatches(h, positions)
234}
235
236// NextOp identifies the next operation using the hash table.
237//
238// TODO: Use all repetitions to find matches.
239func (t *hashTable) NextOp(rep [4]uint32) operation {
240 // get positions
241 data := t.dict.data[:maxMatchLen]
242 n, _ := t.dict.buf.Peek(data)
243 data = data[:n]
244 var p []int64
245 if n < t.wordLen {
246 p = t.p[:0]
247 } else {
248 p = t.p[:maxMatches]
249 n = t.Matches(data[:t.wordLen], p)
250 p = p[:n]
251 }
252
253 	// convert positions into potential distances
254 head := t.dict.head
255 dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
256 for _, pos := range p {
257 dis := int(head - pos)
258 if dis > shortDists {
259 dists = append(dists, dis)
260 }
261 }
262
263 // check distances
264 var m match
265 dictLen := t.dict.DictLen()
266 for _, dist := range dists {
267 if dist > dictLen {
268 continue
269 }
270
271 // Here comes a trick. We are only interested in matches
272 		// that are longer than the matches we have found
273 		// before. So before we test the whole byte sequence at
274 		// the given distance, we test only the byte that would
275 		// make the match longer. If it doesn't match, we don't
276 		// need to consider this distance any further.
277 i := t.dict.buf.rear - dist + m.n
278 if i < 0 {
279 i += len(t.dict.buf.data)
280 }
281 if t.dict.buf.data[i] != data[m.n] {
282 // We can't get a longer match. Jump to the next
283 // distance.
284 continue
285 }
286
287 n := t.dict.buf.matchLen(dist, data)
288 switch n {
289 case 0:
290 continue
291 case 1:
292 if uint32(dist-minDistance) != rep[0] {
293 continue
294 }
295 }
296 if n > m.n {
297 m = match{int64(dist), n}
298 if n == len(data) {
299 // No better match will be found.
300 break
301 }
302 }
303 }
304
305 if m.n == 0 {
306 return lit{data[0]}
307 }
308 return m
309}
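
hashTableExponent above is roughly the base-2 logarithm of the dictionary capacity, clamped to the [minTableExponent, maxTableExponent] range. A self-contained sketch of the same computation, with math/bits.LeadingZeros32 standing in for the package's nlz32 and the clamp bounds written out as the literals 9 and 20:

package main

import (
	"fmt"
	"math/bits"
)

// hashTableExponent mirrors the helper above: roughly log2 of the
// dictionary capacity, clamped to [9,20].
func hashTableExponent(n uint32) int {
	e := 30 - bits.LeadingZeros32(n)
	switch {
	case e < 9: // minTableExponent
		e = 9
	case e > 20: // maxTableExponent
		e = 20
	}
	return e
}

func main() {
	for _, capacity := range []uint32{1 << 10, 1 << 16, 1 << 20, 1 << 26, 1 << 30} {
		e := hashTableExponent(capacity)
		fmt.Printf("dictCap=%d -> exponent=%d (table size %d)\n",
			capacity, e, 1<<uint(e))
	}
}
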
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go
new file mode 100644
index 0000000..bc70896
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header.go
@@ -0,0 +1,167 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
12// uint32LE reads an uint32 integer from a byte slice
13func uint32LE(b []byte) uint32 {
14 x := uint32(b[3]) << 24
15 x |= uint32(b[2]) << 16
16 x |= uint32(b[1]) << 8
17 x |= uint32(b[0])
18 return x
19}
20
21// uint64LE converts the uint64 value stored as little endian to an uint64
22// value.
23func uint64LE(b []byte) uint64 {
24 x := uint64(b[7]) << 56
25 x |= uint64(b[6]) << 48
26 x |= uint64(b[5]) << 40
27 x |= uint64(b[4]) << 32
28 x |= uint64(b[3]) << 24
29 x |= uint64(b[2]) << 16
30 x |= uint64(b[1]) << 8
31 x |= uint64(b[0])
32 return x
33}
34
35// putUint32LE puts an uint32 integer into a byte slice that must have at least
36// a length of 4 bytes.
37func putUint32LE(b []byte, x uint32) {
38 b[0] = byte(x)
39 b[1] = byte(x >> 8)
40 b[2] = byte(x >> 16)
41 b[3] = byte(x >> 24)
42}
43
44// putUint64LE puts the uint64 value into the byte slice as little endian
45// value. The byte slice b must provide space for at least 8 bytes.
46func putUint64LE(b []byte, x uint64) {
47 b[0] = byte(x)
48 b[1] = byte(x >> 8)
49 b[2] = byte(x >> 16)
50 b[3] = byte(x >> 24)
51 b[4] = byte(x >> 32)
52 b[5] = byte(x >> 40)
53 b[6] = byte(x >> 48)
54 b[7] = byte(x >> 56)
55}
56
57// noHeaderSize defines the value of the length field in the LZMA header.
58const noHeaderSize uint64 = 1<<64 - 1
59
60// HeaderLen provides the length of the LZMA file header.
61const HeaderLen = 13
62
63// header represents the header of an LZMA file.
64type header struct {
65 properties Properties
66 dictCap int
67 // uncompressed size; negative value if no size is given
68 size int64
69}
70
71// marshalBinary marshals the header.
72func (h *header) marshalBinary() (data []byte, err error) {
73 if err = h.properties.verify(); err != nil {
74 return nil, err
75 }
76 if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
77 return nil, fmt.Errorf("lzma: DictCap %d out of range",
78 h.dictCap)
79 }
80
81 data = make([]byte, 13)
82
83 // property byte
84 data[0] = h.properties.Code()
85
86 // dictionary capacity
87 putUint32LE(data[1:5], uint32(h.dictCap))
88
89 // uncompressed size
90 var s uint64
91 if h.size > 0 {
92 s = uint64(h.size)
93 } else {
94 s = noHeaderSize
95 }
96 putUint64LE(data[5:], s)
97
98 return data, nil
99}
100
101// unmarshalBinary unmarshals the header.
102func (h *header) unmarshalBinary(data []byte) error {
103 if len(data) != HeaderLen {
104 return errors.New("lzma.unmarshalBinary: data has wrong length")
105 }
106
107 // properties
108 var err error
109 if h.properties, err = PropertiesForCode(data[0]); err != nil {
110 return err
111 }
112
113 // dictionary capacity
114 h.dictCap = int(uint32LE(data[1:]))
115 if h.dictCap < 0 {
116 return errors.New(
117 "LZMA header: dictionary capacity exceeds maximum " +
118 "integer")
119 }
120
121 // uncompressed size
122 s := uint64LE(data[5:])
123 if s == noHeaderSize {
124 h.size = -1
125 } else {
126 h.size = int64(s)
127 if h.size < 0 {
128 return errors.New(
129 "LZMA header: uncompressed size " +
130 "out of int64 range")
131 }
132 }
133
134 return nil
135}
136
137// validDictCap checks whether the dictionary capacity is correct. This
138// is used to weed out wrong file headers.
139func validDictCap(dictcap int) bool {
140 if int64(dictcap) == MaxDictCap {
141 return true
142 }
143 for n := uint(10); n < 32; n++ {
144 if dictcap == 1<<n {
145 return true
146 }
147 if dictcap == 1<<n+1<<(n-1) {
148 return true
149 }
150 }
151 return false
152}
153
154// ValidHeader checks for a valid LZMA file header. It allows only
155// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
156// there is an explicit size it must not exceed 256 GiB. The length of
157// the data argument must be HeaderLen.
158func ValidHeader(data []byte) bool {
159 var h header
160 if err := h.unmarshalBinary(data); err != nil {
161 return false
162 }
163 if !validDictCap(h.dictCap) {
164 return false
165 }
166 return h.size < 0 || h.size <= 1<<38
167}
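
The marshaling code above fixes the classic LZMA header layout: one properties byte, a 4-byte little-endian dictionary capacity, and an 8-byte little-endian uncompressed size where all bits set means "size unknown". A minimal sketch that assembles such a header with encoding/binary; the helper name buildHeader and the chosen parameter values are illustrative only.

package main

import (
	"encoding/binary"
	"fmt"
)

// buildHeader assembles the classic 13-byte LZMA header: one properties
// byte, a 4-byte little-endian dictionary capacity and an 8-byte
// little-endian uncompressed size (all ones when the size is unknown).
func buildHeader(propsCode byte, dictCap uint32, size int64) []byte {
	h := make([]byte, 13)
	h[0] = propsCode
	binary.LittleEndian.PutUint32(h[1:5], dictCap)
	u := ^uint64(0) // "no size" marker
	if size >= 0 {
		u = uint64(size)
	}
	binary.LittleEndian.PutUint64(h[5:], u)
	return h
}

func main() {
	// 0x5d is the properties code for the default lc=3, lp=0, pb=2;
	// 8 MiB dictionary, unknown uncompressed size.
	h := buildHeader(0x5d, 8<<20, -1)
	fmt.Printf("% x\n", h)
	fmt.Println("dictCap:", binary.LittleEndian.Uint32(h[1:5]))
}
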
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 0000000..ac6a71a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
13const (
14 // maximum size of compressed data in a chunk
15 maxCompressed = 1 << 16
16 // maximum size of uncompressed data in a chunk
17 maxUncompressed = 1 << 21
18)
19
20// chunkType represents the type of an LZMA2 chunk. Note that this
21// value is an internal representation and not the actual encoding of an
22// LZMA2 chunk header.
23type chunkType byte
24
25// Possible values for the chunk type.
26const (
27 // end of stream
28 cEOS chunkType = iota
29 // uncompressed; reset dictionary
30 cUD
31 // uncompressed; no reset of dictionary
32 cU
33 // LZMA compressed; no reset
34 cL
35 // LZMA compressed; reset state
36 cLR
37 // LZMA compressed; reset state; new property value
38 cLRN
39 // LZMA compressed; reset state; new property value; reset dictionary
40 cLRND
41)
42
43// chunkTypeStrings provide a string representation for the chunk types.
44var chunkTypeStrings = [...]string{
45 cEOS: "EOS",
46 cU: "U",
47 cUD: "UD",
48 cL: "L",
49 cLR: "LR",
50 cLRN: "LRN",
51 cLRND: "LRND",
52}
53
54// String returns a string representation of the chunk type.
55func (c chunkType) String() string {
56 if !(cEOS <= c && c <= cLRND) {
57 return "unknown"
58 }
59 return chunkTypeStrings[c]
60}
61
62// Actual encodings for the chunk types in the header byte. Note that the
63// high bits of the uncompressed size are additionally stored in the header byte.
64const (
65 hEOS = 0
66 hUD = 1
67 hU = 2
68 hL = 1 << 7
69 hLR = 1<<7 | 1<<5
70 hLRN = 1<<7 | 1<<6
71 hLRND = 1<<7 | 1<<6 | 1<<5
72)
73
74// errHeaderByte indicates an unsupported value for the chunk header
75// byte. This byte starts the variable-length chunk header.
76var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
77
78// headerChunkType converts the header byte into a chunk type. It
79// ignores the uncompressed size bits in the chunk header byte.
80func headerChunkType(h byte) (c chunkType, err error) {
81 if h&hL == 0 {
82 // no compression
83 switch h {
84 case hEOS:
85 c = cEOS
86 case hUD:
87 c = cUD
88 case hU:
89 c = cU
90 default:
91 return 0, errHeaderByte
92 }
93 return
94 }
95 switch h & hLRND {
96 case hL:
97 c = cL
98 case hLR:
99 c = cLR
100 case hLRN:
101 c = cLRN
102 case hLRND:
103 c = cLRND
104 default:
105 return 0, errHeaderByte
106 }
107 return
108}
109
110// uncompressedHeaderLen provides the length of an uncompressed header
111const uncompressedHeaderLen = 3
112
113// headerLen returns the length of the LZMA2 header for a given chunk
114// type.
115func headerLen(c chunkType) int {
116 switch c {
117 case cEOS:
118 return 1
119 case cU, cUD:
120 return uncompressedHeaderLen
121 case cL, cLR:
122 return 5
123 case cLRN, cLRND:
124 return 6
125 }
126 panic(fmt.Errorf("unsupported chunk type %d", c))
127}
128
129// chunkHeader represents the contents of a chunk header.
130type chunkHeader struct {
131 ctype chunkType
132 uncompressed uint32
133 compressed uint16
134 props Properties
135}
136
137// String returns a string representation of the chunk header.
138func (h *chunkHeader) String() string {
139 return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed,
140 h.compressed, &h.props)
141}
142
143// UnmarshalBinary reads the content of the chunk header from the data
144// slice. The slice must have the correct length.
145func (h *chunkHeader) UnmarshalBinary(data []byte) error {
146 if len(data) == 0 {
147 return errors.New("no data")
148 }
149 c, err := headerChunkType(data[0])
150 if err != nil {
151 return err
152 }
153
154 n := headerLen(c)
155 if len(data) < n {
156 return errors.New("incomplete data")
157 }
158 if len(data) > n {
159 return errors.New("invalid data length")
160 }
161
162 *h = chunkHeader{ctype: c}
163 if c == cEOS {
164 return nil
165 }
166
167 h.uncompressed = uint32(uint16BE(data[1:3]))
168 if c <= cU {
169 return nil
170 }
171 h.uncompressed |= uint32(data[0]&^hLRND) << 16
172
173 h.compressed = uint16BE(data[3:5])
174 if c <= cLR {
175 return nil
176 }
177
178 h.props, err = PropertiesForCode(data[5])
179 return err
180}
181
182// MarshalBinary encodes the chunk header value. The function checks
183// whether the content of the chunk header is correct.
184func (h *chunkHeader) MarshalBinary() (data []byte, err error) {
185 if h.ctype > cLRND {
186 return nil, errors.New("invalid chunk type")
187 }
188 if err = h.props.verify(); err != nil {
189 return nil, err
190 }
191
192 data = make([]byte, headerLen(h.ctype))
193
194 switch h.ctype {
195 case cEOS:
196 return data, nil
197 case cUD:
198 data[0] = hUD
199 case cU:
200 data[0] = hU
201 case cL:
202 data[0] = hL
203 case cLR:
204 data[0] = hLR
205 case cLRN:
206 data[0] = hLRN
207 case cLRND:
208 data[0] = hLRND
209 }
210
211 putUint16BE(data[1:3], uint16(h.uncompressed))
212 if h.ctype <= cU {
213 return data, nil
214 }
215 data[0] |= byte(h.uncompressed>>16) &^ hLRND
216
217 putUint16BE(data[3:5], h.compressed)
218 if h.ctype <= cLR {
219 return data, nil
220 }
221
222 data[5] = h.props.Code()
223 return data, nil
224}
225
226// readChunkHeader reads the chunk header from the IO reader.
227func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
228 p := make([]byte, 1, 6)
229 if _, err = io.ReadFull(r, p); err != nil {
230 return
231 }
232 c, err := headerChunkType(p[0])
233 if err != nil {
234 return
235 }
236 p = p[:headerLen(c)]
237 if _, err = io.ReadFull(r, p[1:]); err != nil {
238 return
239 }
240 h = new(chunkHeader)
241 if err = h.UnmarshalBinary(p); err != nil {
242 return nil, err
243 }
244 return h, nil
245}
246
247// uint16BE converts a big-endian uint16 representation to an uint16
248// value.
249func uint16BE(p []byte) uint16 {
250 return uint16(p[0])<<8 | uint16(p[1])
251}
252
253// putUint16BE puts the big-endian uint16 representation into the given
254// slice.
255func putUint16BE(p []byte, x uint16) {
256 p[0] = byte(x >> 8)
257 p[1] = byte(x)
258}
259
260// chunkState is used to manage the state of the chunks
261type chunkState byte
262
263// start and stop define the initial and terminating state of the chunk
264// state
265const (
266 start chunkState = 'S'
267 stop = 'T'
268)
269
270// errors for the chunk state handling
271var (
272 errChunkType = errors.New("lzma: unexpected chunk type")
273 errState = errors.New("lzma: wrong chunk state")
274)
275
276// next transitions state based on chunk type input
277func (c *chunkState) next(ctype chunkType) error {
278 switch *c {
279 // start state
280 case 'S':
281 switch ctype {
282 case cEOS:
283 *c = 'T'
284 case cUD:
285 *c = 'R'
286 case cLRND:
287 *c = 'L'
288 default:
289 return errChunkType
290 }
291 // normal LZMA mode
292 case 'L':
293 switch ctype {
294 case cEOS:
295 *c = 'T'
296 case cUD:
297 *c = 'R'
298 case cU:
299 *c = 'U'
300 case cL, cLR, cLRN, cLRND:
301 break
302 default:
303 return errChunkType
304 }
305 // reset required
306 case 'R':
307 switch ctype {
308 case cEOS:
309 *c = 'T'
310 case cUD, cU:
311 break
312 case cLRN, cLRND:
313 *c = 'L'
314 default:
315 return errChunkType
316 }
317 // uncompressed
318 case 'U':
319 switch ctype {
320 case cEOS:
321 *c = 'T'
322 case cUD:
323 *c = 'R'
324 case cU:
325 break
326 case cL, cLR, cLRN, cLRND:
327 *c = 'L'
328 default:
329 return errChunkType
330 }
331 // terminal state
332 case 'T':
333 return errChunkType
334 default:
335 return errState
336 }
337 return nil
338}
339
340// defaultChunkType returns the default chunk type for each chunk state.
341func (c chunkState) defaultChunkType() chunkType {
342 switch c {
343 case 'S':
344 return cLRND
345 case 'L', 'U':
346 return cL
347 case 'R':
348 return cLRN
349 default:
350 // no error
351 return cEOS
352 }
353}
354
355// maxDictCap defines the maximum dictionary capacity supported by the
356// LZMA2 dictionary capacity encoding.
357const maxDictCap = 1<<32 - 1
358
359// maxDictCapCode defines the maximum dictionary capacity code.
360const maxDictCapCode = 40
361
362// decodeDictCap decodes the dictionary capacity byte. It doesn't check
363// that the given byte is in the correct range.
364func decodeDictCap(c byte) int64 {
365 return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
366}
367
368// DecodeDictCap decodes the encoded dictionary capacity. The function
369// returns an error if the code is out of range.
370func DecodeDictCap(c byte) (n int64, err error) {
371 if c >= maxDictCapCode {
372 if c == maxDictCapCode {
373 return maxDictCap, nil
374 }
375 return 0, errors.New("lzma: invalid dictionary size code")
376 }
377 return decodeDictCap(c), nil
378}
379
380// EncodeDictCap encodes a dictionary capacity. The function returns the
381// code for the smallest capacity that is greater than or equal to n. If n
382// exceeds the maximum supported dictionary capacity, the maximum code is returned.
383func EncodeDictCap(n int64) byte {
384 a, b := byte(0), byte(40)
385 for a < b {
386 c := a + (b-a)>>1
387 m := decodeDictCap(c)
388 if n <= m {
389 if n == m {
390 return c
391 }
392 b = c
393 } else {
394 a = c + 1
395 }
396 }
397 return a
398}
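
The dictionary-capacity code used by LZMA2 packs sizes of 2^n or 2^n+2^(n-1) into a single byte; decodeDictCap above recovers the size and EncodeDictCap searches for the smallest code whose size is large enough. A small sketch that just evaluates the decoding formula for a few codes (code 40 is the special maximum handled separately by DecodeDictCap):

package main

import "fmt"

// decodeDictCap mirrors the formula above: the low bit selects between the
// 2*2^k and 3*2^k series, the remaining bits select the exponent.
func decodeDictCap(c byte) int64 {
	return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
}

func main() {
	for _, c := range []byte{0, 1, 2, 3, 18, 19} {
		fmt.Printf("code %2d -> %d bytes\n", c, decodeDictCap(c))
	}
	fmt.Printf("code 40 -> %d bytes (2^32-1)\n", int64(1)<<32-1)
}
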
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 0000000..e517730
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,129 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "errors"
8
9// maxPosBits defines the number of bits of the position value that are used to
10// compute the posState value. The value is used to select the tree codec
11// for length encoding and decoding.
12const maxPosBits = 4
13
14// minMatchLen and maxMatchLen give the minimum and maximum values for
15// encoding and decoding length values. minMatchLen is also used as base
16// for the encoded length values.
17const (
18 minMatchLen = 2
19 maxMatchLen = minMatchLen + 16 + 256 - 1
20)
21
22// lengthCodec supports the encoding of the length value.
23type lengthCodec struct {
24 choice [2]prob
25 low [1 << maxPosBits]treeCodec
26 mid [1 << maxPosBits]treeCodec
27 high treeCodec
28}
29
30// deepcopy initializes the lc value as deep copy of the source value.
31func (lc *lengthCodec) deepcopy(src *lengthCodec) {
32 if lc == src {
33 return
34 }
35 lc.choice = src.choice
36 for i := range lc.low {
37 lc.low[i].deepcopy(&src.low[i])
38 }
39 for i := range lc.mid {
40 lc.mid[i].deepcopy(&src.mid[i])
41 }
42 lc.high.deepcopy(&src.high)
43}
44
45// init initializes a new length codec.
46func (lc *lengthCodec) init() {
47 for i := range lc.choice {
48 lc.choice[i] = probInit
49 }
50 for i := range lc.low {
51 lc.low[i] = makeTreeCodec(3)
52 }
53 for i := range lc.mid {
54 lc.mid[i] = makeTreeCodec(3)
55 }
56 lc.high = makeTreeCodec(8)
57}
58
59// lBits gives the number of bits used for the encoding of the l value
60// provided to the range encoder.
61func lBits(l uint32) int {
62 switch {
63 case l < 8:
64 return 4
65 case l < 16:
66 return 5
67 default:
68 return 10
69 }
70}
71
72// Encode encodes the length offset. The length offset l can be computed by
73// subtracting minMatchLen (2) from the actual length.
74//
75// l = length - minMatchLen
76//
77func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
78) (err error) {
79 if l > maxMatchLen-minMatchLen {
80 return errors.New("lengthCodec.Encode: l out of range")
81 }
82 if l < 8 {
83 if err = lc.choice[0].Encode(e, 0); err != nil {
84 return
85 }
86 return lc.low[posState].Encode(e, l)
87 }
88 if err = lc.choice[0].Encode(e, 1); err != nil {
89 return
90 }
91 if l < 16 {
92 if err = lc.choice[1].Encode(e, 0); err != nil {
93 return
94 }
95 return lc.mid[posState].Encode(e, l-8)
96 }
97 if err = lc.choice[1].Encode(e, 1); err != nil {
98 return
99 }
100 if err = lc.high.Encode(e, l-16); err != nil {
101 return
102 }
103 return nil
104}
105
106// Decode reads the length offset. Add minMatchLen to the length offset l to
107// compute the actual length.
108func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
109) (l uint32, err error) {
110 var b uint32
111 if b, err = lc.choice[0].Decode(d); err != nil {
112 return
113 }
114 if b == 0 {
115 l, err = lc.low[posState].Decode(d)
116 return
117 }
118 if b, err = lc.choice[1].Decode(d); err != nil {
119 return
120 }
121 if b == 0 {
122 l, err = lc.mid[posState].Decode(d)
123 l += 8
124 return
125 }
126 l, err = lc.high.Decode(d)
127 l += 16
128 return
129}
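
The two choice bits split the length offset l = length - minMatchLen into three ranges, which is exactly what lBits accounts for (1 choice bit + 3 tree bits, 2 + 3, or 2 + 8). A small illustrative sketch, not part of the package:

package main

import "fmt"

const minMatchLen = 2

// classify reports which sub-coder of the length codec would encode the
// match length and the bit count that lBits assigns to it: 1 choice bit +
// 3 tree bits for l < 8, 2 choice bits + 3 tree bits for l < 16, and
// 2 choice bits + 8 tree bits otherwise.
func classify(length int) (coder string, bits int) {
	l := length - minMatchLen
	switch {
	case l < 8:
		return "low", 4
	case l < 16:
		return "mid", 5
	default:
		return "high", 10
	}
}

func main() {
	for _, n := range []int{2, 9, 10, 17, 18, 273} {
		coder, bits := classify(n)
		fmt.Printf("length %3d -> offset %3d, %s coder, %2d bits\n",
			n, n-minMatchLen, coder, bits)
	}
}
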
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 0000000..c949d6e
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,132 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7// literalCodec supports the encoding of literals. It provides 768 probability
8// values per literal state. The upper 512 probabilities are used with the
9// context of a match bit.
10type literalCodec struct {
11 probs []prob
12}
13
14// deepcopy initializes literal codec c as a deep copy of the source.
15func (c *literalCodec) deepcopy(src *literalCodec) {
16 if c == src {
17 return
18 }
19 c.probs = make([]prob, len(src.probs))
20 copy(c.probs, src.probs)
21}
22
23// init initializes the literal codec.
24func (c *literalCodec) init(lc, lp int) {
25 switch {
26 case !(minLC <= lc && lc <= maxLC):
27 panic("lc out of range")
28 case !(minLP <= lp && lp <= maxLP):
29 panic("lp out of range")
30 }
31 c.probs = make([]prob, 0x300<<uint(lc+lp))
32 for i := range c.probs {
33 c.probs[i] = probInit
34 }
35}
36
37// Encode encodes the byte s using a range encoder as well as the current LZMA
38// encoder state, a match byte and the literal state.
39func (c *literalCodec) Encode(e *rangeEncoder, s byte,
40 state uint32, match byte, litState uint32,
41) (err error) {
42 k := litState * 0x300
43 probs := c.probs[k : k+0x300]
44 symbol := uint32(1)
45 r := uint32(s)
46 if state >= 7 {
47 m := uint32(match)
48 for {
49 matchBit := (m >> 7) & 1
50 m <<= 1
51 bit := (r >> 7) & 1
52 r <<= 1
53 i := ((1 + matchBit) << 8) | symbol
54 if err = probs[i].Encode(e, bit); err != nil {
55 return
56 }
57 symbol = (symbol << 1) | bit
58 if matchBit != bit {
59 break
60 }
61 if symbol >= 0x100 {
62 break
63 }
64 }
65 }
66 for symbol < 0x100 {
67 bit := (r >> 7) & 1
68 r <<= 1
69 if err = probs[symbol].Encode(e, bit); err != nil {
70 return
71 }
72 symbol = (symbol << 1) | bit
73 }
74 return nil
75}
76
77// Decode decodes a literal byte using the range decoder as well as the LZMA
78// state, a match byte, and the literal state.
79func (c *literalCodec) Decode(d *rangeDecoder,
80 state uint32, match byte, litState uint32,
81) (s byte, err error) {
82 k := litState * 0x300
83 probs := c.probs[k : k+0x300]
84 symbol := uint32(1)
85 if state >= 7 {
86 m := uint32(match)
87 for {
88 matchBit := (m >> 7) & 1
89 m <<= 1
90 i := ((1 + matchBit) << 8) | symbol
91 bit, err := d.DecodeBit(&probs[i])
92 if err != nil {
93 return 0, err
94 }
95 symbol = (symbol << 1) | bit
96 if matchBit != bit {
97 break
98 }
99 if symbol >= 0x100 {
100 break
101 }
102 }
103 }
104 for symbol < 0x100 {
105 bit, err := d.DecodeBit(&probs[symbol])
106 if err != nil {
107 return 0, err
108 }
109 symbol = (symbol << 1) | bit
110 }
111 s = byte(symbol - 0x100)
112 return s, nil
113}
114
115// minLC and maxLC define the range for LC values.
116const (
117 minLC = 0
118 maxLC = 8
119)
120
121// minLP and maxLP define the range for LP values.
122const (
123 minLP = 0
124 maxLP = 4
125)
126
127// minState and maxState define the range for the state values used by
128// the state type.
129const (
130 minState = 0
131 maxState = 11
132)
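
init above sizes the probability table as 0x300 (768) values for each of the 2^(lc+lp) literal states. A trivial sketch of that sizing; the parameter combinations below are only examples within the allowed LC/LP ranges:

package main

import "fmt"

// literalProbs returns the number of probability values the literal codec
// allocates for the given lc and lp parameters: 768 (0x300) values for each
// of the 2^(lc+lp) literal states.
func literalProbs(lc, lp int) int {
	return 0x300 << uint(lc+lp)
}

func main() {
	// lc=3, lp=0 are the usual defaults.
	fmt.Println(literalProbs(3, 0)) // 6144
	fmt.Println(literalProbs(0, 2)) // 3072
	fmt.Println(literalProbs(4, 4)) // 196608
}
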
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 0000000..4a244eb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "errors"
8
9// MatchAlgorithm identifies an algorithm to find matches in the
10// dictionary.
11type MatchAlgorithm byte
12
13// Supported matcher algorithms.
14const (
15 HashTable4 MatchAlgorithm = iota
16 BinaryTree
17)
18
19// maStrings are used by the String method.
20var maStrings = map[MatchAlgorithm]string{
21 HashTable4: "HashTable4",
22 BinaryTree: "BinaryTree",
23}
24
25// String returns a string representation of the Matcher.
26func (a MatchAlgorithm) String() string {
27 if s, ok := maStrings[a]; ok {
28 return s
29 }
30 return "unknown"
31}
32
33var errUnsupportedMatchAlgorithm = errors.New(
34 "lzma: unsupported match algorithm value")
35
36// verify checks whether the matcher value is supported.
37func (a MatchAlgorithm) verify() error {
38 if _, ok := maStrings[a]; !ok {
39 return errUnsupportedMatchAlgorithm
40 }
41 return nil
42}
43
44func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
45 switch a {
46 case HashTable4:
47 return newHashTable(dictCap, 4)
48 case BinaryTree:
49 return newBinTree(dictCap)
50 }
51 return nil, errUnsupportedMatchAlgorithm
52}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 0000000..733bb99
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,80 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "unicode"
11)
12
13// operation represents an operation on the dictionary during encoding or
14// decoding.
15type operation interface {
16 Len() int
17}
18
19// match represents a repetition at the given distance with the given length.
20type match struct {
21 // supports all possible distance values, including the eos marker
22 distance int64
23 // length
24 n int
25}
26
27// verify checks whether the match is valid. If that is not the case an
28// error is returned.
29func (m match) verify() error {
30 if !(minDistance <= m.distance && m.distance <= maxDistance) {
31 return errors.New("distance out of range")
32 }
33 if !(1 <= m.n && m.n <= maxMatchLen) {
34 return errors.New("length out of range")
35 }
36 return nil
37}
38
39// l returns the l-value for the match, which is the difference of the
40// length n and minMatchLen (2).
41func (m match) l() uint32 {
42 return uint32(m.n - minMatchLen)
43}
44
45// dist returns the dist value for the match, which is one less than the
46// distance stored in the match.
47func (m match) dist() uint32 {
48 return uint32(m.distance - minDistance)
49}
50
51// Len returns the number of bytes matched.
52func (m match) Len() int {
53 return m.n
54}
55
56// String returns a string representation for the repetition.
57func (m match) String() string {
58 return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
59}
60
61// lit represents a single byte literal.
62type lit struct {
63 b byte
64}
65
66// Len returns 1 for the single byte literal.
67func (l lit) Len() int {
68 return 1
69}
70
71// String returns a string representation for the literal.
72func (l lit) String() string {
73 var c byte
74 if unicode.IsPrint(rune(l.b)) {
75 c = l.b
76 } else {
77 c = '.'
78 }
79 return fmt.Sprintf("L{%c/%02x}", c, l.b)
80}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go
new file mode 100644
index 0000000..24d50ec
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go
@@ -0,0 +1,53 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7// movebits defines the number of bits used for the updates of probability
8// values.
9const movebits = 5
10
11// probbits defines the number of bits of a probability value.
12const probbits = 11
13
14// probInit defines 0.5 as initial value for prob values.
15const probInit prob = 1 << (probbits - 1)
16
17// Type prob represents probabilities. The type can also be used to encode and
18// decode single bits.
19type prob uint16
20
21// dec decreases the probability. The decrease is proportional to the
22// probability value.
23func (p *prob) dec() {
24 *p -= *p >> movebits
25}
26
27// inc increases the probability. The increase is proportional to the
28// difference of 1 and the probability value.
29func (p *prob) inc() {
30 *p += ((1 << probbits) - *p) >> movebits
31}
32
33// bound computes the new bound for a given range using the probability value.
34func (p prob) bound(r uint32) uint32 {
35 return (r >> probbits) * uint32(p)
36}
37
38// Bits returns 1. One is the number of bits that can be encoded or decoded
39// with a single prob value.
40func (p prob) Bits() int {
41 return 1
42}
43
44// Encode encodes the least-significant bit of v. Note that the p value will be
45// changed.
46func (p *prob) Encode(e *rangeEncoder, v uint32) error {
47 return e.EncodeBit(v, p)
48}
49
50// Decode decodes a single bit. Note that the p value will change.
51func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
52 return d.DecodeBit(p)
53}
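
The inc and dec updates move a probability value by 1/2^movebits (1/32) of the remaining distance toward 2^probbits or toward 0, so repeatedly coding the same bit makes the estimate drift quickly at first and then level off. A self-contained sketch of that drift; the prob type here is a copy for illustration, not an import of the package's type.

package main

import "fmt"

const (
	movebits = 5
	probbits = 11
	probInit = 1 << (probbits - 1) // 1024, i.e. probability 0.5
)

type prob uint16

// dec moves the probability of a 0-bit down (a 1-bit was coded).
func (p *prob) dec() { *p -= *p >> movebits }

// inc moves the probability of a 0-bit up (a 0-bit was coded).
func (p *prob) inc() { *p += ((1 << probbits) - *p) >> movebits }

func main() {
	p := prob(probInit)
	fmt.Println("start:", p)
	// Ten 0-bits in a row pull the value toward 2048.
	for i := 0; i < 10; i++ {
		p.inc()
	}
	fmt.Println("after ten 0-bits:", p)
	// Ten 1-bits pull it back toward 0.
	for i := 0; i < 10; i++ {
		p.dec()
	}
	fmt.Println("after ten 1-bits:", p)
}
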
diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go
new file mode 100644
index 0000000..23418e2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go
@@ -0,0 +1,69 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
12// maximum and minimum values for the LZMA properties.
13const (
14 minPB = 0
15 maxPB = 4
16)
17
18// maxPropertyCode is the maximum possible value of a properties code byte.
19const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1
20
21// Properties contains the parameters LC, LP and PB. The parameter LC
22// defines the number of literal context bits; parameter LP the number
23// of literal position bits and PB the number of position bits.
24type Properties struct {
25 LC int
26 LP int
27 PB int
28}
29
30// String returns the properties in a string representation.
31func (p *Properties) String() string {
32 return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
33}
34
35// PropertiesForCode converts a properties code byte into a Properties value.
36func PropertiesForCode(code byte) (p Properties, err error) {
37 if code > maxPropertyCode {
38 return p, errors.New("lzma: invalid properties code")
39 }
40 p.LC = int(code % 9)
41 code /= 9
42 p.LP = int(code % 5)
43 code /= 5
44 p.PB = int(code % 5)
45 return p, err
46}
47
48// verify checks the properties for correctness.
49func (p *Properties) verify() error {
50 if p == nil {
51 return errors.New("lzma: properties are nil")
52 }
53 if !(minLC <= p.LC && p.LC <= maxLC) {
54 return errors.New("lzma: lc out of range")
55 }
56 if !(minLP <= p.LP && p.LP <= maxLP) {
57 return errors.New("lzma: lp out of range")
58 }
59 if !(minPB <= p.PB && p.PB <= maxPB) {
60 return errors.New("lzma: pb out of range")
61 }
62 return nil
63}
64
65// Code converts the properties to a byte. The function assumes that
66// the properties components are all in range.
67func (p Properties) Code() byte {
68 return byte((p.PB*5+p.LP)*9 + p.LC)
69}
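
Code and PropertiesForCode above are inverse mappings between the (LC, LP, PB) triple and the single properties byte via a mixed-radix encoding: code = ((pb*5)+lp)*9+lc. A minimal round-trip sketch; the function names are illustrative only.

package main

import "fmt"

// propsCode encodes (lc, lp, pb) into the single LZMA properties byte.
func propsCode(lc, lp, pb int) byte {
	return byte((pb*5+lp)*9 + lc)
}

// propsDecode splits the properties byte back into its components.
func propsDecode(code byte) (lc, lp, pb int) {
	c := int(code)
	lc = c % 9
	c /= 9
	lp = c % 5
	pb = c / 5
	return lc, lp, pb
}

func main() {
	code := propsCode(3, 0, 2) // the common default parameters
	fmt.Println(code)          // 93 (0x5d)
	fmt.Println(propsDecode(code))
}
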
diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
new file mode 100644
index 0000000..6361c5e
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
@@ -0,0 +1,248 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
12// rangeEncoder implements range encoding of single bits. The low value can
13// overflow, therefore we need a uint64. The cache value is used to handle
14// overflows.
15type rangeEncoder struct {
16 lbw *LimitedByteWriter
17 nrange uint32
18 low uint64
19 cacheLen int64
20 cache byte
21}
22
23// maxInt64 provides the maximal value of the int64 type
24const maxInt64 = 1<<63 - 1
25
26// newRangeEncoder creates a new range encoder.
27func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
28 lbw, ok := bw.(*LimitedByteWriter)
29 if !ok {
30 lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
31 }
32 return &rangeEncoder{
33 lbw: lbw,
34 nrange: 0xffffffff,
35 cacheLen: 1}, nil
36}
37
38// Available returns the number of bytes that can still be written. The
39// method takes into account the bytes that Close will still have to
40// write.
41func (e *rangeEncoder) Available() int64 {
42 return e.lbw.N - (e.cacheLen + 4)
43}
44
45// writeByte writes a single byte to the underlying writer. An error is
46// returned if the limit is reached. The written byte will be counted if
47// the underlying writer doesn't return an error.
48func (e *rangeEncoder) writeByte(c byte) error {
49 if e.Available() < 1 {
50 return ErrLimit
51 }
52 return e.lbw.WriteByte(c)
53}
54
55// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
56func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
57 e.nrange >>= 1
58 e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))
59
60 // normalize
61 const top = 1 << 24
62 if e.nrange >= top {
63 return nil
64 }
65 e.nrange <<= 8
66 return e.shiftLow()
67}
68
69// EncodeBit encodes the least significant bit of b. The p value will be
70// updated by the function depending on the bit encoded.
71func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
72 bound := p.bound(e.nrange)
73 if b&1 == 0 {
74 e.nrange = bound
75 p.inc()
76 } else {
77 e.low += uint64(bound)
78 e.nrange -= bound
79 p.dec()
80 }
81
82 // normalize
83 const top = 1 << 24
84 if e.nrange >= top {
85 return nil
86 }
87 e.nrange <<= 8
88 return e.shiftLow()
89}
90
91// Close writes a complete copy of the low value.
92func (e *rangeEncoder) Close() error {
93 for i := 0; i < 5; i++ {
94 if err := e.shiftLow(); err != nil {
95 return err
96 }
97 }
98 return nil
99}
100
101// shiftLow shifts the low value by 8 bits. The shifted byte is written into
102// the byte writer. The cache value is used to handle overflows.
103func (e *rangeEncoder) shiftLow() error {
104 if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
105 tmp := e.cache
106 for {
107 err := e.writeByte(tmp + byte(e.low>>32))
108 if err != nil {
109 return err
110 }
111 tmp = 0xff
112 e.cacheLen--
113 if e.cacheLen <= 0 {
114 if e.cacheLen < 0 {
115 panic("negative cacheLen")
116 }
117 break
118 }
119 }
120 e.cache = byte(uint32(e.low) >> 24)
121 }
122 e.cacheLen++
123 e.low = uint64(uint32(e.low) << 8)
124 return nil
125}
126
127// rangeDecoder decodes single bits of the range encoding stream.
128type rangeDecoder struct {
129 br io.ByteReader
130 nrange uint32
131 code uint32
132}
133
134// init initializes the range decoder by reading from the byte reader.
135func (d *rangeDecoder) init() error {
136 d.nrange = 0xffffffff
137 d.code = 0
138
139 b, err := d.br.ReadByte()
140 if err != nil {
141 return err
142 }
143 if b != 0 {
144 return errors.New("newRangeDecoder: first byte not zero")
145 }
146
147 for i := 0; i < 4; i++ {
148 if err = d.updateCode(); err != nil {
149 return err
150 }
151 }
152
153 if d.code >= d.nrange {
154 return errors.New("newRangeDecoder: d.code >= d.nrange")
155 }
156
157 return nil
158}
159
160// newRangeDecoder initializes a range decoder. It reads five bytes from the
161// reader and therefore may return an error.
162func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
163 d = &rangeDecoder{br: br, nrange: 0xffffffff}
164
165 b, err := d.br.ReadByte()
166 if err != nil {
167 return nil, err
168 }
169 if b != 0 {
170 return nil, errors.New("newRangeDecoder: first byte not zero")
171 }
172
173 for i := 0; i < 4; i++ {
174 if err = d.updateCode(); err != nil {
175 return nil, err
176 }
177 }
178
179 if d.code >= d.nrange {
180 return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
181 }
182
183 return d, nil
184}
185
186// possiblyAtEnd checks whether the decoder may be at the end of the stream.
187func (d *rangeDecoder) possiblyAtEnd() bool {
188 return d.code == 0
189}
190
191// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
192// contain the bit at the least-significant position. All other bits will be
193// zero.
194func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
195 d.nrange >>= 1
196 d.code -= d.nrange
197 t := 0 - (d.code >> 31)
198 d.code += d.nrange & t
199 b = (t + 1) & 1
200
201 // d.code will stay less than d.nrange
202
203 // normalize
204 // assume d.code < d.nrange
205 const top = 1 << 24
206 if d.nrange >= top {
207 return b, nil
208 }
209 d.nrange <<= 8
210 // d.code < d.nrange will be maintained
211 return b, d.updateCode()
212}
213
214// DecodeBit decodes a single bit. The bit will be returned at the
215// least-significant position. All other bits will be zero. The probability
216// value will be updated.
217func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) {
218 bound := p.bound(d.nrange)
219 if d.code < bound {
220 d.nrange = bound
221 p.inc()
222 b = 0
223 } else {
224 d.code -= bound
225 d.nrange -= bound
226 p.dec()
227 b = 1
228 }
229 // normalize
230 // assume d.code < d.nrange
231 const top = 1 << 24
232 if d.nrange >= top {
233 return b, nil
234 }
235 d.nrange <<= 8
236 // d.code < d.nrange will be maintained
237 return b, d.updateCode()
238}
239
240// updateCode reads a new byte into the code.
241func (d *rangeDecoder) updateCode() error {
242 b, err := d.br.ReadByte()
243 if err != nil {
244 return err
245 }
246 d.code = (d.code << 8) | uint32(b)
247 return nil
248}
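
EncodeBit and DecodeBit rely on the prob type's bound, inc and dec methods, which are defined elsewhere in the package and not part of this diff. Below is a standalone sketch of the adaptive-probability model they imply; the 11-bit resolution and the shift of 5 are assumptions taken from the usual LZMA design, not from the vendored code:

package main

import "fmt"

const (
    probBits = 11                  // assumed probability resolution
    probInit = 1 << (probBits - 1) // 1024, i.e. probability 1/2 for a 0 bit
    moveBits = 5                   // assumed adaptation speed
)

type prob uint16

// bound splits the current range according to the probability of a 0 bit.
func (p prob) bound(nrange uint32) uint32 {
    return (nrange >> probBits) * uint32(p)
}

// inc moves the probability towards "bit is 0", dec towards "bit is 1".
func (p *prob) inc() { *p += ((1 << probBits) - *p) >> moveBits }
func (p *prob) dec() { *p -= *p >> moveBits }

func main() {
    p := prob(probInit)
    for i := 0; i < 3; i++ {
        p.inc() // three 0 bits in a row raise the probability of another 0
    }
    fmt.Println(p)                   // 1117
    fmt.Println(p.bound(0xffffffff)) // portion of the range assigned to a 0 bit
}
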
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go
new file mode 100644
index 0000000..2ef3dca
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go
@@ -0,0 +1,100 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package lzma supports the decoding and encoding of LZMA streams.
6// Reader and Writer support the classic LZMA format. Reader2 and
7// Writer2 support the decoding and encoding of LZMA2 streams.
8//
9// The package is written completely in Go and doesn't rely on any external
10// library.
11package lzma
12
13import (
14 "errors"
15 "io"
16)
17
18// ReaderConfig stores the parameters for the reader of the classic LZMA
19// format.
20type ReaderConfig struct {
21 DictCap int
22}
23
24// fill converts the zero values of the configuration to the default values.
25func (c *ReaderConfig) fill() {
26 if c.DictCap == 0 {
27 c.DictCap = 8 * 1024 * 1024
28 }
29}
30
31// Verify checks the reader configuration for errors. Zero values will
32// be replaced by default values.
33func (c *ReaderConfig) Verify() error {
34 c.fill()
35 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
36 return errors.New("lzma: dictionary capacity is out of range")
37 }
38 return nil
39}
40
41// Reader provides a reader for LZMA files or streams.
42type Reader struct {
43 lzma io.Reader
44 h header
45 d *decoder
46}
47
48// NewReader creates a new reader for an LZMA stream using the classic
49// format. NewReader reads and checks the header of the LZMA stream.
50func NewReader(lzma io.Reader) (r *Reader, err error) {
51 return ReaderConfig{}.NewReader(lzma)
52}
53
54// NewReader creates a new reader for an LZMA stream in the classic
55// format. The function reads and verifies the header of the LZMA
56// stream.
57func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
58 if err = c.Verify(); err != nil {
59 return nil, err
60 }
61 data := make([]byte, HeaderLen)
62 if _, err := io.ReadFull(lzma, data); err != nil {
63 if err == io.EOF {
64 return nil, errors.New("lzma: unexpected EOF")
65 }
66 return nil, err
67 }
68 r = &Reader{lzma: lzma}
69 if err = r.h.unmarshalBinary(data); err != nil {
70 return nil, err
71 }
72 if r.h.dictCap < MinDictCap {
73 return nil, errors.New("lzma: dictionary capacity too small")
74 }
75 dictCap := r.h.dictCap
76 if c.DictCap > dictCap {
77 dictCap = c.DictCap
78 }
79
80 state := newState(r.h.properties)
81 dict, err := newDecoderDict(dictCap)
82 if err != nil {
83 return nil, err
84 }
85 r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
86 if err != nil {
87 return nil, err
88 }
89 return r, nil
90}
91
92// EOSMarker indicates that an EOS marker has been encountered.
93func (r *Reader) EOSMarker() bool {
94 return r.d.eosMarker
95}
96
97// Read returns uncompressed data.
98func (r *Reader) Read(p []byte) (n int, err error) {
99 return r.d.Read(p)
100}
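
A minimal usage sketch for the classic-format reader defined in this file; the file name data.lzma is only a placeholder:

package main

import (
    "io"
    "log"
    "os"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    f, err := os.Open("data.lzma")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // NewReader reads and checks the LZMA header before returning.
    r, err := lzma.NewReader(f)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := io.Copy(os.Stdout, r); err != nil {
        log.Fatal(err)
    }
}
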
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 0000000..a55cfaa
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,232 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10
11 "github.com/ulikunitz/xz/internal/xlog"
12)
13
14// Reader2Config stores the parameters for the reader of the LZMA2
15// format.
16type Reader2Config struct {
17 DictCap int
18}
19
20// fill converts the zero values of the configuration to the default values.
21func (c *Reader2Config) fill() {
22 if c.DictCap == 0 {
23 c.DictCap = 8 * 1024 * 1024
24 }
25}
26
27// Verify checks the reader configuration for errors. Zero configuration values
28// will be replaced by default values.
29func (c *Reader2Config) Verify() error {
30 c.fill()
31 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
32 return errors.New("lzma: dictionary capacity is out of range")
33 }
34 return nil
35}
36
37// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
38// first chunk should have a dictionary reset and the first compressed
39// chunk a properties reset. The chunk sequence need not be terminated by
40// an end-of-stream chunk.
41type Reader2 struct {
42 r io.Reader
43 err error
44
45 dict *decoderDict
46 ur *uncompressedReader
47 decoder *decoder
48 chunkReader io.Reader
49
50 cstate chunkState
51 ctype chunkType
52}
53
54// NewReader2 creates a reader for an LZMA2 chunk sequence.
55func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
56 return Reader2Config{}.NewReader2(lzma2)
57}
58
59// NewReader2 creates an LZMA2 reader using the given configuration.
60func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
61 if err = c.Verify(); err != nil {
62 return nil, err
63 }
64 r = &Reader2{r: lzma2, cstate: start}
65 r.dict, err = newDecoderDict(c.DictCap)
66 if err != nil {
67 return nil, err
68 }
69 if err = r.startChunk(); err != nil {
70 r.err = err
71 }
72 return r, nil
73}
74
75// uncompressed tests whether the chunk type specifies an uncompressed
76// chunk.
77func uncompressed(ctype chunkType) bool {
78 return ctype == cU || ctype == cUD
79}
80
81// startChunk parses a new chunk.
82func (r *Reader2) startChunk() error {
83 r.chunkReader = nil
84 header, err := readChunkHeader(r.r)
85 if err != nil {
86 if err == io.EOF {
87 err = io.ErrUnexpectedEOF
88 }
89 return err
90 }
91 xlog.Debugf("chunk header %v", header)
92 if err = r.cstate.next(header.ctype); err != nil {
93 return err
94 }
95 if r.cstate == stop {
96 return io.EOF
97 }
98 if header.ctype == cUD || header.ctype == cLRND {
99 r.dict.Reset()
100 }
101 size := int64(header.uncompressed) + 1
102 if uncompressed(header.ctype) {
103 if r.ur != nil {
104 r.ur.Reopen(r.r, size)
105 } else {
106 r.ur = newUncompressedReader(r.r, r.dict, size)
107 }
108 r.chunkReader = r.ur
109 return nil
110 }
111 br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
112 if r.decoder == nil {
113 state := newState(header.props)
114 r.decoder, err = newDecoder(br, state, r.dict, size)
115 if err != nil {
116 return err
117 }
118 r.chunkReader = r.decoder
119 return nil
120 }
121 switch header.ctype {
122 case cLR:
123 r.decoder.State.Reset()
124 case cLRN, cLRND:
125 r.decoder.State = newState(header.props)
126 }
127 err = r.decoder.Reopen(br, size)
128 if err != nil {
129 return err
130 }
131 r.chunkReader = r.decoder
132 return nil
133}
134
135// Read reads data from the LZMA2 chunk sequence.
136func (r *Reader2) Read(p []byte) (n int, err error) {
137 if r.err != nil {
138 return 0, r.err
139 }
140 for n < len(p) {
141 var k int
142 k, err = r.chunkReader.Read(p[n:])
143 n += k
144 if err != nil {
145 if err == io.EOF {
146 err = r.startChunk()
147 if err == nil {
148 continue
149 }
150 }
151 r.err = err
152 return n, err
153 }
154 if k == 0 {
155 r.err = errors.New("lzma: Reader2 doesn't get data")
156 return n, r.err
157 }
158 }
159 return n, nil
160}
161
162// EOS returns whether the LZMA2 stream has been terminated by an
163// end-of-stream chunk.
164func (r *Reader2) EOS() bool {
165 return r.cstate == stop
166}
167
168// uncompressedReader is used to read uncompressed chunks.
169type uncompressedReader struct {
170 lr io.LimitedReader
171 Dict *decoderDict
172 eof bool
173 err error
174}
175
176// newUncompressedReader initializes a new uncompressedReader.
177func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader {
178 ur := &uncompressedReader{
179 lr: io.LimitedReader{R: r, N: size},
180 Dict: dict,
181 }
182 return ur
183}
184
185// Reopen reinitializes an uncompressed reader.
186func (ur *uncompressedReader) Reopen(r io.Reader, size int64) {
187 ur.err = nil
188 ur.eof = false
189 ur.lr = io.LimitedReader{R: r, N: size}
190}
191
192// fill reads uncompressed data into the dictionary.
193func (ur *uncompressedReader) fill() error {
194 if !ur.eof {
195 n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available()))
196 if err != io.EOF {
197 return err
198 }
199 ur.eof = true
200 if n > 0 {
201 return nil
202 }
203 }
204 if ur.lr.N != 0 {
205 return io.ErrUnexpectedEOF
206 }
207 return io.EOF
208}
209
210// Read reads uncompressed data from the limited reader.
211func (ur *uncompressedReader) Read(p []byte) (n int, err error) {
212 if ur.err != nil {
213 return 0, ur.err
214 }
215 for {
216 var k int
217 k, err = ur.Dict.Read(p[n:])
218 n += k
219 if n >= len(p) {
220 return n, nil
221 }
222 if err != nil {
223 break
224 }
225 err = ur.fill()
226 if err != nil {
227 break
228 }
229 }
230 ur.err = err
231 return n, err
232}
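
A sketch of reading a raw LZMA2 chunk sequence through Reader2Config; the file name and the 16 MiB dictionary capacity are only examples:

package main

import (
    "io"
    "log"
    "os"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    f, err := os.Open("chunks.lzma2")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    cfg := lzma.Reader2Config{DictCap: 16 * 1024 * 1024}
    r, err := cfg.NewReader2(f)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := io.Copy(os.Stdout, r); err != nil {
        log.Fatal(err)
    }
}
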
diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go
new file mode 100644
index 0000000..5023510
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/state.go
@@ -0,0 +1,151 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7// states defines the overall state count
8const states = 12
9
10// state maintains the full state of the encoding or decoding
11// process.
12type state struct {
13 rep [4]uint32
14 isMatch [states << maxPosBits]prob
15 isRepG0Long [states << maxPosBits]prob
16 isRep [states]prob
17 isRepG0 [states]prob
18 isRepG1 [states]prob
19 isRepG2 [states]prob
20 litCodec literalCodec
21 lenCodec lengthCodec
22 repLenCodec lengthCodec
23 distCodec distCodec
24 state uint32
25 posBitMask uint32
26 Properties Properties
27}
28
29// initProbSlice initializes a slice of probabilities.
30func initProbSlice(p []prob) {
31 for i := range p {
32 p[i] = probInit
33 }
34}
35
36// Reset sets all state information to the original values.
37func (s *state) Reset() {
38 p := s.Properties
39 *s = state{
40 Properties: p,
41 // dict: s.dict,
42 posBitMask: (uint32(1) << uint(p.PB)) - 1,
43 }
44 initProbSlice(s.isMatch[:])
45 initProbSlice(s.isRep[:])
46 initProbSlice(s.isRepG0[:])
47 initProbSlice(s.isRepG1[:])
48 initProbSlice(s.isRepG2[:])
49 initProbSlice(s.isRepG0Long[:])
50 s.litCodec.init(p.LC, p.LP)
51 s.lenCodec.init()
52 s.repLenCodec.init()
53 s.distCodec.init()
54}
55
56// initState initializes the state.
57func initState(s *state, p Properties) {
58 *s = state{Properties: p}
59 s.Reset()
60}
61
62// newState creates a new state from the given Properties.
63func newState(p Properties) *state {
64 s := &state{Properties: p}
65 s.Reset()
66 return s
67}
68
69// deepcopy initializes s as a deep copy of the source.
70func (s *state) deepcopy(src *state) {
71 if s == src {
72 return
73 }
74 s.rep = src.rep
75 s.isMatch = src.isMatch
76 s.isRepG0Long = src.isRepG0Long
77 s.isRep = src.isRep
78 s.isRepG0 = src.isRepG0
79 s.isRepG1 = src.isRepG1
80 s.isRepG2 = src.isRepG2
81 s.litCodec.deepcopy(&src.litCodec)
82 s.lenCodec.deepcopy(&src.lenCodec)
83 s.repLenCodec.deepcopy(&src.repLenCodec)
84 s.distCodec.deepcopy(&src.distCodec)
85 s.state = src.state
86 s.posBitMask = src.posBitMask
87 s.Properties = src.Properties
88}
89
90// cloneState creates a new clone of the given state.
91func cloneState(src *state) *state {
92 s := new(state)
93 s.deepcopy(src)
94 return s
95}
96
97// updateStateLiteral updates the state for a literal.
98func (s *state) updateStateLiteral() {
99 switch {
100 case s.state < 4:
101 s.state = 0
102 return
103 case s.state < 10:
104 s.state -= 3
105 return
106 }
107 s.state -= 6
108}
109
110// updateStateMatch updates the state for a match.
111func (s *state) updateStateMatch() {
112 if s.state < 7 {
113 s.state = 7
114 } else {
115 s.state = 10
116 }
117}
118
119// updateStateRep updates the state for a repetition.
120func (s *state) updateStateRep() {
121 if s.state < 7 {
122 s.state = 8
123 } else {
124 s.state = 11
125 }
126}
127
128// updateStateShortRep updates the state for a short repetition.
129func (s *state) updateStateShortRep() {
130 if s.state < 7 {
131 s.state = 9
132 } else {
133 s.state = 11
134 }
135}
136
137// states computes the states of the operation codec.
138func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
139 state1 = s.state
140 posState = uint32(dictHead) & s.posBitMask
141 state2 = (s.state << maxPosBits) | posState
142 return
143}
144
145// litState computes the literal state.
146func (s *state) litState(prev byte, dictHead int64) uint32 {
147 lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
148 litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
149 (uint32(prev) >> (8 - lc))
150 return litState
151}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
new file mode 100644
index 0000000..504b3d7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
@@ -0,0 +1,133 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7// treeCodec encodes or decodes values with a fixed bit size. It uses a
8// tree of probability values. The root of the tree is the most-significant bit.
9type treeCodec struct {
10 probTree
11}
12
13// makeTreeCodec makes a tree codec. The bits value must be inside the range
14// [1,32].
15func makeTreeCodec(bits int) treeCodec {
16 return treeCodec{makeProbTree(bits)}
17}
18
19// deepcopy initializes tc as a deep copy of the source.
20func (tc *treeCodec) deepcopy(src *treeCodec) {
21 tc.probTree.deepcopy(&src.probTree)
22}
23
24// Encode uses the range encoder to encode a fixed-bit-size value.
25func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
26 m := uint32(1)
27 for i := int(tc.bits) - 1; i >= 0; i-- {
28 b := (v >> uint(i)) & 1
29 if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
30 return err
31 }
32 m = (m << 1) | b
33 }
34 return nil
35}
36
37// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
38// be caused by the range decoder.
39func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
40 m := uint32(1)
41 for j := 0; j < int(tc.bits); j++ {
42 b, err := d.DecodeBit(&tc.probs[m])
43 if err != nil {
44 return 0, err
45 }
46 m = (m << 1) | b
47 }
48 return m - (1 << uint(tc.bits)), nil
49}
50
51// treeReverseCodec is another tree codec, where the least-significant bit is
52// the start of the probability tree.
53type treeReverseCodec struct {
54 probTree
55}
56
57// deepcopy initializes the treeReverseCodec as a deep copy of the
58// source.
59func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
60 tc.probTree.deepcopy(&src.probTree)
61}
62
63// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument must
64// be in the range [1,32].
65func makeTreeReverseCodec(bits int) treeReverseCodec {
66 return treeReverseCodec{makeProbTree(bits)}
67}
68
69// Encode uses the range encoder to encode a fixed-bit-size value. The range
70// encoder may cause errors.
71func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
72 m := uint32(1)
73 for i := uint(0); i < uint(tc.bits); i++ {
74 b := (v >> i) & 1
75 if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
76 return err
77 }
78 m = (m << 1) | b
79 }
80 return nil
81}
82
83// Decode uses the range decoder to decode a fixed-bit-size value. Errors
84// returned by the range decoder will be returned.
85func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
86 m := uint32(1)
87 for j := uint(0); j < uint(tc.bits); j++ {
88 b, err := d.DecodeBit(&tc.probs[m])
89 if err != nil {
90 return 0, err
91 }
92 m = (m << 1) | b
93 v |= b << j
94 }
95 return v, nil
96}
97
98// probTree stores enough probability values to be used by the treeEncode and
99// treeDecode methods of the range coder types.
100type probTree struct {
101 probs []prob
102 bits byte
103}
104
105// deepcopy initializes the probTree value as a deep copy of the source.
106func (t *probTree) deepcopy(src *probTree) {
107 if t == src {
108 return
109 }
110 t.probs = make([]prob, len(src.probs))
111 copy(t.probs, src.probs)
112 t.bits = src.bits
113}
114
115// makeProbTree initializes a probTree structure.
116func makeProbTree(bits int) probTree {
117 if !(1 <= bits && bits <= 32) {
118 panic("bits outside of range [1,32]")
119 }
120 t := probTree{
121 bits: byte(bits),
122 probs: make([]prob, 1<<uint(bits)),
123 }
124 for i := range t.probs {
125 t.probs[i] = probInit
126 }
127 return t
128}
129
130// Bits provides the number of bits for the values to encode or decode.
131func (t *probTree) Bits() int {
132 return int(t.bits)
133}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
index 0000000..efe34fb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
@@ -0,0 +1,209 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bufio"
9 "errors"
10 "io"
11)
12
13// MinDictCap and MaxDictCap provide the range of supported dictionary
14// capacities.
15const (
16 MinDictCap = 1 << 12
17 MaxDictCap = 1<<32 - 1
18)
19
20// WriterConfig defines the configuration parameters for a writer.
21type WriterConfig struct {
22 // Properties for the encoding. If it is nil, the value
23 // {LC: 3, LP: 0, PB: 2} will be chosen.
24 Properties *Properties
25 // The capacity of the dictionary. If DictCap is zero, the value
26 // 8 MiB will be chosen.
27 DictCap int
28 // Size of the lookahead buffer; value 0 indicates default size
29 // 4096
30 BufSize int
31 // Match algorithm
32 Matcher MatchAlgorithm
33 // SizeInHeader indicates that the header will contain an
34 // explicit size.
35 SizeInHeader bool
36 // Size of the data to be encoded. A positive value will imply
37 // that an explicit size will be set in the header.
38 Size int64
39 // EOSMarker requests that an end-of-stream marker be written.
40 // If no explicit size has been given, the EOSMarker flag will be
41 // set automatically.
42 EOSMarker bool
43}
44
45// fill converts zero-value fields to their explicit default values.
46func (c *WriterConfig) fill() {
47 if c.Properties == nil {
48 c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
49 }
50 if c.DictCap == 0 {
51 c.DictCap = 8 * 1024 * 1024
52 }
53 if c.BufSize == 0 {
54 c.BufSize = 4096
55 }
56 if c.Size > 0 {
57 c.SizeInHeader = true
58 }
59 if !c.SizeInHeader {
60 c.EOSMarker = true
61 }
62}
63
64// Verify checks WriterConfig for errors. Verify will replace zero
65// values with default values.
66func (c *WriterConfig) Verify() error {
67 if c == nil {
68 return errors.New("lzma: WriterConfig is nil")
69 }
70 c.fill()
71 var err error
72 if c.Properties == nil {
73 return errors.New("lzma: WriterConfig has no Properties set")
74 }
75 if err = c.Properties.verify(); err != nil {
76 return err
77 }
78 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
79 return errors.New("lzma: dictionary capacity is out of range")
80 }
81 if !(maxMatchLen <= c.BufSize) {
82 return errors.New("lzma: lookahead buffer size too small")
83 }
84 if c.SizeInHeader {
85 if c.Size < 0 {
86 return errors.New("lzma: negative size not supported")
87 }
88 } else if !c.EOSMarker {
89 return errors.New("lzma: EOS marker is required")
90 }
91 if err = c.Matcher.verify(); err != nil {
92 return err
93 }
94
95 return nil
96}
97
98// header returns the header structure for this configuration.
99func (c *WriterConfig) header() header {
100 h := header{
101 properties: *c.Properties,
102 dictCap: c.DictCap,
103 size: -1,
104 }
105 if c.SizeInHeader {
106 h.size = c.Size
107 }
108 return h
109}
110
111// Writer writes an LZMA stream in the classic format.
112type Writer struct {
113 h header
114 bw io.ByteWriter
115 buf *bufio.Writer
116 e *encoder
117}
118
119// NewWriter creates a new LZMA writer for the classic format. The
120// method will write the header to the underlying stream.
121func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
122 if err = c.Verify(); err != nil {
123 return nil, err
124 }
125 w = &Writer{h: c.header()}
126
127 var ok bool
128 w.bw, ok = lzma.(io.ByteWriter)
129 if !ok {
130 w.buf = bufio.NewWriter(lzma)
131 w.bw = w.buf
132 }
133 state := newState(w.h.properties)
134 m, err := c.Matcher.new(w.h.dictCap)
135 if err != nil {
136 return nil, err
137 }
138 dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
139 if err != nil {
140 return nil, err
141 }
142 var flags encoderFlags
143 if c.EOSMarker {
144 flags = eosMarker
145 }
146 if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
147 return nil, err
148 }
149
150 if err = w.writeHeader(); err != nil {
151 return nil, err
152 }
153 return w, nil
154}
155
156// NewWriter creates a new LZMA writer using the classic format. The
157// function writes the header to the underlying stream.
158func NewWriter(lzma io.Writer) (w *Writer, err error) {
159 return WriterConfig{}.NewWriter(lzma)
160}
161
162// writeHeader writes the LZMA header into the stream.
163func (w *Writer) writeHeader() error {
164 data, err := w.h.marshalBinary()
165 if err != nil {
166 return err
167 }
168 _, err = w.bw.(io.Writer).Write(data)
169 return err
170}
171
172// Write puts data into the Writer.
173func (w *Writer) Write(p []byte) (n int, err error) {
174 if w.h.size >= 0 {
175 m := w.h.size
176 m -= w.e.Compressed() + int64(w.e.dict.Buffered())
177 if m < 0 {
178 m = 0
179 }
180 if m < int64(len(p)) {
181 p = p[:m]
182 err = ErrNoSpace
183 }
184 }
185 var werr error
186 if n, werr = w.e.Write(p); werr != nil {
187 err = werr
188 }
189 return n, err
190}
191
192// Close closes the writer stream. It ensures that all data from the
193// buffer will be compressed and the LZMA stream will be finished.
194func (w *Writer) Close() error {
195 if w.h.size >= 0 {
196 n := w.e.Compressed() + int64(w.e.dict.Buffered())
197 if n != w.h.size {
198 return errSize
199 }
200 }
201 err := w.e.Close()
202 if w.buf != nil {
203 ferr := w.buf.Flush()
204 if err == nil {
205 err = ferr
206 }
207 }
208 return err
209}
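
A sketch of the classic-format writer defined in this file; with a zero WriterConfig no size is stored in the header, so the writer terminates the stream with an EOS marker on Close. The output file name is a placeholder:

package main

import (
    "log"
    "os"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    out, err := os.Create("data.lzma")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    w, err := lzma.NewWriter(out)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := w.Write([]byte("hello, lzma\n")); err != nil {
        log.Fatal(err)
    }
    if err := w.Close(); err != nil { // flushes the encoder and writes the EOS marker
        log.Fatal(err)
    }
}
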
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
new file mode 100644
index 0000000..7c1afe1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
@@ -0,0 +1,305 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bytes"
9 "errors"
10 "io"
11)
12
13// Writer2Config defines the configuration parameters for a Writer2.
14type Writer2Config struct {
15 // The properties for the encoding. If it is nil, the value
16 // {LC: 3, LP: 0, PB: 2} will be chosen.
17 Properties *Properties
18 // The capacity of the dictionary. If DictCap is zero, the value
19 // 8 MiB will be chosen.
20 DictCap int
21 // Size of the lookahead buffer; value 0 indicates default size
22 // 4096
23 BufSize int
24 // Match algorithm
25 Matcher MatchAlgorithm
26}
27
28// fill replaces zero values with default values.
29func (c *Writer2Config) fill() {
30 if c.Properties == nil {
31 c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
32 }
33 if c.DictCap == 0 {
34 c.DictCap = 8 * 1024 * 1024
35 }
36 if c.BufSize == 0 {
37 c.BufSize = 4096
38 }
39}
40
41// Verify checks the Writer2Config for correctness. Zero values will be
42// replaced by default values.
43func (c *Writer2Config) Verify() error {
44 if c == nil {
45 return errors.New("lzma: Writer2Config is nil")
46 }
47 c.fill()
48 var err error
49 if c.Properties == nil {
50 return errors.New("lzma: Writer2Config has no Properties set")
51 }
52 if err = c.Properties.verify(); err != nil {
53 return err
54 }
55 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
56 return errors.New("lzma: dictionary capacity is out of range")
57 }
58 if !(maxMatchLen <= c.BufSize) {
59 return errors.New("lzma: lookahead buffer size too small")
60 }
61 if c.Properties.LC+c.Properties.LP > 4 {
62 return errors.New("lzma: sum of lc and lp exceeds 4")
63 }
64 if err = c.Matcher.verify(); err != nil {
65 return err
66 }
67 return nil
68}
69
70// Writer2 supports the creation of an LZMA2 stream. But note that
71// written data is buffered, so call Flush or Close to write data to the
72// underlying writer. The Close method writes the end-of-stream marker
73// to the stream. So you may be able to concatenate the output of two
74// writers as long as the output of the first writer has only been flushed
75// but not closed.
76//
77// Any change to the fields Properties and DictCap must be done before the
78// first call to Write, Flush or Close.
79type Writer2 struct {
80 w io.Writer
81
82 start *state
83 encoder *encoder
84
85 cstate chunkState
86 ctype chunkType
87
88 buf bytes.Buffer
89 lbw LimitedByteWriter
90}
91
92// NewWriter2 creates an LZMA2 chunk sequence writer with the default
93// parameters and options.
94func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
95 return Writer2Config{}.NewWriter2(lzma2)
96}
97
98// NewWriter2 creates a new LZMA2 writer using the given configuration.
99func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
100 if err = c.Verify(); err != nil {
101 return nil, err
102 }
103 w = &Writer2{
104 w: lzma2,
105 start: newState(*c.Properties),
106 cstate: start,
107 ctype: start.defaultChunkType(),
108 }
109 w.buf.Grow(maxCompressed)
110 w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
111 m, err := c.Matcher.new(c.DictCap)
112 if err != nil {
113 return nil, err
114 }
115 d, err := newEncoderDict(c.DictCap, c.BufSize, m)
116 if err != nil {
117 return nil, err
118 }
119 w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
120 if err != nil {
121 return nil, err
122 }
123 return w, nil
124}
125
126// written returns the number of bytes written to the current chunk
127func (w *Writer2) written() int {
128 if w.encoder == nil {
129 return 0
130 }
131 return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
132}
133
134// errClosed indicates that the writer is closed.
135var errClosed = errors.New("lzma: writer closed")
136
137// Write writes data to the LZMA2 stream. Note that written data will be buffered.
138// Use Flush or Close to ensure that data is written to the underlying
139// writer.
140func (w *Writer2) Write(p []byte) (n int, err error) {
141 if w.cstate == stop {
142 return 0, errClosed
143 }
144 for n < len(p) {
145 m := maxUncompressed - w.written()
146 if m <= 0 {
147 panic("lzma: maxUncompressed reached")
148 }
149 var q []byte
150 if n+m < len(p) {
151 q = p[n : n+m]
152 } else {
153 q = p[n:]
154 }
155 k, err := w.encoder.Write(q)
156 n += k
157 if err != nil && err != ErrLimit {
158 return n, err
159 }
160 if err == ErrLimit || k == m {
161 if err = w.flushChunk(); err != nil {
162 return n, err
163 }
164 }
165 }
166 return n, nil
167}
168
169// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
170// stream.
171func (w *Writer2) writeUncompressedChunk() error {
172 u := w.encoder.Compressed()
173 if u <= 0 {
174 return errors.New("lzma: can't write empty uncompressed chunk")
175 }
176 if u > maxUncompressed {
177 panic("overrun of uncompressed data limit")
178 }
179 switch w.ctype {
180 case cLRND:
181 w.ctype = cUD
182 default:
183 w.ctype = cU
184 }
185 w.encoder.state = w.start
186
187 header := chunkHeader{
188 ctype: w.ctype,
189 uncompressed: uint32(u - 1),
190 }
191 hdata, err := header.MarshalBinary()
192 if err != nil {
193 return err
194 }
195 if _, err = w.w.Write(hdata); err != nil {
196 return err
197 }
198 _, err = w.encoder.dict.CopyN(w.w, int(u))
199 return err
200}
201
202// writeCompressedChunk writes a compressed chunk to the underlying
203// writer.
204func (w *Writer2) writeCompressedChunk() error {
205 if w.ctype == cU || w.ctype == cUD {
206 panic("chunk type uncompressed")
207 }
208
209 u := w.encoder.Compressed()
210 if u <= 0 {
211 return errors.New("writeCompressedChunk: empty chunk")
212 }
213 if u > maxUncompressed {
214 panic("overrun of uncompressed data limit")
215 }
216 c := w.buf.Len()
217 if c <= 0 {
218 panic("no compressed data")
219 }
220 if c > maxCompressed {
221 panic("overrun of compressed data limit")
222 }
223 header := chunkHeader{
224 ctype: w.ctype,
225 uncompressed: uint32(u - 1),
226 compressed: uint16(c - 1),
227 props: w.encoder.state.Properties,
228 }
229 hdata, err := header.MarshalBinary()
230 if err != nil {
231 return err
232 }
233 if _, err = w.w.Write(hdata); err != nil {
234 return err
235 }
236 _, err = io.Copy(w.w, &w.buf)
237 return err
238}
239
240// writeChunk writes a single chunk to the underlying writer.
241func (w *Writer2) writeChunk() error {
242 u := int(uncompressedHeaderLen + w.encoder.Compressed())
243 c := headerLen(w.ctype) + w.buf.Len()
244 if u < c {
245 return w.writeUncompressedChunk()
246 }
247 return w.writeCompressedChunk()
248}
249
250// flushChunk terminates the current chunk. The encoder will be reset
251// to support the next chunk.
252func (w *Writer2) flushChunk() error {
253 if w.written() == 0 {
254 return nil
255 }
256 var err error
257 if err = w.encoder.Close(); err != nil {
258 return err
259 }
260 if err = w.writeChunk(); err != nil {
261 return err
262 }
263 w.buf.Reset()
264 w.lbw.N = maxCompressed
265 if err = w.encoder.Reopen(&w.lbw); err != nil {
266 return err
267 }
268 if err = w.cstate.next(w.ctype); err != nil {
269 return err
270 }
271 w.ctype = w.cstate.defaultChunkType()
272 w.start = cloneState(w.encoder.state)
273 return nil
274}
275
276// Flush writes all buffered data out to the underlying stream. This
277// could result in multiple chunks being created.
278func (w *Writer2) Flush() error {
279 if w.cstate == stop {
280 return errClosed
281 }
282 for w.written() > 0 {
283 if err := w.flushChunk(); err != nil {
284 return err
285 }
286 }
287 return nil
288}
289
290// Close terminates the LZMA2 stream with an EOS chunk.
291func (w *Writer2) Close() error {
292 if w.cstate == stop {
293 return errClosed
294 }
295 if err := w.Flush(); err != nil {
296 return err
297 }
298 // write zero byte EOS chunk
299 _, err := w.w.Write([]byte{0})
300 if err != nil {
301 return err
302 }
303 w.cstate = stop
304 return nil
305}
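
A sketch of the chunked LZMA2 writer; data stays buffered until Flush or Close, and Close appends the zero-byte end-of-stream chunk:

package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    var buf bytes.Buffer
    w, err := lzma.NewWriter2(&buf)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := w.Write([]byte("hello, lzma2\n")); err != nil {
        log.Fatal(err)
    }
    if err := w.Close(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(buf.Len(), "bytes of LZMA2 chunks")
}
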
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 0000000..69cf5f7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "errors"
9 "fmt"
10 "io"
11
12 "github.com/ulikunitz/xz/lzma"
13)
14
15// LZMA filter constants.
16const (
17 lzmaFilterID = 0x21
18 lzmaFilterLen = 3
19)
20
21// lzmaFilter declares the LZMA2 filter information stored in an xz
22// block header.
23type lzmaFilter struct {
24 dictCap int64
25}
26
27// String returns a representation of the LZMA filter.
28func (f lzmaFilter) String() string {
29 return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
30}
31
32// id returns the ID for the LZMA2 filter.
33func (f lzmaFilter) id() uint64 { return lzmaFilterID }
34
35// MarshalBinary converts the lzmaFilter into its encoded representation.
36func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
37 c := lzma.EncodeDictCap(f.dictCap)
38 return []byte{lzmaFilterID, 1, c}, nil
39}
40
41// UnmarshalBinary unmarshals the given data representation of the LZMA2
42// filter.
43func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
44 if len(data) != lzmaFilterLen {
45 return errors.New("xz: data for LZMA2 filter has wrong length")
46 }
47 if data[0] != lzmaFilterID {
48 return errors.New("xz: wrong LZMA2 filter id")
49 }
50 if data[1] != 1 {
51 return errors.New("xz: wrong LZMA2 filter size")
52 }
53 dc, err := lzma.DecodeDictCap(data[2])
54 if err != nil {
55 return errors.New("xz: wrong LZMA2 dictionary size property")
56 }
57
58 f.dictCap = dc
59 return nil
60}
61
62// reader creates a new reader for the LZMA2 filter.
63func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
64 err error) {
65
66 config := new(lzma.Reader2Config)
67 if c != nil {
68 config.DictCap = c.DictCap
69 }
70 dc := int(f.dictCap)
71 if dc < 1 {
72 return nil, errors.New("xz: LZMA2 filter parameter " +
73 "dictionary capacity overflow")
74 }
75 if dc > config.DictCap {
76 config.DictCap = dc
77 }
78
79 fr, err = config.NewReader2(r)
80 if err != nil {
81 return nil, err
82 }
83 return fr, nil
84}
85
86// writeCloser creates an io.WriteCloser for the LZMA2 filter.
87func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
88) (fw io.WriteCloser, err error) {
89 config := new(lzma.Writer2Config)
90 if c != nil {
91 *config = lzma.Writer2Config{
92 Properties: c.Properties,
93 DictCap: c.DictCap,
94 BufSize: c.BufSize,
95 Matcher: c.Matcher,
96 }
97 }
98
99 dc := int(f.dictCap)
100 if dc < 1 {
101 return nil, errors.New("xz: LZMA2 filter parameter " +
102 "dictionary capacity overflow")
103 }
104 if dc > config.DictCap {
105 config.DictCap = dc
106 }
107
108 fw, err = config.NewWriter2(w)
109 if err != nil {
110 return nil, err
111 }
112 return fw, nil
113}
114
115// last returns true, because an LZMA2 filter must be the last filter in
116// the filter list.
117func (f lzmaFilter) last() bool { return true }
diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs
new file mode 100644
index 0000000..a8c612c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/make-docs
@@ -0,0 +1,5 @@
1#!/bin/sh
2
3set -x
4pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
5pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go
new file mode 100644
index 0000000..0634c6b
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/reader.go
@@ -0,0 +1,373 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package xz supports the compression and decompression of xz files. It
6// supports version 1.0.4 of the specification without the non-LZMA2
7// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
8package xz
9
10import (
11 "bytes"
12 "errors"
13 "fmt"
14 "hash"
15 "io"
16
17 "github.com/ulikunitz/xz/internal/xlog"
18 "github.com/ulikunitz/xz/lzma"
19)
20
21// ReaderConfig defines the parameters for the xz reader. The
22// SingleStream parameter requests the reader to assume that the
23// underlying stream contains only a single stream.
24type ReaderConfig struct {
25 DictCap int
26 SingleStream bool
27}
28
29// fill replaces all zero values with their default values.
30func (c *ReaderConfig) fill() {
31 if c.DictCap == 0 {
32 c.DictCap = 8 * 1024 * 1024
33 }
34}
35
36// Verify checks the reader parameters for validity. Zero values will be
37// replaced by default values.
38func (c *ReaderConfig) Verify() error {
39 if c == nil {
40 return errors.New("xz: reader parameters are nil")
41 }
42 lc := lzma.Reader2Config{DictCap: c.DictCap}
43 if err := lc.Verify(); err != nil {
44 return err
45 }
46 return nil
47}
48
49// Reader supports the reading of one or multiple xz streams.
50type Reader struct {
51 ReaderConfig
52
53 xz io.Reader
54 sr *streamReader
55}
56
57// streamReader decodes a single xz stream
58type streamReader struct {
59 ReaderConfig
60
61 xz io.Reader
62 br *blockReader
63 newHash func() hash.Hash
64 h header
65 index []record
66}
67
68// NewReader creates a new xz reader using the default parameters.
69// The function reads and checks the header of the first XZ stream. The
70// reader will process multiple streams including padding.
71func NewReader(xz io.Reader) (r *Reader, err error) {
72 return ReaderConfig{}.NewReader(xz)
73}
74
75// NewReader creates an xz stream reader. The created reader will be
76// able to process multiple streams and padding unless SingleStream
77// has been set in the reader configuration c.
78func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) {
79 if err = c.Verify(); err != nil {
80 return nil, err
81 }
82 r = &Reader{
83 ReaderConfig: c,
84 xz: xz,
85 }
86 if r.sr, err = c.newStreamReader(xz); err != nil {
87 if err == io.EOF {
88 err = io.ErrUnexpectedEOF
89 }
90 return nil, err
91 }
92 return r, nil
93}
94
95var errUnexpectedData = errors.New("xz: unexpected data after stream")
96
97// Read reads uncompressed data from the stream.
98func (r *Reader) Read(p []byte) (n int, err error) {
99 for n < len(p) {
100 if r.sr == nil {
101 if r.SingleStream {
102 data := make([]byte, 1)
103 _, err = io.ReadFull(r.xz, data)
104 if err != io.EOF {
105 return n, errUnexpectedData
106 }
107 return n, io.EOF
108 }
109 for {
110 r.sr, err = r.ReaderConfig.newStreamReader(r.xz)
111 if err != errPadding {
112 break
113 }
114 }
115 if err != nil {
116 return n, err
117 }
118 }
119 k, err := r.sr.Read(p[n:])
120 n += k
121 if err != nil {
122 if err == io.EOF {
123 r.sr = nil
124 continue
125 }
126 return n, err
127 }
128 }
129 return n, nil
130}
131
132var errPadding = errors.New("xz: padding (4 zero bytes) encountered")
133
134// newStreamReader creates a new xz stream reader using the given configuration
135// parameters. NewReader reads and checks the header of the xz stream.
136func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) {
137 if err = c.Verify(); err != nil {
138 return nil, err
139 }
140 data := make([]byte, HeaderLen)
141 if _, err := io.ReadFull(xz, data[:4]); err != nil {
142 return nil, err
143 }
144 if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) {
145 return nil, errPadding
146 }
147 if _, err = io.ReadFull(xz, data[4:]); err != nil {
148 if err == io.EOF {
149 err = io.ErrUnexpectedEOF
150 }
151 return nil, err
152 }
153 r = &streamReader{
154 ReaderConfig: c,
155 xz: xz,
156 index: make([]record, 0, 4),
157 }
158 if err = r.h.UnmarshalBinary(data); err != nil {
159 return nil, err
160 }
161 xlog.Debugf("xz header %s", r.h)
162 if r.newHash, err = newHashFunc(r.h.flags); err != nil {
163 return nil, err
164 }
165 return r, nil
166}
167
168// errIndex indicates an error with the xz file index.
169var errIndex = errors.New("xz: error in xz file index")
170
171// readTail reads the index body and the xz footer.
172func (r *streamReader) readTail() error {
173 index, n, err := readIndexBody(r.xz)
174 if err != nil {
175 if err == io.EOF {
176 err = io.ErrUnexpectedEOF
177 }
178 return err
179 }
180 if len(index) != len(r.index) {
181 return fmt.Errorf("xz: index length is %d; want %d",
182 len(index), len(r.index))
183 }
184 for i, rec := range r.index {
185 if rec != index[i] {
186 return fmt.Errorf("xz: record %d is %v; want %v",
187 i, rec, index[i])
188 }
189 }
190
191 p := make([]byte, footerLen)
192 if _, err = io.ReadFull(r.xz, p); err != nil {
193 if err == io.EOF {
194 err = io.ErrUnexpectedEOF
195 }
196 return err
197 }
198 var f footer
199 if err = f.UnmarshalBinary(p); err != nil {
200 return err
201 }
202 xlog.Debugf("xz footer %s", f)
203 if f.flags != r.h.flags {
204 return errors.New("xz: footer flags incorrect")
205 }
206 if f.indexSize != int64(n)+1 {
207 return errors.New("xz: index size in footer wrong")
208 }
209 return nil
210}
211
212// Read reads actual data from the xz stream.
213func (r *streamReader) Read(p []byte) (n int, err error) {
214 for n < len(p) {
215 if r.br == nil {
216 bh, hlen, err := readBlockHeader(r.xz)
217 if err != nil {
218 if err == errIndexIndicator {
219 if err = r.readTail(); err != nil {
220 return n, err
221 }
222 return n, io.EOF
223 }
224 return n, err
225 }
226 xlog.Debugf("block %v", *bh)
227 r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh,
228 hlen, r.newHash())
229 if err != nil {
230 return n, err
231 }
232 }
233 k, err := r.br.Read(p[n:])
234 n += k
235 if err != nil {
236 if err == io.EOF {
237 r.index = append(r.index, r.br.record())
238 r.br = nil
239 } else {
240 return n, err
241 }
242 }
243 }
244 return n, nil
245}
246
247// countingReader is a reader that counts the bytes read.
248type countingReader struct {
249 r io.Reader
250 n int64
251}
252
253// Read reads data from the wrapped reader and adds the byte count to the n field.
254func (lr *countingReader) Read(p []byte) (n int, err error) {
255 n, err = lr.r.Read(p)
256 lr.n += int64(n)
257 return n, err
258}
259
260// blockReader supports the reading of a block.
261type blockReader struct {
262 lxz countingReader
263 header *blockHeader
264 headerLen int
265 n int64
266 hash hash.Hash
267 r io.Reader
268 err error
269}
270
271// newBlockReader creates a new block reader.
272func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader,
273 hlen int, hash hash.Hash) (br *blockReader, err error) {
274
275 br = &blockReader{
276 lxz: countingReader{r: xz},
277 header: h,
278 headerLen: hlen,
279 hash: hash,
280 }
281
282 fr, err := c.newFilterReader(&br.lxz, h.filters)
283 if err != nil {
284 return nil, err
285 }
286 br.r = io.TeeReader(fr, br.hash)
287
288 return br, nil
289}
290
291// uncompressedSize returns the uncompressed size of the block.
292func (br *blockReader) uncompressedSize() int64 {
293 return br.n
294}
295
296// compressedSize returns the compressed size of the block.
297func (br *blockReader) compressedSize() int64 {
298 return br.lxz.n
299}
300
301// unpaddedSize computes the unpadded size for the block.
302func (br *blockReader) unpaddedSize() int64 {
303 n := int64(br.headerLen)
304 n += br.compressedSize()
305 n += int64(br.hash.Size())
306 return n
307}
308
309// record returns the index record for the current block.
310func (br *blockReader) record() record {
311 return record{br.unpaddedSize(), br.uncompressedSize()}
312}
313
314// errBlockSize indicates that the size of the block in the block header
315// is wrong.
316var errBlockSize = errors.New("xz: wrong uncompressed size for block")
317
318// Read reads data from the block.
319func (br *blockReader) Read(p []byte) (n int, err error) {
320 n, err = br.r.Read(p)
321 br.n += int64(n)
322
323 u := br.header.uncompressedSize
324 if u >= 0 && br.uncompressedSize() > u {
325 return n, errors.New("xz: wrong uncompressed size for block")
326 }
327 c := br.header.compressedSize
328 if c >= 0 && br.compressedSize() > c {
329 return n, errors.New("xz: wrong compressed size for block")
330 }
331 if err != io.EOF {
332 return n, err
333 }
334 if br.uncompressedSize() < u || br.compressedSize() < c {
335 return n, io.ErrUnexpectedEOF
336 }
337
338 s := br.hash.Size()
339 k := padLen(br.lxz.n)
340 q := make([]byte, k+s, k+2*s)
341 if _, err = io.ReadFull(br.lxz.r, q); err != nil {
342 if err == io.EOF {
343 err = io.ErrUnexpectedEOF
344 }
345 return n, err
346 }
347 if !allZeros(q[:k]) {
348 return n, errors.New("xz: non-zero block padding")
349 }
350 checkSum := q[k:]
351 computedSum := br.hash.Sum(checkSum[s:])
352 if !bytes.Equal(checkSum, computedSum) {
353 return n, errors.New("xz: checksum error for block")
354 }
355 return n, io.EOF
356}
357
358func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
359 err error) {
360
361 if err = verifyFilters(f); err != nil {
362 return nil, err
363 }
364
365 fr = r
366 for i := len(f) - 1; i >= 0; i-- {
367 fr, err = f[i].reader(fr, c)
368 if err != nil {
369 return nil, err
370 }
371 }
372 return fr, nil
373}
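
A usage sketch for the xz reader; by default it processes multiple concatenated streams and stream padding. The file name is a placeholder:

package main

import (
    "io"
    "log"
    "os"

    "github.com/ulikunitz/xz"
)

func main() {
    f, err := os.Open("archive.xz")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    r, err := xz.NewReader(f)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := io.Copy(os.Stdout, r); err != nil {
        log.Fatal(err)
    }
}
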
diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go
new file mode 100644
index 0000000..c126f70
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/writer.go
@@ -0,0 +1,386 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "errors"
9 "hash"
10 "io"
11
12 "github.com/ulikunitz/xz/lzma"
13)
14
15// WriterConfig describes the parameters for an xz writer.
16type WriterConfig struct {
17 Properties *lzma.Properties
18 DictCap int
19 BufSize int
20 BlockSize int64
21 // checksum method: CRC32, CRC64 or SHA256
22 CheckSum byte
23 // match algorithm
24 Matcher lzma.MatchAlgorithm
25}
26
27// fill replaces zero values with default values.
28func (c *WriterConfig) fill() {
29 if c.Properties == nil {
30 c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
31 }
32 if c.DictCap == 0 {
33 c.DictCap = 8 * 1024 * 1024
34 }
35 if c.BufSize == 0 {
36 c.BufSize = 4096
37 }
38 if c.BlockSize == 0 {
39 c.BlockSize = maxInt64
40 }
41 if c.CheckSum == 0 {
42 c.CheckSum = CRC64
43 }
44}
45
46// Verify checks the configuration for errors. Zero values will be
47// replaced by default values.
48func (c *WriterConfig) Verify() error {
49 if c == nil {
50 return errors.New("xz: writer configuration is nil")
51 }
52 c.fill()
53 lc := lzma.Writer2Config{
54 Properties: c.Properties,
55 DictCap: c.DictCap,
56 BufSize: c.BufSize,
57 Matcher: c.Matcher,
58 }
59 if err := lc.Verify(); err != nil {
60 return err
61 }
62 if c.BlockSize <= 0 {
63 return errors.New("xz: block size out of range")
64 }
65 if err := verifyFlags(c.CheckSum); err != nil {
66 return err
67 }
68 return nil
69}
70
71// filters creates the filter list for the given parameters.
72func (c *WriterConfig) filters() []filter {
73 return []filter{&lzmaFilter{int64(c.DictCap)}}
74}
75
76// maxInt64 defines the maximum 64-bit signed integer.
77const maxInt64 = 1<<63 - 1
78
79// verifyFilters checks the filter list for the length and the right
80// sequence of filters.
81func verifyFilters(f []filter) error {
82 if len(f) == 0 {
83 return errors.New("xz: no filters")
84 }
85 if len(f) > 4 {
86 return errors.New("xz: more than four filters")
87 }
88 for _, g := range f[:len(f)-1] {
89 if g.last() {
90 return errors.New("xz: last filter is not last")
91 }
92 }
93 if !f[len(f)-1].last() {
94 return errors.New("xz: wrong last filter")
95 }
96 return nil
97}
98
99// newFilterWriteCloser converts a filter list into a WriteCloser that
100// can be used by a blockWriter.
101func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
102 if err = verifyFilters(f); err != nil {
103 return nil, err
104 }
105 fw = nopWriteCloser(w)
106 for i := len(f) - 1; i >= 0; i-- {
107 fw, err = f[i].writeCloser(fw, c)
108 if err != nil {
109 return nil, err
110 }
111 }
112 return fw, nil
113}
114
115// nopWCloser implements a WriteCloser with a Close method not doing
116// anything.
117type nopWCloser struct {
118 io.Writer
119}
120
121// Close returns nil and doesn't do anything else.
122func (c nopWCloser) Close() error {
123 return nil
124}
125
126// nopWriteCloser converts the Writer into a WriteCloser with a Close
127// function that does nothing besides returning nil.
128func nopWriteCloser(w io.Writer) io.WriteCloser {
129 return nopWCloser{w}
130}
131
132// Writer compresses data written to it. It is an io.WriteCloser.
133type Writer struct {
134 WriterConfig
135
136 xz io.Writer
137 bw *blockWriter
138 newHash func() hash.Hash
139 h header
140 index []record
141 closed bool
142}
143
144// newBlockWriter creates a new block writer and writes the header out.
145func (w *Writer) newBlockWriter() error {
146 var err error
147 w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
148 if err != nil {
149 return err
150 }
151 if err = w.bw.writeHeader(w.xz); err != nil {
152 return err
153 }
154 return nil
155}
156
157// closeBlockWriter closes a block writer and records the sizes in the
158// index.
159func (w *Writer) closeBlockWriter() error {
160 var err error
161 if err = w.bw.Close(); err != nil {
162 return err
163 }
164 w.index = append(w.index, w.bw.record())
165 return nil
166}
167
168// NewWriter creates a new xz writer using default parameters.
169func NewWriter(xz io.Writer) (w *Writer, err error) {
170 return WriterConfig{}.NewWriter(xz)
171}
172
173// NewWriter creates a new Writer using the given configuration parameters.
174func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
175 if err = c.Verify(); err != nil {
176 return nil, err
177 }
178 w = &Writer{
179 WriterConfig: c,
180 xz: xz,
181 h: header{c.CheckSum},
182 index: make([]record, 0, 4),
183 }
184 if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
185 return nil, err
186 }
187 data, err := w.h.MarshalBinary()
188 if _, err = xz.Write(data); err != nil {
189 return nil, err
190 }
191 if err = w.newBlockWriter(); err != nil {
192 return nil, err
193 }
194 return w, nil
195
196}
197
198// Write compresses the uncompressed data provided.
199func (w *Writer) Write(p []byte) (n int, err error) {
200 if w.closed {
201 return 0, errClosed
202 }
203 for {
204 k, err := w.bw.Write(p[n:])
205 n += k
206 if err != errNoSpace {
207 return n, err
208 }
209 if err = w.closeBlockWriter(); err != nil {
210 return n, err
211 }
212 if err = w.newBlockWriter(); err != nil {
213 return n, err
214 }
215 }
216}
217
218// Close closes the writer and adds the footer to the Writer. Close
219// doesn't close the underlying writer.
220func (w *Writer) Close() error {
221 if w.closed {
222 return errClosed
223 }
224 w.closed = true
225 var err error
226 if err = w.closeBlockWriter(); err != nil {
227 return err
228 }
229
230 f := footer{flags: w.h.flags}
231 if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
232 return err
233 }
234 data, err := f.MarshalBinary()
235 if err != nil {
236 return err
237 }
238 if _, err = w.xz.Write(data); err != nil {
239 return err
240 }
241 return nil
242}
243
244// countingWriter is a writer that counts all data written to it.
245type countingWriter struct {
246 w io.Writer
247 n int64
248}
249
250// Write writes data to the countingWriter.
251func (cw *countingWriter) Write(p []byte) (n int, err error) {
252 n, err = cw.w.Write(p)
253 cw.n += int64(n)
254 if err == nil && cw.n < 0 {
255 return n, errors.New("xz: counter overflow")
256 }
257 return
258}
259
260// blockWriter writes a single block.
261type blockWriter struct {
262 cxz countingWriter
263 // mw combines io.WriteCloser w and the hash.
264 mw io.Writer
265 w io.WriteCloser
266 n int64
267 blockSize int64
268 closed bool
269 headerLen int
270
271 filters []filter
272 hash hash.Hash
273}
274
275// newBlockWriter creates a new block writer.
276func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
277 bw = &blockWriter{
278 cxz: countingWriter{w: xz},
279 blockSize: c.BlockSize,
280 filters: c.filters(),
281 hash: hash,
282 }
283 bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
284 if err != nil {
285 return nil, err
286 }
287 bw.mw = io.MultiWriter(bw.w, bw.hash)
288 return bw, nil
289}
290
291// writeHeader writes the header. If the function is called after Close
292// the compressedSize and uncompressedSize fields will be filled.
293func (bw *blockWriter) writeHeader(w io.Writer) error {
294 h := blockHeader{
295 compressedSize: -1,
296 uncompressedSize: -1,
297 filters: bw.filters,
298 }
299 if bw.closed {
300 h.compressedSize = bw.compressedSize()
301 h.uncompressedSize = bw.uncompressedSize()
302 }
303 data, err := h.MarshalBinary()
304 if err != nil {
305 return err
306 }
307 if _, err = w.Write(data); err != nil {
308 return err
309 }
310 bw.headerLen = len(data)
311 return nil
312}
313
314// compressedSize returns the number of compressed bytes written to the
315// underlying stream.
316func (bw *blockWriter) compressedSize() int64 {
317 return bw.cxz.n
318}
319
320// uncompressedSize returns the number of uncompressed bytes written to
321// the blockWriter.
322func (bw *blockWriter) uncompressedSize() int64 {
323 return bw.n
324}
325
326// unpaddedSize returns the sum of the header length, the compressed
327// size of the block and the hash size.
328func (bw *blockWriter) unpaddedSize() int64 {
329 if bw.headerLen <= 0 {
330 panic("xz: block header not written")
331 }
332 n := int64(bw.headerLen)
333 n += bw.compressedSize()
334 n += int64(bw.hash.Size())
335 return n
336}
337
338// record returns the index record for the current block. Call Close before
339// calling this method.
340func (bw *blockWriter) record() record {
341 return record{bw.unpaddedSize(), bw.uncompressedSize()}
342}
343
344var errClosed = errors.New("xz: writer already closed")
345
346var errNoSpace = errors.New("xz: no space")
347
348// Write writes uncompressed data to the block writer.
349func (bw *blockWriter) Write(p []byte) (n int, err error) {
350 if bw.closed {
351 return 0, errClosed
352 }
353
354 t := bw.blockSize - bw.n
355 if int64(len(p)) > t {
356 err = errNoSpace
357 p = p[:t]
358 }
359
360 var werr error
361 n, werr = bw.mw.Write(p)
362 bw.n += int64(n)
363 if werr != nil {
364 return n, werr
365 }
366 return n, err
367}
368
369// Close closes the writer.
370func (bw *blockWriter) Close() error {
371 if bw.closed {
372 return errClosed
373 }
374 bw.closed = true
375 if err := bw.w.Close(); err != nil {
376 return err
377 }
378 s := bw.hash.Size()
379 k := padLen(bw.cxz.n)
380 p := make([]byte, k+s)
381 bw.hash.Sum(p[k:k])
382 if _, err := bw.cxz.w.Write(p); err != nil {
383 return err
384 }
385 return nil
386}
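A minimal round-trip sketch of how this Writer is typically driven from application code; it assumes the public import path github.com/ulikunitz/xz and the package's companion NewReader (not part of this hunk) for reading the stream back:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ulikunitz/xz"
)

func main() {
	var buf bytes.Buffer

	// NewWriter uses default parameters; WriterConfig{...}.NewWriter(&buf)
	// could be used instead to tune BlockSize, CheckSum, etc.
	w, err := xz.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "hello, xz"); err != nil {
		panic(err)
	}
	// Close writes the index and stream footer but leaves &buf untouched.
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decompress again to verify the round trip.
	r, err := xz.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, xz
}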
diff --git a/vendor/github.com/zclconf/go-cty/LICENSE b/vendor/github.com/zclconf/go-cty/LICENSE
new file mode 100644
index 0000000..d6503b5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/LICENSE
@@ -0,0 +1,21 @@
1MIT License
2
3Copyright (c) 2017-2018 Martin Atkins
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in all
13copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21SOFTWARE.
diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go
new file mode 100644
index 0000000..4fce92a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go
@@ -0,0 +1,89 @@
1package cty
2
3import (
4 "fmt"
5 "reflect"
6)
7
8type capsuleType struct {
9 typeImplSigil
10 Name string
11 GoType reflect.Type
12}
13
14func (t *capsuleType) Equals(other Type) bool {
15 if otherP, ok := other.typeImpl.(*capsuleType); ok {
16 // capsule types compare by pointer identity
17 return otherP == t
18 }
19 return false
20}
21
22func (t *capsuleType) FriendlyName() string {
23 return t.Name
24}
25
26func (t *capsuleType) GoString() string {
27 // Getting a useful representation of our native type requires some
28 // shenanigans.
29 victimVal := reflect.Zero(t.GoType)
30 return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface())
31}
32
33// Capsule creates a new Capsule type.
34//
35// A Capsule type is a special type that can be used to transport arbitrary
36// Go native values of a given type through the cty type system. A language
37// that uses cty as its type system might, for example, provide functions
38// that return capsule-typed values and then other functions that operate
39// on those values.
40//
41// From cty's perspective, Capsule types have a few interesting characteristics,
42// described in the following paragraphs.
43//
44// Each capsule type has an associated Go native type that it is able to
45// transport. Capsule types compare by identity, so each call to the
46// Capsule function creates an entirely-distinct cty Type, even if two calls
47// use the same native type.
48//
49// Each capsule-typed value contains a pointer to a value of the given native
50// type. A capsule-typed value supports no operations except equality, and
51// equality is implemented by pointer identity of the encapsulated pointer.
52//
53// The given name is used as the new type's "friendly name". This can be any
54// string in principle, but will usually be a short, all-lowercase name aimed
55// at users of the embedding language (i.e. not mentioning Go-specific details)
56// and will ideally not create ambiguity with any predefined cty type.
57//
58// Capsule types are never introduced by any standard cty operation, so a
59// calling application opts in to including them within its own type system
60// by creating them and introducing them via its own functions. At that point,
61// the application is responsible for dealing with any capsule-typed values
62// that might be returned.
63func Capsule(name string, nativeType reflect.Type) Type {
64 return Type{
65 &capsuleType{
66 Name: name,
67 GoType: nativeType,
68 },
69 }
70}
71
72// IsCapsuleType returns true if this type is a capsule type, as created
73// by cty.Capsule .
74func (t Type) IsCapsuleType() bool {
75 _, ok := t.typeImpl.(*capsuleType)
76 return ok
77}
78
79// EncapsulatedType returns the encapsulated native type of a capsule type,
80// or panics if the receiver is not a Capsule type.
81//
82// Use IsCapsuleType to determine if this method is safe to call.
83func (t Type) EncapsulatedType() reflect.Type {
84 impl, ok := t.typeImpl.(*capsuleType)
85 if !ok {
86 panic("not a capsule type")
87 }
88 return impl.GoType
89}
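A short sketch of capsule types in use; it assumes the cty.CapsuleVal constructor and the Value.EncapsulatedValue and Value.RawEquals methods defined elsewhere in this package (not part of this hunk):

package main

import (
	"fmt"
	"reflect"

	"github.com/zclconf/go-cty/cty"
)

// database is an arbitrary Go type to transport through the cty type system.
type database struct{ dsn string }

// Each call to Capsule yields a distinct type, so the type is created once.
var databaseType = cty.Capsule("database", reflect.TypeOf(database{}))

func main() {
	db := &database{dsn: "postgres://example"}

	// Wrap the pointer in a capsule-typed value.
	v := cty.CapsuleVal(databaseType, db)
	fmt.Println(v.Type().IsCapsuleType()) // true

	// Equality is pointer identity of the encapsulated pointer.
	fmt.Println(v.RawEquals(cty.CapsuleVal(databaseType, db))) // true

	// Recover the native Go value on the other side.
	got := v.EncapsulatedValue().(*database)
	fmt.Println(got.dsn) // postgres://example
}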
diff --git a/vendor/github.com/zclconf/go-cty/cty/collection.go b/vendor/github.com/zclconf/go-cty/cty/collection.go
new file mode 100644
index 0000000..ab3919b
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/collection.go
@@ -0,0 +1,34 @@
1package cty
2
3import (
4 "errors"
5)
6
7type collectionTypeImpl interface {
8 ElementType() Type
9}
10
11// IsCollectionType returns true if the given type supports the operations
12// that are defined for all collection types.
13func (t Type) IsCollectionType() bool {
14 _, ok := t.typeImpl.(collectionTypeImpl)
15 return ok
16}
17
18// ElementType returns the element type of the receiver if it is a collection
19// type, or panics if it is not. Use IsCollectionType first to test whether
20// this method will succeed.
21func (t Type) ElementType() Type {
22 if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
23 return ct.ElementType()
24 }
25 panic(errors.New("not a collection type"))
26}
27
28// ElementCallback is a callback type used for iterating over elements of
29// collections and attributes of objects.
30//
31// The types of key and value depend on what type is being iterated over.
32// Return true to stop iterating after the current element, or false to
33// continue iterating.
34type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
new file mode 100644
index 0000000..d84f6ac
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
@@ -0,0 +1,165 @@
1package convert
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// compareTypes implements a preference order for unification.
8//
9// The result of this method is not useful for anything other than unification
10// preferences, since it assumes that the caller will verify that any suggested
11// conversion is actually possible and it is thus able to make certain
12// optimistic assumptions.
13func compareTypes(a cty.Type, b cty.Type) int {
14
15 // DynamicPseudoType always has lowest preference, because anything can
16 // convert to it (it acts as a placeholder for "any type") and we want
17 // to optimistically assume that any dynamics will converge on matching
18 // their neighbors.
19 if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType {
20 if a != cty.DynamicPseudoType {
21 return -1
22 }
23 if b != cty.DynamicPseudoType {
24 return 1
25 }
26 return 0
27 }
28
29 if a.IsPrimitiveType() && b.IsPrimitiveType() {
30 // String is a supertype of all primitive types, because we can
31 // represent all primitive values as specially-formatted strings.
32 if a == cty.String || b == cty.String {
33 if a != cty.String {
34 return 1
35 }
36 if b != cty.String {
37 return -1
38 }
39 return 0
40 }
41 }
42
43 if a.IsListType() && b.IsListType() {
44 return compareTypes(a.ElementType(), b.ElementType())
45 }
46 if a.IsSetType() && b.IsSetType() {
47 return compareTypes(a.ElementType(), b.ElementType())
48 }
49 if a.IsMapType() && b.IsMapType() {
50 return compareTypes(a.ElementType(), b.ElementType())
51 }
52
53 // From this point on we may have swapped the two items in order to
54 // simplify our cases. Therefore any non-zero return after this point
55 // must be multiplied by "swap" to potentially invert the return value
56 // if needed.
57 swap := 1
58 switch {
59 case a.IsTupleType() && b.IsListType():
60 fallthrough
61 case a.IsObjectType() && b.IsMapType():
62 fallthrough
63 case a.IsSetType() && b.IsTupleType():
64 fallthrough
65 case a.IsSetType() && b.IsListType():
66 a, b = b, a
67 swap = -1
68 }
69
70 if b.IsSetType() && (a.IsTupleType() || a.IsListType()) {
71 // We'll just optimistically assume that the element types are
72 // unifyable/convertible, and let a second recursive pass
73 // figure out how to make that so.
74 return -1 * swap
75 }
76
77 if a.IsListType() && b.IsTupleType() {
78 // We'll just optimistically assume that the tuple's element types
79 // can be unified into something compatible with the list's element
80 // type.
81 return -1 * swap
82 }
83
84 if a.IsMapType() && b.IsObjectType() {
85 // We'll just optimistically assume that the object's attribute types
86 // can be unified into something compatible with the map's element
87 // type.
88 return -1 * swap
89 }
90
91 // For object and tuple types, comparing two types doesn't really tell
92 // the whole story because it may be possible to construct a new type C
93 // that is the supertype of both A and B by unifying each attribute/element
94 // separately. That possibility is handled by Unify as a follow-up if
95 // type sorting is insufficient to produce a valid result.
96 //
97 // Here we will take care of the simple possibilities where no new type
98 // is needed.
99 if a.IsObjectType() && b.IsObjectType() {
100 atysA := a.AttributeTypes()
101 atysB := b.AttributeTypes()
102
103 if len(atysA) != len(atysB) {
104 return 0
105 }
106
107 hasASuper := false
108 hasBSuper := false
109 for k := range atysA {
110 if _, has := atysB[k]; !has {
111 return 0
112 }
113
114 cmp := compareTypes(atysA[k], atysB[k])
115 if cmp < 0 {
116 hasASuper = true
117 } else if cmp > 0 {
118 hasBSuper = true
119 }
120 }
121
122 switch {
123 case hasASuper && hasBSuper:
124 return 0
125 case hasASuper:
126 return -1 * swap
127 case hasBSuper:
128 return 1 * swap
129 default:
130 return 0
131 }
132 }
133 if a.IsTupleType() && b.IsTupleType() {
134 etysA := a.TupleElementTypes()
135 etysB := b.TupleElementTypes()
136
137 if len(etysA) != len(etysB) {
138 return 0
139 }
140
141 hasASuper := false
142 hasBSuper := false
143 for i := range etysA {
144 cmp := compareTypes(etysA[i], etysB[i])
145 if cmp < 0 {
146 hasASuper = true
147 } else if cmp > 0 {
148 hasBSuper = true
149 }
150 }
151
152 switch {
153 case hasASuper && hasBSuper:
154 return 0
155 case hasASuper:
156 return -1 * swap
157 case hasBSuper:
158 return 1 * swap
159 default:
160 return 0
161 }
162 }
163
164 return 0
165}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go
new file mode 100644
index 0000000..7bfcc08
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go
@@ -0,0 +1,120 @@
1package convert
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// conversion is an internal variant of Conversion that carries around
8// a cty.Path to be used in error responses.
9type conversion func(cty.Value, cty.Path) (cty.Value, error)
10
11func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion {
12 conv := getConversionKnown(in, out, unsafe)
13 if conv == nil {
14 return nil
15 }
16
17 // Wrap the conversion in some standard checks that we don't want to
18 // have to repeat in every conversion function.
19 return func(in cty.Value, path cty.Path) (cty.Value, error) {
20 if !in.IsKnown() {
21 return cty.UnknownVal(out), nil
22 }
23 if in.IsNull() {
24 // We'll pass through nulls, albeit type converted, and let
25 // the caller deal with whatever handling they want to do in
26 // case null values are considered valid in some applications.
27 return cty.NullVal(out), nil
28 }
29
30 return conv(in, path)
31 }
32}
33
34func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion {
35 switch {
36
37 case out == cty.DynamicPseudoType:
38 // Conversion *to* DynamicPseudoType means that the caller wishes
39 // to allow any type in this position, so we'll produce a do-nothing
40 // conversion that just passes through the value as-is.
41 return dynamicPassthrough
42
43 case unsafe && in == cty.DynamicPseudoType:
44 // Conversion *from* DynamicPseudoType means that we have a value
45 // whose type isn't yet known during type checking. For these we will
46 // assume that conversion will succeed and deal with any errors that
47 // result (which is why we can only do this when "unsafe" is set).
48 return dynamicFixup(out)
49
50 case in.IsPrimitiveType() && out.IsPrimitiveType():
51 conv := primitiveConversionsSafe[in][out]
52 if conv != nil {
53 return conv
54 }
55 if unsafe {
56 return primitiveConversionsUnsafe[in][out]
57 }
58 return nil
59
60 case out.IsListType() && (in.IsListType() || in.IsSetType()):
61 inEty := in.ElementType()
62 outEty := out.ElementType()
63 if inEty.Equals(outEty) {
64 // This indicates that we're converting from set to list with
65 // the same element type, so we don't need an element converter.
66 return conversionCollectionToList(outEty, nil)
67 }
68
69 convEty := getConversion(inEty, outEty, unsafe)
70 if convEty == nil {
71 return nil
72 }
73 return conversionCollectionToList(outEty, convEty)
74
75 case out.IsSetType() && (in.IsListType() || in.IsSetType()):
76 if in.IsListType() && !unsafe {
77 // Conversion from list to set is unsafe because it will lose
78 // information: the ordering will not be preserved, and any
79 // duplicate elements will be conflated.
80 return nil
81 }
82 inEty := in.ElementType()
83 outEty := out.ElementType()
84 convEty := getConversion(inEty, outEty, unsafe)
85 if inEty.Equals(outEty) {
86 // This indicates that we're converting from list to set with
87 // the same element type, so we don't need an element converter.
88 return conversionCollectionToSet(outEty, nil)
89 }
90
91 if convEty == nil {
92 return nil
93 }
94 return conversionCollectionToSet(outEty, convEty)
95
96 case out.IsListType() && in.IsTupleType():
97 outEty := out.ElementType()
98 return conversionTupleToList(in, outEty, unsafe)
99
100 case out.IsMapType() && in.IsObjectType():
101 outEty := out.ElementType()
102 return conversionObjectToMap(in, outEty, unsafe)
103
104 default:
105 return nil
106
107 }
108}
109
110// retConversion wraps a conversion (internal type) so it can be returned
111// as a Conversion (public type).
112func retConversion(conv conversion) Conversion {
113 if conv == nil {
114 return nil
115 }
116
117 return func(in cty.Value) (cty.Value, error) {
118 return conv(in, cty.Path(nil))
119 }
120}
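A brief sketch of the safe/unsafe split as seen through the package's public helpers (Convert, GetConversion and GetConversionUnsafe, defined in public.go later in this change):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Safe: every number has a string representation.
	s, err := convert.Convert(cty.NumberIntVal(5), cty.String)
	fmt.Println(s.AsString(), err) // 5 <nil>

	// Only some strings are numbers, so no safe conversion exists...
	fmt.Println(convert.GetConversion(cty.String, cty.Number) == nil) // true

	// ...but Convert falls back to the unsafe conversion, which can fail
	// at call time for values outside the valid subset.
	n, err := convert.Convert(cty.StringVal("12.5"), cty.Number)
	fmt.Println(n.AsBigFloat(), err) // 12.5 <nil>

	_, err = convert.Convert(cty.StringVal("hello"), cty.Number)
	fmt.Println(err) // a number is required
}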
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
new file mode 100644
index 0000000..eace85d
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
@@ -0,0 +1,226 @@
1package convert
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// conversionCollectionToList returns a conversion that will apply the given
8// conversion to all of the elements of a collection (something that supports
9// ForEachElement and LengthInt) and then returns the result as a list.
10//
11// "conv" can be nil if the elements are expected to already be of the
12// correct type and just need to be re-wrapped into a list. (For example,
13// if we're converting from a set into a list of the same element type.)
14func conversionCollectionToList(ety cty.Type, conv conversion) conversion {
15 return func(val cty.Value, path cty.Path) (cty.Value, error) {
16 elems := make([]cty.Value, 0, val.LengthInt())
17 i := int64(0)
18 path = append(path, nil)
19 it := val.ElementIterator()
20 for it.Next() {
21 _, val := it.Element()
22 var err error
23
24 path[len(path)-1] = cty.IndexStep{
25 Key: cty.NumberIntVal(i),
26 }
27
28 if conv != nil {
29 val, err = conv(val, path)
30 if err != nil {
31 return cty.NilVal, err
32 }
33 }
34 elems = append(elems, val)
35
36 i++
37 }
38
39 if len(elems) == 0 {
40 return cty.ListValEmpty(ety), nil
41 }
42
43 return cty.ListVal(elems), nil
44 }
45}
46
47// conversionCollectionToSet returns a conversion that will apply the given
48// conversion to all of the elements of a collection (something that supports
49// ForEachElement and LengthInt) and then returns the result as a set.
50//
51// "conv" can be nil if the elements are expected to already be of the
52// correct type and just need to be re-wrapped into a set. (For example,
53// if we're converting from a list into a set of the same element type.)
54func conversionCollectionToSet(ety cty.Type, conv conversion) conversion {
55 return func(val cty.Value, path cty.Path) (cty.Value, error) {
56 elems := make([]cty.Value, 0, val.LengthInt())
57 i := int64(0)
58 path = append(path, nil)
59 it := val.ElementIterator()
60 for it.Next() {
61 _, val := it.Element()
62 var err error
63
64 path[len(path)-1] = cty.IndexStep{
65 Key: cty.NumberIntVal(i),
66 }
67
68 if conv != nil {
69 val, err = conv(val, path)
70 if err != nil {
71 return cty.NilVal, err
72 }
73 }
74 elems = append(elems, val)
75
76 i++
77 }
78
79 if len(elems) == 0 {
80 return cty.SetValEmpty(ety), nil
81 }
82
83 return cty.SetVal(elems), nil
84 }
85}
86
87// conversionTupleToList returns a conversion that will take a value of the
88// given tuple type and return a list of the given element type.
89//
90// Will panic if the given tupleType isn't actually a tuple type.
91func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
92 tupleEtys := tupleType.TupleElementTypes()
93
94 if len(tupleEtys) == 0 {
95 // Empty tuple short-circuit
96 return func(val cty.Value, path cty.Path) (cty.Value, error) {
97 return cty.ListValEmpty(listEty), nil
98 }
99 }
100
101 if listEty == cty.DynamicPseudoType {
102 // This is a special case where the caller wants us to find
103 // a suitable single type that all elements can convert to, if
104 // possible.
105 listEty, _ = unify(tupleEtys, unsafe)
106 if listEty == cty.NilType {
107 return nil
108 }
109 }
110
111 elemConvs := make([]conversion, len(tupleEtys))
112 for i, tupleEty := range tupleEtys {
113 if tupleEty.Equals(listEty) {
114 // no conversion required
115 continue
116 }
117
118 elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
119 if elemConvs[i] == nil {
120 // If any of our element conversions are impossible, then our
121 // whole conversion is impossible.
122 return nil
123 }
124 }
125
126 // If we fall out here then a conversion is possible, using the
127 // element conversions in elemConvs
128 return func(val cty.Value, path cty.Path) (cty.Value, error) {
129 elems := make([]cty.Value, 0, len(elemConvs))
130 path = append(path, nil)
131 i := int64(0)
132 it := val.ElementIterator()
133 for it.Next() {
134 _, val := it.Element()
135 var err error
136
137 path[len(path)-1] = cty.IndexStep{
138 Key: cty.NumberIntVal(i),
139 }
140
141 conv := elemConvs[i]
142 if conv != nil {
143 val, err = conv(val, path)
144 if err != nil {
145 return cty.NilVal, err
146 }
147 }
148 elems = append(elems, val)
149
150 i++
151 }
152
153 return cty.ListVal(elems), nil
154 }
155}
156
157// conversionObjectToMap returns a conversion that will take a value of the
158// given object type and return a map of the given element type.
159//
160// Will panic if the given objectType isn't actually an object type.
161func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
162 objectAtys := objectType.AttributeTypes()
163
164 if len(objectAtys) == 0 {
165 // Empty object short-circuit
166 return func(val cty.Value, path cty.Path) (cty.Value, error) {
167 return cty.MapValEmpty(mapEty), nil
168 }
169 }
170
171 if mapEty == cty.DynamicPseudoType {
172 // This is a special case where the caller wants us to find
173 // a suitable single type that all elements can convert to, if
174 // possible.
175 objectAtysList := make([]cty.Type, 0, len(objectAtys))
176 for _, aty := range objectAtys {
177 objectAtysList = append(objectAtysList, aty)
178 }
179 mapEty, _ = unify(objectAtysList, unsafe)
180 if mapEty == cty.NilType {
181 return nil
182 }
183 }
184
185 elemConvs := make(map[string]conversion, len(objectAtys))
186 for name, objectAty := range objectAtys {
187 if objectAty.Equals(mapEty) {
188 // no conversion required
189 continue
190 }
191
192 elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
193 if elemConvs[name] == nil {
194 // If any of our element conversions are impossible, then our
195 // whole conversion is impossible.
196 return nil
197 }
198 }
199
200 // If we fall out here then a conversion is possible, using the
201 // element conversions in elemConvs
202 return func(val cty.Value, path cty.Path) (cty.Value, error) {
203 elems := make(map[string]cty.Value, len(elemConvs))
204 path = append(path, nil)
205 it := val.ElementIterator()
206 for it.Next() {
207 name, val := it.Element()
208 var err error
209
210 path[len(path)-1] = cty.IndexStep{
211 Key: name,
212 }
213
214 conv := elemConvs[name.AsString()]
215 if conv != nil {
216 val, err = conv(val, path)
217 if err != nil {
218 return cty.NilVal, err
219 }
220 }
221 elems[name.AsString()] = val
222 }
223
224 return cty.MapVal(elems), nil
225 }
226}
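A sketch of the tuple-to-list and object-to-map conversions above, driven through the public convert.Convert helper (Value.Index is assumed from the main cty package):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Tuple of (string, number) -> list(string): the number element is
	// handled by the safe number-to-string primitive conversion.
	tup := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(2)})
	l, err := convert.Convert(tup, cty.List(cty.String))
	fmt.Println(err, l.Index(cty.NumberIntVal(1)).AsString()) // <nil> 2

	// Object -> map(string): every attribute must convert to the map's
	// element type, and attribute names become map keys.
	obj := cty.ObjectVal(map[string]cty.Value{
		"host": cty.StringVal("example.com"),
		"port": cty.NumberIntVal(8080),
	})
	m, err := convert.Convert(obj, cty.Map(cty.String))
	fmt.Println(err, m.Index(cty.StringVal("port")).AsString()) // <nil> 8080
}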
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
new file mode 100644
index 0000000..4d19cf6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
@@ -0,0 +1,33 @@
1package convert
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// dynamicFixup deals with just-in-time conversions of values that were
8// input-typed as cty.DynamicPseudoType during analysis, ensuring that
9// we end up with the desired output type once the value is known, or
10// failing with an error if that is not possible.
11//
12// This is in the spirit of the cty philosophy of optimistically assuming that
13// DynamicPseudoType values will become the intended value eventually, and
14// dealing with any inconsistencies during final evaluation.
15func dynamicFixup(wantType cty.Type) conversion {
16 return func(in cty.Value, path cty.Path) (cty.Value, error) {
17 ret, err := Convert(in, wantType)
18 if err != nil {
19 // Re-wrap this error so that the returned path is relative
20 // to the caller's original value, rather than relative to our
21 // conversion value here.
22 return cty.NilVal, path.NewError(err)
23 }
24 return ret, nil
25 }
26}
27
28// dynamicPassthrough is an identity conversion that is used when the
29// target type is DynamicPseudoType, indicating that the caller doesn't care
30// which type is returned.
31func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) {
32 return in, nil
33}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go
new file mode 100644
index 0000000..e563ee3
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go
@@ -0,0 +1,50 @@
1package convert
2
3import (
4 "math/big"
5
6 "github.com/zclconf/go-cty/cty"
7)
8
9var stringTrue = cty.StringVal("true")
10var stringFalse = cty.StringVal("false")
11
12var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{
13 cty.Number: {
14 cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) {
15 f := val.AsBigFloat()
16 return cty.StringVal(f.Text('f', -1)), nil
17 },
18 },
19 cty.Bool: {
20 cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) {
21 if val.True() {
22 return stringTrue, nil
23 } else {
24 return stringFalse, nil
25 }
26 },
27 },
28}
29
30var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{
31 cty.String: {
32 cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) {
33 f, _, err := big.ParseFloat(val.AsString(), 10, 512, big.ToNearestEven)
34 if err != nil {
35 return cty.NilVal, path.NewErrorf("a number is required")
36 }
37 return cty.NumberVal(f), nil
38 },
39 cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) {
40 switch val.AsString() {
41 case "true", "1":
42 return cty.True, nil
43 case "false", "0":
44 return cty.False, nil
45 default:
46 return cty.NilVal, path.NewErrorf("a bool is required")
47 }
48 },
49 },
50}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go
new file mode 100644
index 0000000..2037299
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go
@@ -0,0 +1,15 @@
1// Package convert contains some routines for converting between cty types.
2// The intent of providing this package is to encourage applications using
3// cty to have consistent type conversion behavior for maximal interoperability
4// when Values pass from one application to another.
5//
6// The conversions are categorized into two categories. "Safe" conversions are
7// ones that are guaranteed to succeed if given a non-null value of the
8// appropriate source type. "Unsafe" conversions, on the other hand, are valid
9// for only a subset of input values, and thus may fail with an error when
10// called for values outside of that valid subset.
11//
12// The functions whose names end in Unsafe support all of the conversions that
13// are supported by the corresponding functions whose names do not have that
14// suffix, and then additional unsafe conversions as well.
15package convert
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go
new file mode 100644
index 0000000..55f44ae
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go
@@ -0,0 +1,83 @@
1package convert
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7)
8
9// This file contains the public interface of this package, which is intended
10// to be a small, convenient interface designed for easy integration into
11// a hypothetical language type checker and interpreter.
12
13// Conversion is a named function type representing a conversion from a
14// value of one type to a value of another type.
15//
16// The source type for a conversion is always the source type given to
17// the function that returned the Conversion, but there is no way to recover
18// that from a Conversion value itself. If a Conversion is given a value
19// that is not of its expected type (with the exception of DynamicPseudoType,
20// which is always supported) then the function may panic or produce undefined
21// results.
22type Conversion func(in cty.Value) (out cty.Value, err error)
23
24// GetConversion returns a Conversion between the given in and out Types if
25// a safe one is available, or returns nil otherwise.
26func GetConversion(in cty.Type, out cty.Type) Conversion {
27 return retConversion(getConversion(in, out, false))
28}
29
30// GetConversionUnsafe returns a Conversion between the given in and out Types
31// if either a safe or unsafe one is available, or returns nil otherwise.
32func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
33 return retConversion(getConversion(in, out, true))
34}
35
36// Convert returns the result of converting the given value to the given type
37// if a safe or unsafe conversion is available, or returns an error if such a
38// conversion is impossible.
39//
40// This is a convenience wrapper around calling GetConversionUnsafe and then
41// immediately passing the given value to the resulting function.
42func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
43 if in.Type().Equals(want) {
44 return in, nil
45 }
46
47 conv := GetConversionUnsafe(in.Type(), want)
48 if conv == nil {
49 return cty.NilVal, fmt.Errorf("incorrect type; %s required", want.FriendlyName())
50 }
51 return conv(in)
52}
53
54// Unify attempts to find the most general type that can be converted from
55// all of the given types. If this is possible, that type is returned along
56// with a slice of necessary conversions for some of the given types.
57//
58// If no common supertype can be found, this function returns cty.NilType and
59// a nil slice.
60//
61// If a common supertype *can* be found, the returned slice will always be
62// non-nil and will contain a non-nil conversion for each given type that
63// needs to be converted, with indices corresponding to the input slice.
64// Any given type that does *not* need conversion (because it is already of
65// the appropriate type) will have a nil Conversion.
66//
67// cty.DynamicPseudoType is, as usual, a special case. If the given type list
68// contains a mixture of dynamic and non-dynamic types, the dynamic types are
69// disregarded for type selection and a conversion is returned for them that
70// will attempt a late conversion of the given value to the target type,
71// failing with a conversion error if the eventual concrete type is not
72// compatible. If *all* given types are DynamicPseudoType, or in the
73// degenerate case of an empty slice of types, the returned type is itself
74// cty.DynamicPseudoType and no conversions are attempted.
75func Unify(types []cty.Type) (cty.Type, []Conversion) {
76 return unify(types, false)
77}
78
79// UnifyUnsafe is the same as Unify except that it may return unsafe
80// conversions in situations where a safe conversion isn't also available.
81func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) {
82 return unify(types, true)
83}
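A short sketch of Unify in action; under safe unification, String is the supertype of the other primitives, so it should win the preference order produced by compareTypes and sortTypes:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	ty, convs := convert.Unify([]cty.Type{cty.String, cty.Number, cty.Bool})
	fmt.Println(ty.FriendlyName()) // string
	fmt.Println(convs[0] == nil)   // true: already a string, no conversion needed

	// convs[1] converts Number values to the unified String type.
	v, err := convs[1](cty.NumberIntVal(3))
	fmt.Println(v.AsString(), err) // 3 <nil>
}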
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
new file mode 100644
index 0000000..b776910
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
@@ -0,0 +1,69 @@
1package convert
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// sortTypes produces an ordering of the given types that serves as a
8// preference order for the result of unification of the given types.
9// The return value is a slice of indices into the given slice, and will
10// thus always be the same length as the given slice.
11//
12// The goal is that the most general of the given types will appear first
13// in the ordering. If there are uncomparable pairs of types in the list
14// then they will appear in an undefined order, and the unification pass
15// will presumably then fail.
16func sortTypes(tys []cty.Type) []int {
17 l := len(tys)
18
19 // First we build a graph whose edges represent "more general than",
20 // which we will then do a topological sort of.
21 edges := make([][]int, l)
22 for i := 0; i < (l - 1); i++ {
23 for j := i + 1; j < l; j++ {
24 cmp := compareTypes(tys[i], tys[j])
25 switch {
26 case cmp < 0:
27 edges[i] = append(edges[i], j)
28 case cmp > 0:
29 edges[j] = append(edges[j], i)
30 }
31 }
32 }
33
34 // Compute the in-degree of each node
35 inDegree := make([]int, l)
36 for _, outs := range edges {
37 for _, j := range outs {
38 inDegree[j]++
39 }
40 }
41
42 // The array backing our result will double as our queue for visiting
43 // the nodes, with the queue slice moving along this array until it
44 // is empty and positioned at the end of the array. Thus our visiting
45 // order is also our result order.
46 result := make([]int, l)
47 queue := result[0:0]
48
49 // Initialize the queue with all items of in-degree 0, preserving
50 // their relative order.
51 for i, n := range inDegree {
52 if n == 0 {
53 queue = append(queue, i)
54 }
55 }
56
57 for len(queue) != 0 {
58 i := queue[0]
59 queue = queue[1:]
60 for _, j := range edges[i] {
61 inDegree[j]--
62 if inDegree[j] == 0 {
63 queue = append(queue, j)
64 }
65 }
66 }
67
68 return result
69}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go
new file mode 100644
index 0000000..bd6736b
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go
@@ -0,0 +1,66 @@
1package convert
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// The current unify implementation is somewhat inefficient, but we accept this
8// under the assumption that it will generally be used with small numbers of
9// types and with types of reasonable complexity. However, it does have a
10// "happy path" where all of the given types are equal.
11//
12// This function is likely to have poor performance in cases where any given
13// types are very complex (lots of deeply-nested structures) or if the list
14// of types itself is very large. In particular, it will walk the nested type
15// structure under the given types several times, especially when given a
16// list of types for which unification is not possible, since each permutation
17// will be tried to determine that result.
18func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
19 if len(types) == 0 {
20 // Degenerate case
21 return cty.NilType, nil
22 }
23
24 prefOrder := sortTypes(types)
25
26 // sortTypes gives us an order where earlier items are preferable as
27 // our result type. We'll now walk through these and choose the first
28 // one we encounter for which conversions exist for all source types.
29 conversions := make([]Conversion, len(types))
30Preferences:
31 for _, wantTypeIdx := range prefOrder {
32 wantType := types[wantTypeIdx]
33 for i, tryType := range types {
34 if i == wantTypeIdx {
35 // Don't need to convert our wanted type to itself
36 conversions[i] = nil
37 continue
38 }
39
40 if tryType.Equals(wantType) {
41 conversions[i] = nil
42 continue
43 }
44
45 if unsafe {
46 conversions[i] = GetConversionUnsafe(tryType, wantType)
47 } else {
48 conversions[i] = GetConversion(tryType, wantType)
49 }
50
51 if conversions[i] == nil {
52 // wantType is not a suitable unification type, so we'll
53 // try the next one in our preference order.
54 continue Preferences
55 }
56 }
57
58 return wantType, conversions
59 }
60
61 // TODO: For structural types, try to invent a new type that they
62 // can all be unified to, by unifying their respective attributes.
63
64 // If we fall out here, no unification is possible
65 return cty.NilType, nil
66}
diff --git a/vendor/github.com/zclconf/go-cty/cty/doc.go b/vendor/github.com/zclconf/go-cty/cty/doc.go
new file mode 100644
index 0000000..d31f054
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/doc.go
@@ -0,0 +1,18 @@
1// Package cty (pronounced see-tie) provides some infrastructure for a type
2// system that might be useful for applications that need to represent
3// configuration values provided by the user whose types are not known
4// at compile time, particularly if the calling application also allows
5// such values to be used in expressions.
6//
7// The type system consists of primitive types Number, String and Bool, as
8// well as List and Map collection types and Object types that can have
9// arbitrarily-typed sets of attributes.
10//
11// A set of operations is defined on these types, which is accessible via
12// the wrapper struct Value, which annotates the raw, internal representation
13// of a value with its corresponding type.
14//
15// This package is oriented towards being a building block for configuration
16// languages used to bootstrap an application. It is not optimized for use
17// in tight loops where CPU time or memory pressure are a concern.
18package cty
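A tiny sketch of constructing and inspecting values with the package, assuming the usual Value constructors and accessors (ObjectVal, GetAttr, AsString) defined elsewhere in the package:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	v := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("example"),
		"enabled": cty.True,
		"count":   cty.NumberIntVal(3),
	})

	// Every Value carries its Type alongside the raw data.
	fmt.Println(v.Type().IsObjectType())      // true
	fmt.Println(v.GetAttr("name").AsString()) // example
	fmt.Println(v.GetAttr("enabled").True())  // true
}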
diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go
new file mode 100644
index 0000000..0bf84c7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go
@@ -0,0 +1,191 @@
1package cty
2
3import (
4 "sort"
5
6 "github.com/zclconf/go-cty/cty/set"
7)
8
9// ElementIterator is the interface type returned by Value.ElementIterator to
10// allow the caller to iterate over elements of a collection-typed value.
11//
12// Its usage pattern is as follows:
13//
14// it := val.ElementIterator()
15// for it.Next() {
16// key, val := it.Element()
17// // ...
18// }
19type ElementIterator interface {
20 Next() bool
21 Element() (key Value, value Value)
22}
23
24func canElementIterator(val Value) bool {
25 switch {
26 case val.ty.IsListType():
27 return true
28 case val.ty.IsMapType():
29 return true
30 case val.ty.IsSetType():
31 return true
32 case val.ty.IsTupleType():
33 return true
34 case val.ty.IsObjectType():
35 return true
36 default:
37 return false
38 }
39}
40
41func elementIterator(val Value) ElementIterator {
42 switch {
43 case val.ty.IsListType():
44 return &listElementIterator{
45 ety: val.ty.ElementType(),
46 vals: val.v.([]interface{}),
47 idx: -1,
48 }
49 case val.ty.IsMapType():
50 // We iterate the keys in a predictable lexicographical order so
51 // that results will always be stable given the same input map.
52 rawMap := val.v.(map[string]interface{})
53 keys := make([]string, 0, len(rawMap))
54 for key := range rawMap {
55 keys = append(keys, key)
56 }
57 sort.Strings(keys)
58
59 return &mapElementIterator{
60 ety: val.ty.ElementType(),
61 vals: rawMap,
62 keys: keys,
63 idx: -1,
64 }
65 case val.ty.IsSetType():
66 rawSet := val.v.(set.Set)
67 return &setElementIterator{
68 ety: val.ty.ElementType(),
69 setIt: rawSet.Iterator(),
70 }
71 case val.ty.IsTupleType():
72 return &tupleElementIterator{
73 etys: val.ty.TupleElementTypes(),
74 vals: val.v.([]interface{}),
75 idx: -1,
76 }
77 case val.ty.IsObjectType():
78 // We iterate the keys in a predictable lexicographical order so
79 // that results will always be stable given the same object type.
80 atys := val.ty.AttributeTypes()
81 keys := make([]string, 0, len(atys))
82 for key := range atys {
83 keys = append(keys, key)
84 }
85 sort.Strings(keys)
86
87 return &objectElementIterator{
88 atys: atys,
89 vals: val.v.(map[string]interface{}),
90 attrNames: keys,
91 idx: -1,
92 }
93 default:
94 panic("attempt to iterate on non-collection, non-tuple type")
95 }
96}
97
98type listElementIterator struct {
99 ety Type
100 vals []interface{}
101 idx int
102}
103
104func (it *listElementIterator) Element() (Value, Value) {
105 i := it.idx
106 return NumberIntVal(int64(i)), Value{
107 ty: it.ety,
108 v: it.vals[i],
109 }
110}
111
112func (it *listElementIterator) Next() bool {
113 it.idx++
114 return it.idx < len(it.vals)
115}
116
117type mapElementIterator struct {
118 ety Type
119 vals map[string]interface{}
120 keys []string
121 idx int
122}
123
124func (it *mapElementIterator) Element() (Value, Value) {
125 key := it.keys[it.idx]
126 return StringVal(key), Value{
127 ty: it.ety,
128 v: it.vals[key],
129 }
130}
131
132func (it *mapElementIterator) Next() bool {
133 it.idx++
134 return it.idx < len(it.keys)
135}
136
137type setElementIterator struct {
138 ety Type
139 setIt *set.Iterator
140}
141
142func (it *setElementIterator) Element() (Value, Value) {
143 val := Value{
144 ty: it.ety,
145 v: it.setIt.Value(),
146 }
147 return val, val
148}
149
150func (it *setElementIterator) Next() bool {
151 return it.setIt.Next()
152}
153
154type tupleElementIterator struct {
155 etys []Type
156 vals []interface{}
157 idx int
158}
159
160func (it *tupleElementIterator) Element() (Value, Value) {
161 i := it.idx
162 return NumberIntVal(int64(i)), Value{
163 ty: it.etys[i],
164 v: it.vals[i],
165 }
166}
167
168func (it *tupleElementIterator) Next() bool {
169 it.idx++
170 return it.idx < len(it.vals)
171}
172
173type objectElementIterator struct {
174 atys map[string]Type
175 vals map[string]interface{}
176 attrNames []string
177 idx int
178}
179
180func (it *objectElementIterator) Element() (Value, Value) {
181 key := it.attrNames[it.idx]
182 return StringVal(key), Value{
183 ty: it.atys[key],
184 v: it.vals[key],
185 }
186}
187
188func (it *objectElementIterator) Next() bool {
189 it.idx++
190 return it.idx < len(it.attrNames)
191}
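A concrete sketch of the iteration pattern from the ElementIterator doc comment, using a map value (keys come back in the lexicographical order imposed by mapElementIterator):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"b": cty.NumberIntVal(2),
		"a": cty.NumberIntVal(1),
	})

	it := m.ElementIterator()
	for it.Next() {
		k, v := it.Element()
		n, _ := v.AsBigFloat().Int64()
		fmt.Printf("%s = %d\n", k.AsString(), n)
	}
	// Output:
	// a = 1
	// b = 2
}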
diff --git a/vendor/github.com/zclconf/go-cty/cty/error.go b/vendor/github.com/zclconf/go-cty/cty/error.go
new file mode 100644
index 0000000..dd139f7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/error.go
@@ -0,0 +1,55 @@
1package cty
2
3import (
4 "fmt"
5)
6
7// PathError is a specialization of error that represents where in a
8// potentially-deep data structure an error occurred, using a Path.
9type PathError struct {
10 error
11 Path Path
12}
13
14func errorf(path Path, f string, args ...interface{}) error {
15 // We need to copy the Path because often our caller builds it by
16 // continually mutating the same underlying buffer.
17 sPath := make(Path, len(path))
18 copy(sPath, path)
19 return PathError{
20 error: fmt.Errorf(f, args...),
21 Path: sPath,
22 }
23}
24
25// NewErrorf creates a new PathError for the current path by passing the
26// given format and arguments to fmt.Errorf and then wrapping the result
27// similarly to NewError.
28func (p Path) NewErrorf(f string, args ...interface{}) error {
29 return errorf(p, f, args...)
30}
31
32// NewError creates a new PathError for the current path, wrapping the given
33// error.
34func (p Path) NewError(err error) error {
35 // if we're being asked to wrap an existing PathError then our new
36 // PathError will be the concatenation of the two paths, ensuring
37 // that we still get a single flat PathError that's thus easier for
38 // callers to deal with.
39 perr, wrappingPath := err.(PathError)
40 pathLen := len(p)
41 if wrappingPath {
42 pathLen = pathLen + len(perr.Path)
43 }
44
45 sPath := make(Path, pathLen)
46 copy(sPath, p)
47 if wrappingPath {
48 copy(sPath[len(p):], perr.Path)
49 }
50
51 return PathError{
52 error: err,
53 Path: sPath,
54 }
55}
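A small sketch of producing and consuming a PathError; GetAttrStep and IndexStep are the path step types defined in path.go (not part of this hunk):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A conversion failing two levels deep might report its location so:
	path := cty.Path{
		cty.GetAttrStep{Name: "items"},
		cty.IndexStep{Key: cty.NumberIntVal(3)},
	}
	err := path.NewErrorf("a number is required")

	// Callers can recover the structured path from the error value.
	if perr, ok := err.(cty.PathError); ok {
		fmt.Println(len(perr.Path), perr.Error()) // 2 a number is required
	}
}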
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
new file mode 100644
index 0000000..bfd3015
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
@@ -0,0 +1,50 @@
1package function
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// Parameter represents a parameter to a function.
8type Parameter struct {
9 // Name is an optional name for the argument. This package ignores this
10 // value, but callers may use it for documentation, etc.
11 Name string
12
13 // A type that any argument for this parameter must conform to.
14 // cty.DynamicPseudoType can be used, either at top-level or nested
15 // in a parameterized type, to indicate that any type should be
16 // permitted, to allow the definition of type-generic functions.
17 Type cty.Type
18
19 // If AllowNull is set then null values may be passed into this
20 // argument's slot in both the type-check function and the implementation
21 // function. If not set, such values are rejected by the built-in
22 // checking rules.
23 AllowNull bool
24
25 // If AllowUnknown is set then unknown values may be passed into this
26 // argument's slot in the implementation function. If not set, any
27 // unknown values will cause the function to immediately return
28 // an unkonwn value without calling the implementation function, thus
29 // freeing the function implementer from dealing with this case.
30 AllowUnknown bool
31
32 // If AllowDynamicType is set then DynamicVal may be passed into this
33 // argument's slot in the implementation function. If not set, any
34 // dynamic values will cause the function to immediately return
35 // DynamicVal value without calling the implementation function, thus
36 // freeing the function implementer from dealing with this case.
37 //
38 // Note that DynamicVal is also unknown, so in order to receive dynamic
39 // *values* it is also necessary to set AllowUnknown.
40 //
41 // However, it is valid to set AllowDynamicType without AllowUnknown, in
42 // which case a dynamic value may be passed to the type checking function
43 // but will not make it to the *implementation* function. Instead, an
44 // unknown value of the type returned by the type-check function will be
45 // returned. This is suggested for functions that have a static return
46 // type since it allows the return value to be typed even if the input
47 // values are not, thus improving the type-check accuracy of derived
48 // values.
49 AllowDynamicType bool
50}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
new file mode 100644
index 0000000..393b311
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
@@ -0,0 +1,6 @@
1// Package function builds on the functionality of cty by modeling functions
2// that operate on cty Values.
3//
4// Functions are, at their core, Go anonymous functions. However, this package
5// wraps them with utility functions for parameter type checking, etc.
6package function
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/error.go b/vendor/github.com/zclconf/go-cty/cty/function/error.go
new file mode 100644
index 0000000..2b56779
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/error.go
@@ -0,0 +1,50 @@
1package function
2
3import (
4 "fmt"
5 "runtime/debug"
6)
7
8// ArgError represents an error with one of the arguments in a call. The
9// attribute Index represents the zero-based index of the argument in question.
10//
11// Its error *may* be a cty.PathError, in which case the error actually
12// pertains to a nested value within the data structure passed as the argument.
13type ArgError struct {
14 error
15 Index int
16}
17
18func NewArgErrorf(i int, f string, args ...interface{}) error {
19 return ArgError{
20 error: fmt.Errorf(f, args...),
21 Index: i,
22 }
23}
24
25func NewArgError(i int, err error) error {
26 return ArgError{
27 error: err,
28 Index: i,
29 }
30}
31
32// PanicError indicates that a panic occurred while executing either a
33// function's type or implementation function. This is captured and wrapped
34// into a normal error so that callers (expected to be language runtimes)
35// are freed from having to deal with panics in buggy functions.
36type PanicError struct {
37 Value interface{}
38 Stack []byte
39}
40
41func errorForPanic(val interface{}) error {
42 return PanicError{
43 Value: val,
44 Stack: debug.Stack(),
45 }
46}
47
48func (e PanicError) Error() string {
49 return fmt.Sprintf("panic in function implementation: %s\n%s", e.Value, e.Stack)
50}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go
new file mode 100644
index 0000000..162f7bf
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go
@@ -0,0 +1,291 @@
1package function
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7)
8
9// Function represents a function. This is the main type in this package.
10type Function struct {
11 spec *Spec
12}
13
14// Spec is the specification of a function, used to instantiate
15// a new Function.
16type Spec struct {
17 // Params is a description of the positional parameters for the function.
18 // The standard checking logic rejects any calls that do not provide
19 // arguments conforming to this definition, freeing the function
20 // implementer from dealing with such inconsistencies.
21 Params []Parameter
22
23 // VarParam is an optional specification of additional "varargs" the
24 // function accepts. If this is non-nil then callers may provide an
25 // arbitrary number of additional arguments (after those matching with
26 // the fixed parameters in Params) that conform to the given specification,
27 // which will appear as additional values in the slices of values
28 // provided to the type and implementation functions.
29 VarParam *Parameter
30
31 // Type is the TypeFunc that decides the return type of the function
32 // given its arguments, which may be Unknown. See the documentation
33 // of TypeFunc for more information.
34 //
35 // Use StaticReturnType if the function's return type does not vary
36 // depending on its arguments.
37 Type TypeFunc
38
39 // Impl is the ImplFunc that implements the function's behavior.
40 //
41 // Functions are expected to behave as pure functions, and not create
42 // any visible side-effects.
43 //
44 // If a TypeFunc is also provided, the value returned from Impl *must*
45 // conform to the type it returns, or a call to the function will panic.
46 Impl ImplFunc
47}
48
49// New creates a new function with the given specification.
50//
51// After passing a Spec to this function, the caller must no longer read from
52// or mutate it.
53func New(spec *Spec) Function {
54 f := Function{
55 spec: spec,
56 }
57 return f
58}
59
60// TypeFunc is a callback type for determining the return type of a function
61// given its arguments.
62//
63// Any of the values passed to this function may be unknown, even if the
64// parameters are not configured to accept unknowns.
65//
66// If any of the given values are *not* unknown, the TypeFunc may use the
67// values for pre-validation and for choosing the return type. For example,
68// a hypothetical JSON-unmarshalling function could return
69// cty.DynamicPseudoType if the given JSON string is unknown, but return
70// a concrete type based on the JSON structure if the JSON string is already
71// known.
72type TypeFunc func(args []cty.Value) (cty.Type, error)
73
74// ImplFunc is a callback type for the main implementation of a function.
75//
76// "args" are the values for the arguments, and this slice will always be at
77// least as long as the argument definition slice for the function.
78//
79// "retType" is the type returned from the Type callback, included as a
80// convenience to avoid the need to re-compute the return type for generic
81// functions whose return type is a function of the arguments.
82type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error)
83
84// StaticReturnType returns a TypeFunc that always returns the given type.
85//
86// This is provided as a convenience for defining a function whose return
87// type does not depend on the argument types.
88func StaticReturnType(ty cty.Type) TypeFunc {
89 return func([]cty.Value) (cty.Type, error) {
90 return ty, nil
91 }
92}
93
94// ReturnType returns the return type of a function given a set of candidate
95// argument types, or returns an error if the given types are unacceptable.
96//
97// If the caller already knows values for at least some of the arguments
98// it can be better to call ReturnTypeForValues, since certain functions may
99// determine their return types from their values and return DynamicVal if
100// the values are unknown.
101func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) {
102 vals := make([]cty.Value, len(argTypes))
103 for i, ty := range argTypes {
104 vals[i] = cty.UnknownVal(ty)
105 }
106 return f.ReturnTypeForValues(vals)
107}
108
109// ReturnTypeForValues is similar to ReturnType but can be used if the caller
110// already knows the values of some or all of the arguments, in which case
111// the function may be able to determine a more definite result if its
112// return type depends on the argument *values*.
113//
114// For any arguments whose values are not known, pass an Unknown value of
115// the appropriate type.
116func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) {
117 var posArgs []cty.Value
118 var varArgs []cty.Value
119
120 if f.spec.VarParam == nil {
121 if len(args) != len(f.spec.Params) {
122 return cty.Type{}, fmt.Errorf(
123 "wrong number of arguments (%d required; %d given)",
124 len(f.spec.Params), len(args),
125 )
126 }
127
128 posArgs = args
129 varArgs = nil
130 } else {
131 if len(args) < len(f.spec.Params) {
132 return cty.Type{}, fmt.Errorf(
133 "wrong number of arguments (at least %d required; %d given)",
134 len(f.spec.Params), len(args),
135 )
136 }
137
138 posArgs = args[0:len(f.spec.Params)]
139 varArgs = args[len(f.spec.Params):]
140 }
141
142 for i, spec := range f.spec.Params {
143 val := posArgs[i]
144
145 if val.IsNull() && !spec.AllowNull {
146 return cty.Type{}, NewArgErrorf(i, "must not be null")
147 }
148
149 // AllowUnknown is ignored for type-checking, since we expect to be
150 // able to type check with unknown values. We *do* still need to deal
151 // with DynamicPseudoType here though, since the Type function might
152 // not be ready to deal with that.
153
154 if val.Type() == cty.DynamicPseudoType {
155 if !spec.AllowDynamicType {
156 return cty.DynamicPseudoType, nil
157 }
158 } else if errs := val.Type().TestConformance(spec.Type); errs != nil {
159 // For now we'll just return the first error in the set, since
160 // we don't have a good way to return the whole list here.
161 // Would be good to do something better at some point...
162 return cty.Type{}, NewArgError(i, errs[0])
163 }
164 }
165
166 if varArgs != nil {
167 spec := f.spec.VarParam
168 for i, val := range varArgs {
169 realI := i + len(posArgs)
170
171 if val.IsNull() && !spec.AllowNull {
172 return cty.Type{}, NewArgErrorf(realI, "must not be null")
173 }
174
175 if val.Type() == cty.DynamicPseudoType {
176 if !spec.AllowDynamicType {
177 return cty.DynamicPseudoType, nil
178 }
179 } else if errs := val.Type().TestConformance(spec.Type); errs != nil {
180 // For now we'll just return the first error in the set, since
181 // we don't have a good way to return the whole list here.
182 // Would be good to do something better at some point...
183 return cty.Type{}, NewArgError(realI, errs[0])
184 }
185 }
186 }
187
188 // Intercept any panics from the function and return them as normal errors,
189 // so a calling language runtime doesn't need to deal with panics.
190 defer func() {
191 if r := recover(); r != nil {
192 ty = cty.NilType
193 err = errorForPanic(r)
194 }
195 }()
196
197 return f.spec.Type(args)
198}
199
200// Call actually calls the function with the given arguments, which must
201// conform to the function's parameter specification or an error will be
202// returned.
203func (f Function) Call(args []cty.Value) (val cty.Value, err error) {
204 expectedType, err := f.ReturnTypeForValues(args)
205 if err != nil {
206 return cty.NilVal, err
207 }
208
209 // Type checking already dealt with most situations relating to our
210 // parameter specification, but we still need to deal with unknown
211 // values.
212 posArgs := args[:len(f.spec.Params)]
213 varArgs := args[len(f.spec.Params):]
214
215 for i, spec := range f.spec.Params {
216 val := posArgs[i]
217
218 if !val.IsKnown() && !spec.AllowUnknown {
219 return cty.UnknownVal(expectedType), nil
220 }
221 }
222
223 if f.spec.VarParam != nil {
224 spec := f.spec.VarParam
225 for _, val := range varArgs {
226 if !val.IsKnown() && !spec.AllowUnknown {
227 return cty.UnknownVal(expectedType), nil
228 }
229 }
230 }
231
232 var retVal cty.Value
233 {
234 // Intercept any panics from the function and return them as normal errors,
235 // so a calling language runtime doesn't need to deal with panics.
236 defer func() {
237 if r := recover(); r != nil {
238 val = cty.NilVal
239 err = errorForPanic(r)
240 }
241 }()
242
243 retVal, err = f.spec.Impl(args, expectedType)
244 if err != nil {
245 return cty.NilVal, err
246 }
247 }
248
249 // Returned value must conform to what the Type function expected, to
250 // protect callers from having to deal with inconsistencies.
251 if errs := retVal.Type().TestConformance(expectedType); errs != nil {
252 panic(fmt.Errorf(
253 "returned value %#v does not conform to expected return type %#v: %s",
254 retVal, expectedType, errs[0],
255 ))
256 }
257
258 return retVal, nil
259}
260
 261// ProxyFunc is the type returned by the method Function.Proxy.
262type ProxyFunc func(args ...cty.Value) (cty.Value, error)
263
264// Proxy returns a function that can be called with cty.Value arguments
265// to run the function. This is provided as a convenience for when using
266// a function directly within Go code.
267func (f Function) Proxy() ProxyFunc {
268 return func(args ...cty.Value) (cty.Value, error) {
269 return f.Call(args)
270 }
271}
272
273// Params returns information about the function's fixed positional parameters.
274// This does not include information about any variadic arguments accepted;
275// for that, call VarParam.
276func (f Function) Params() []Parameter {
 277 ret := make([]Parameter, len(f.spec.Params))
 278 copy(ret, f.spec.Params)
 279 return ret
280}
281
282// VarParam returns information about the variadic arguments the function
283// expects, or nil if the function is not variadic.
284func (f Function) VarParam() *Parameter {
285 if f.spec.VarParam == nil {
286 return nil
287 }
288
289 ret := *f.spec.VarParam
290 return &ret
291}
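
To make the Call and Proxy flow above concrete, here is a minimal standalone sketch (not part of the vendored code; the "greet" function is invented purely for illustration) that defines a Spec, calls it directly, and then calls it again through a ProxyFunc:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// greetFunc is a hypothetical function, defined only to illustrate the API.
var greetFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "name", Type: cty.String},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal("Hello, " + args[0].AsString() + "!"), nil
	},
})

func main() {
	// Call type-checks the arguments against the Spec and then runs Impl.
	v, err := greetFunc.Call([]cty.Value{cty.StringVal("world")})
	fmt.Println(v.AsString(), err) // Hello, world! <nil>

	// Proxy wraps Call in a plain variadic Go closure (a ProxyFunc).
	greet := greetFunc.Proxy()
	v, _ = greet(cty.StringVal("cty"))
	fmt.Println(v.AsString()) // Hello, cty!
}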
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
new file mode 100644
index 0000000..a473d0e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
@@ -0,0 +1,73 @@
1package stdlib
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 "github.com/zclconf/go-cty/cty/function"
6)
7
8var NotFunc = function.New(&function.Spec{
9 Params: []function.Parameter{
10 {
11 Name: "val",
12 Type: cty.Bool,
13 AllowDynamicType: true,
14 },
15 },
16 Type: function.StaticReturnType(cty.Bool),
17 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
18 return args[0].Not(), nil
19 },
20})
21
22var AndFunc = function.New(&function.Spec{
23 Params: []function.Parameter{
24 {
25 Name: "a",
26 Type: cty.Bool,
27 AllowDynamicType: true,
28 },
29 {
30 Name: "b",
31 Type: cty.Bool,
32 AllowDynamicType: true,
33 },
34 },
35 Type: function.StaticReturnType(cty.Bool),
36 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
37 return args[0].And(args[1]), nil
38 },
39})
40
41var OrFunc = function.New(&function.Spec{
42 Params: []function.Parameter{
43 {
44 Name: "a",
45 Type: cty.Bool,
46 AllowDynamicType: true,
47 },
48 {
49 Name: "b",
50 Type: cty.Bool,
51 AllowDynamicType: true,
52 },
53 },
54 Type: function.StaticReturnType(cty.Bool),
55 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
56 return args[0].Or(args[1]), nil
57 },
58})
59
60// Not returns the logical complement of the given boolean value.
 61func Not(val cty.Value) (cty.Value, error) {
 62 return NotFunc.Call([]cty.Value{val})
63}
64
65// And returns true if and only if both of the given boolean values are true.
66func And(a, b cty.Value) (cty.Value, error) {
67 return AndFunc.Call([]cty.Value{a, b})
68}
69
 70// Or returns true if either of the given boolean values is true.
71func Or(a, b cty.Value) (cty.Value, error) {
72 return OrFunc.Call([]cty.Value{a, b})
73}
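
A brief illustrative sketch (not part of the diff) of calling the boolean wrappers defined above from Go code; the import path follows the one used in the file itself:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	notA, _ := stdlib.Not(cty.True)             // cty.False
	both, _ := stdlib.And(cty.True, cty.False)  // cty.False
	either, _ := stdlib.Or(cty.True, cty.False) // cty.True

	fmt.Println(notA.True(), both.True(), either.True()) // false false true
}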
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
new file mode 100644
index 0000000..a132e0c
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
@@ -0,0 +1,112 @@
1package stdlib
2
3import (
4 "fmt"
5 "reflect"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/function"
9 "github.com/zclconf/go-cty/cty/gocty"
10)
11
12// Bytes is a capsule type that can be used with the binary functions to
 13// support applications that need to work with raw buffers in addition to
14// UTF-8 strings.
15var Bytes = cty.Capsule("bytes", reflect.TypeOf([]byte(nil)))
16
17// BytesVal creates a new Bytes value from the given buffer, which must be
18// non-nil or this function will panic.
19//
20// Once a byte slice has been wrapped in a Bytes capsule, its underlying array
21// must be considered immutable.
22func BytesVal(buf []byte) cty.Value {
23 if buf == nil {
24 panic("can't make Bytes value from nil slice")
25 }
26
27 return cty.CapsuleVal(Bytes, &buf)
28}
29
 30// BytesLenFunc is a Function that returns the length of the buffer
 31// encapsulated in a Bytes value.
32var BytesLenFunc = function.New(&function.Spec{
33 Params: []function.Parameter{
34 {
35 Name: "buf",
36 Type: Bytes,
37 AllowDynamicType: true,
38 },
39 },
40 Type: function.StaticReturnType(cty.Number),
41 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
42 bufPtr := args[0].EncapsulatedValue().(*[]byte)
43 return cty.NumberIntVal(int64(len(*bufPtr))), nil
44 },
45})
46
 47// BytesSliceFunc is a Function that returns a slice of the given Bytes value.
48var BytesSliceFunc = function.New(&function.Spec{
49 Params: []function.Parameter{
50 {
51 Name: "buf",
52 Type: Bytes,
53 AllowDynamicType: true,
54 },
55 {
56 Name: "offset",
57 Type: cty.Number,
58 AllowDynamicType: true,
59 },
60 {
61 Name: "length",
62 Type: cty.Number,
63 AllowDynamicType: true,
64 },
65 },
66 Type: function.StaticReturnType(Bytes),
67 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
68 bufPtr := args[0].EncapsulatedValue().(*[]byte)
69
70 var offset, length int
71
72 var err error
73 err = gocty.FromCtyValue(args[1], &offset)
74 if err != nil {
75 return cty.NilVal, err
76 }
77 err = gocty.FromCtyValue(args[2], &length)
78 if err != nil {
79 return cty.NilVal, err
80 }
81
82 if offset < 0 || length < 0 {
83 return cty.NilVal, fmt.Errorf("offset and length must be non-negative")
84 }
85
86 if offset > len(*bufPtr) {
87 return cty.NilVal, fmt.Errorf(
88 "offset %d is greater than total buffer length %d",
89 offset, len(*bufPtr),
90 )
91 }
92
93 end := offset + length
94
95 if end > len(*bufPtr) {
96 return cty.NilVal, fmt.Errorf(
97 "offset %d + length %d is greater than total buffer length %d",
98 offset, length, len(*bufPtr),
99 )
100 }
101
102 return BytesVal((*bufPtr)[offset:end]), nil
103 },
104})
105
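// BytesLen returns the length, in bytes, of the buffer encapsulated in the
// given Bytes value.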
106func BytesLen(buf cty.Value) (cty.Value, error) {
107 return BytesLenFunc.Call([]cty.Value{buf})
108}
109
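// BytesSlice returns a new Bytes value containing the requested range of the
// given Bytes value, where offset is the starting index and length is the
// number of bytes to take.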
110func BytesSlice(buf cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) {
111 return BytesSliceFunc.Call([]cty.Value{buf, offset, length})
112}
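
The following hypothetical snippet sketches how the Bytes capsule helpers above might be used together; the values are invented for illustration:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	buf := stdlib.BytesVal([]byte("hello world"))

	n, _ := stdlib.BytesLen(buf) // cty.NumberIntVal(11)

	// Take the first five bytes; offset and length are cty numbers.
	head, _ := stdlib.BytesSlice(buf, cty.NumberIntVal(0), cty.NumberIntVal(5))
	headBytes := *(head.EncapsulatedValue().(*[]byte))

	fmt.Println(n.AsBigFloat(), string(headBytes)) // 11 hello
}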
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go
new file mode 100644
index 0000000..967ba03
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go
@@ -0,0 +1,140 @@
1package stdlib
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/gocty"
9)
10
11var HasIndexFunc = function.New(&function.Spec{
12 Params: []function.Parameter{
13 {
14 Name: "collection",
15 Type: cty.DynamicPseudoType,
16 AllowDynamicType: true,
17 },
18 {
19 Name: "key",
20 Type: cty.DynamicPseudoType,
21 AllowDynamicType: true,
22 },
23 },
24 Type: func(args []cty.Value) (ret cty.Type, err error) {
25 collTy := args[0].Type()
26 if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy == cty.DynamicPseudoType) {
27 return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple")
28 }
29 return cty.Bool, nil
30 },
31 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
32 return args[0].HasIndex(args[1]), nil
33 },
34})
35
36var IndexFunc = function.New(&function.Spec{
37 Params: []function.Parameter{
38 {
39 Name: "collection",
40 Type: cty.DynamicPseudoType,
41 },
42 {
43 Name: "key",
44 Type: cty.DynamicPseudoType,
45 AllowDynamicType: true,
46 },
47 },
48 Type: func(args []cty.Value) (ret cty.Type, err error) {
49 collTy := args[0].Type()
50 key := args[1]
51 keyTy := key.Type()
52 switch {
53 case collTy.IsTupleType():
54 if keyTy != cty.Number && keyTy != cty.DynamicPseudoType {
55 return cty.NilType, fmt.Errorf("key for tuple must be number")
56 }
57 if !key.IsKnown() {
58 return cty.DynamicPseudoType, nil
59 }
60 var idx int
61 err := gocty.FromCtyValue(key, &idx)
62 if err != nil {
63 return cty.NilType, fmt.Errorf("invalid key for tuple: %s", err)
64 }
65
66 etys := collTy.TupleElementTypes()
67
68 if idx >= len(etys) || idx < 0 {
 69 return cty.NilType, fmt.Errorf("key must be between 0 and %d inclusive", len(etys)-1)
70 }
71
72 return etys[idx], nil
73
74 case collTy.IsListType():
75 if keyTy != cty.Number && keyTy != cty.DynamicPseudoType {
76 return cty.NilType, fmt.Errorf("key for list must be number")
77 }
78
79 return collTy.ElementType(), nil
80
81 case collTy.IsMapType():
82 if keyTy != cty.String && keyTy != cty.DynamicPseudoType {
83 return cty.NilType, fmt.Errorf("key for map must be string")
84 }
85
86 return collTy.ElementType(), nil
87
88 default:
89 return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple")
90 }
91 },
92 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
93 has, err := HasIndex(args[0], args[1])
94 if err != nil {
95 return cty.NilVal, err
96 }
97 if has.False() { // safe because collection and key are guaranteed known here
98 return cty.NilVal, fmt.Errorf("invalid index")
99 }
100
101 return args[0].Index(args[1]), nil
102 },
103})
104
105var LengthFunc = function.New(&function.Spec{
106 Params: []function.Parameter{
107 {
108 Name: "collection",
109 Type: cty.DynamicPseudoType,
110 AllowDynamicType: true,
111 },
112 },
113 Type: func(args []cty.Value) (ret cty.Type, err error) {
114 collTy := args[0].Type()
115 if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType) {
 116 return cty.NilType, fmt.Errorf("collection must be a list, a set, a map or a tuple")
117 }
118 return cty.Number, nil
119 },
120 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
121 return args[0].Length(), nil
122 },
123})
124
125// HasIndex determines whether the given collection can be indexed with the
126// given key.
127func HasIndex(collection cty.Value, key cty.Value) (cty.Value, error) {
128 return HasIndexFunc.Call([]cty.Value{collection, key})
129}
130
131// Index returns an element from the given collection using the given key,
132// or returns an error if there is no element for the given key.
133func Index(collection cty.Value, key cty.Value) (cty.Value, error) {
134 return IndexFunc.Call([]cty.Value{collection, key})
135}
136
137// Length returns the number of elements in the given collection.
138func Length(collection cty.Value) (cty.Value, error) {
139 return LengthFunc.Call([]cty.Value{collection})
140}
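
A small illustrative sketch (assumed usage, not part of the vendored code) exercising HasIndex, Index, and Length on a list value:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
	})

	ok, _ := stdlib.HasIndex(list, cty.NumberIntVal(1)) // cty.True
	elem, _ := stdlib.Index(list, cty.NumberIntVal(1))  // cty.StringVal("b")
	n, _ := stdlib.Length(list)                         // cty.NumberIntVal(2)

	fmt.Println(ok.True(), elem.AsString(), n.AsBigFloat()) // true b 2
}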
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go
new file mode 100644
index 0000000..5070a5a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go
@@ -0,0 +1,93 @@
1package stdlib
2
3import (
4 "encoding/csv"
5 "fmt"
6 "io"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/function"
11)
12
13var CSVDecodeFunc = function.New(&function.Spec{
14 Params: []function.Parameter{
15 {
16 Name: "str",
17 Type: cty.String,
18 },
19 },
20 Type: func(args []cty.Value) (cty.Type, error) {
21 str := args[0]
22 if !str.IsKnown() {
23 return cty.DynamicPseudoType, nil
24 }
25
26 r := strings.NewReader(str.AsString())
27 cr := csv.NewReader(r)
28 headers, err := cr.Read()
29 if err == io.EOF {
30 return cty.DynamicPseudoType, fmt.Errorf("missing header line")
31 }
32 if err != nil {
33 return cty.DynamicPseudoType, err
34 }
35
36 atys := make(map[string]cty.Type, len(headers))
37 for _, name := range headers {
38 if _, exists := atys[name]; exists {
39 return cty.DynamicPseudoType, fmt.Errorf("duplicate column name %q", name)
40 }
41 atys[name] = cty.String
42 }
43 return cty.List(cty.Object(atys)), nil
44 },
45 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
46 ety := retType.ElementType()
47 atys := ety.AttributeTypes()
48 str := args[0]
49 r := strings.NewReader(str.AsString())
50 cr := csv.NewReader(r)
51 cr.FieldsPerRecord = len(atys)
52
53 // Read the header row first, since that'll tell us which indices
54 // map to which attribute names.
55 headers, err := cr.Read()
56 if err != nil {
57 return cty.DynamicVal, err
58 }
59
60 var rows []cty.Value
61 for {
62 cols, err := cr.Read()
63 if err == io.EOF {
64 break
65 }
66 if err != nil {
67 return cty.DynamicVal, err
68 }
69
70 vals := make(map[string]cty.Value, len(cols))
71 for i, str := range cols {
72 name := headers[i]
73 vals[name] = cty.StringVal(str)
74 }
75 rows = append(rows, cty.ObjectVal(vals))
76 }
77
78 if len(rows) == 0 {
79 return cty.ListValEmpty(ety), nil
80 }
81 return cty.ListVal(rows), nil
82 },
83})
84
85// CSVDecode parses the given CSV (RFC 4180) string and, if it is valid,
86// returns a list of objects representing the rows.
87//
88// The result is always a list of some object type. The first row of the
89// input is used to determine the object attributes, and subsequent rows
90// determine the values of those attributes.
91func CSVDecode(str cty.Value) (cty.Value, error) {
92 return CSVDecodeFunc.Call([]cty.Value{str})
93}
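
As an illustration of the behavior described in the CSVDecode comment above, this hypothetical snippet decodes a two-column input and walks the resulting list of objects:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	input := cty.StringVal("name,size\nfoo,1\nbar,2\n")

	rows, err := stdlib.CSVDecode(input)
	if err != nil {
		panic(err)
	}

	// rows has type cty.List(cty.Object({"name": cty.String, "size": cty.String})).
	it := rows.ElementIterator()
	for it.Next() {
		_, row := it.Element()
		fmt.Println(row.GetAttr("name").AsString(), row.GetAttr("size").AsString())
	}
	// Output:
	// foo 1
	// bar 2
}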
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go
new file mode 100644
index 0000000..cfb613e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go
@@ -0,0 +1,13 @@
1// Package stdlib is a collection of cty functions that are expected to be
2// generally useful, and are thus factored out into this shared library in
3// the hope that cty-using applications will have consistent behavior when
4// using these functions.
5//
6// See the parent package "function" for more information on the purpose
7// and usage of cty functions.
8//
9// This package contains both Go functions, which provide convenient access
10// to call the functions from Go code, and the Function objects themselves.
11// The latter follow the naming scheme of appending "Func" to the end of
12// the function name.
13package stdlib
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go
new file mode 100644
index 0000000..fb24f20
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go
@@ -0,0 +1,496 @@
1package stdlib
2
3import (
4 "bytes"
5 "fmt"
6 "math/big"
7 "strings"
8
9 "github.com/apparentlymart/go-textseg/textseg"
10
11 "github.com/zclconf/go-cty/cty"
12 "github.com/zclconf/go-cty/cty/convert"
13 "github.com/zclconf/go-cty/cty/function"
14 "github.com/zclconf/go-cty/cty/json"
15)
16
17//go:generate ragel -Z format_fsm.rl
18//go:generate gofmt -w format_fsm.go
19
20var FormatFunc = function.New(&function.Spec{
21 Params: []function.Parameter{
22 {
23 Name: "format",
24 Type: cty.String,
25 },
26 },
27 VarParam: &function.Parameter{
28 Name: "args",
29 Type: cty.DynamicPseudoType,
30 AllowNull: true,
31 },
32 Type: function.StaticReturnType(cty.String),
33 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
34 for _, arg := range args[1:] {
35 if !arg.IsWhollyKnown() {
36 // We require all nested values to be known because the only
37 // thing we can do for a collection/structural type is print
38 // it as JSON and that requires it to be wholly known.
39 return cty.UnknownVal(cty.String), nil
40 }
41 }
42 str, err := formatFSM(args[0].AsString(), args[1:])
43 return cty.StringVal(str), err
44 },
45})
46
47var FormatListFunc = function.New(&function.Spec{
48 Params: []function.Parameter{
49 {
50 Name: "format",
51 Type: cty.String,
52 },
53 },
54 VarParam: &function.Parameter{
55 Name: "args",
56 Type: cty.DynamicPseudoType,
57 AllowNull: true,
58 AllowUnknown: true,
59 },
60 Type: function.StaticReturnType(cty.List(cty.String)),
61 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
62 fmtVal := args[0]
63 args = args[1:]
64
65 if len(args) == 0 {
66 // With no arguments, this function is equivalent to Format, but
67 // returning a single-element list result.
68 result, err := Format(fmtVal, args...)
69 return cty.ListVal([]cty.Value{result}), err
70 }
71
72 fmtStr := fmtVal.AsString()
73
74 // Each of our arguments will be dealt with either as an iterator
75 // or as a single value. Iterators are used for sequence-type values
76 // (lists, sets, tuples) while everything else is treated as a
77 // single value. The sequences we iterate over are required to be
78 // all the same length.
79 iterLen := -1
80 lenChooser := -1
81 iterators := make([]cty.ElementIterator, len(args))
82 singleVals := make([]cty.Value, len(args))
83 for i, arg := range args {
84 argTy := arg.Type()
85 switch {
86 case (argTy.IsListType() || argTy.IsSetType() || argTy.IsTupleType()) && !arg.IsNull():
87 thisLen := arg.LengthInt()
88 if iterLen == -1 {
89 iterLen = thisLen
90 lenChooser = i
91 } else {
92 if thisLen != iterLen {
93 return cty.NullVal(cty.List(cty.String)), function.NewArgErrorf(
94 i+1,
95 "argument %d has length %d, which is inconsistent with argument %d of length %d",
96 i+1, thisLen,
97 lenChooser+1, iterLen,
98 )
99 }
100 }
101 iterators[i] = arg.ElementIterator()
102 default:
103 singleVals[i] = arg
104 }
105 }
106
107 if iterLen == 0 {
108 // If our sequences are all empty then our result must be empty.
109 return cty.ListValEmpty(cty.String), nil
110 }
111
112 if iterLen == -1 {
113 // If we didn't encounter any iterables at all then we're going
114 // to just do one iteration with items from singleVals.
115 iterLen = 1
116 }
117
118 ret := make([]cty.Value, 0, iterLen)
119 fmtArgs := make([]cty.Value, len(iterators))
120 Results:
121 for iterIdx := 0; iterIdx < iterLen; iterIdx++ {
122
123 // Construct our arguments for a single format call
124 for i := range fmtArgs {
125 switch {
126 case iterators[i] != nil:
127 iterator := iterators[i]
128 iterator.Next()
129 _, val := iterator.Element()
130 fmtArgs[i] = val
131 default:
132 fmtArgs[i] = singleVals[i]
133 }
134
135 // If any of the arguments to this call would be unknown then
136 // this particular result is unknown, but we'll keep going
137 // to see if any other iterations can produce known values.
138 if !fmtArgs[i].IsWhollyKnown() {
139 // We require all nested values to be known because the only
140 // thing we can do for a collection/structural type is print
141 // it as JSON and that requires it to be wholly known.
142 ret = append(ret, cty.UnknownVal(cty.String))
143 continue Results
144 }
145 }
146
147 str, err := formatFSM(fmtStr, fmtArgs)
148 if err != nil {
149 return cty.NullVal(cty.List(cty.String)), fmt.Errorf(
150 "error on format iteration %d: %s", iterIdx, err,
151 )
152 }
153
154 ret = append(ret, cty.StringVal(str))
155 }
156
157 return cty.ListVal(ret), nil
158 },
159})
160
161// Format produces a string representation of zero or more values using a
162// format string similar to the "printf" function in C.
163//
164// It supports the following "verbs":
165//
166// %% Literal percent sign, consuming no value
167// %v A default formatting of the value based on type, as described below.
168// %#v JSON serialization of the value
169// %t Converts to boolean and then produces "true" or "false"
170// %b Converts to number, requires integer, produces binary representation
171// %d Converts to number, requires integer, produces decimal representation
172// %o Converts to number, requires integer, produces octal representation
173// %x Converts to number, requires integer, produces hexadecimal representation
174// with lowercase letters
175// %X Like %x but with uppercase letters
176// %e Converts to number, produces scientific notation like -1.234456e+78
177// %E Like %e but with an uppercase "E" representing the exponent
178// %f Converts to number, produces decimal representation with fractional
179// part but no exponent, like 123.456
180// %g %e for large exponents or %f otherwise
181// %G %E for large exponents or %f otherwise
182// %s Converts to string and produces the string's characters
183// %q Converts to string and produces JSON-quoted string representation,
184// like %v.
185//
186// The default format selections made by %v are:
187//
188// string %s
189// number %g
190// bool %t
191// other %#v
192//
193// Null values produce the literal keyword "null" for %v and %#v, and produce
194// an error otherwise.
195//
196// Width is specified by an optional decimal number immediately preceding the
197// verb letter. If absent, the width is whatever is necessary to represent the
198// value. Precision is specified after the (optional) width by a period
199// followed by a decimal number. If no period is present, a default precision
200// is used. A period with no following number is invalid.
 201// For example:
202//
203// %f default width, default precision
204// %9f width 9, default precision
205// %.2f default width, precision 2
206// %9.2f width 9, precision 2
207//
 208// Width and precision are measured in Unicode characters (grapheme clusters).
209//
210// For most values, width is the minimum number of characters to output,
211// padding the formatted form with spaces if necessary.
212//
213// For strings, precision limits the length of the input to be formatted (not
214// the size of the output), truncating if necessary.
215//
216// For numbers, width sets the minimum width of the field and precision sets
217// the number of places after the decimal, if appropriate, except that for
218// %g/%G precision sets the total number of significant digits.
219//
220// The following additional symbols can be used immediately after the percent
221// introducer as flags:
222//
223// (a space) leave a space where the sign would be if number is positive
224// + Include a sign for a number even if it is positive (numeric only)
225// - Pad with spaces on the left rather than the right
226// 0 Pad with zeros rather than spaces.
227//
228// Flag characters are ignored for verbs that do not support them.
229//
230// By default, % sequences consume successive arguments starting with the first.
231// Introducing a [n] sequence immediately before the verb letter, where n is a
232// decimal integer, explicitly chooses a particular value argument by its
233// one-based index. Subsequent calls without an explicit index will then
234// proceed with n+1, n+2, etc.
235//
236// An error is produced if the format string calls for an impossible conversion
237// or accesses more values than are given. An error is produced also for
238// an unsupported format verb.
239func Format(format cty.Value, vals ...cty.Value) (cty.Value, error) {
240 args := make([]cty.Value, 0, len(vals)+1)
241 args = append(args, format)
242 args = append(args, vals...)
243 return FormatFunc.Call(args)
244}
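
The verb syntax documented above can be exercised with a short sketch like the following (illustrative only; the format string and values are invented):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	s, err := stdlib.Format(
		cty.StringVal("%s is %d years old (%.1f%% sure)"),
		cty.StringVal("Ermintrude"),
		cty.NumberIntVal(32),
		cty.NumberFloatVal(99.5),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.AsString()) // Ermintrude is 32 years old (99.5% sure)
}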
245
246// FormatList applies the same formatting behavior as Format, but accepts
247// a mixture of list and non-list values as arguments. Any list arguments
248// passed must have the same length, which dictates the length of the
249// resulting list.
250//
251// Any non-list arguments are used repeatedly for each iteration over the
252// list arguments. The list arguments are iterated in order by key, so
253// corresponding items are formatted together.
254func FormatList(format cty.Value, vals ...cty.Value) (cty.Value, error) {
255 args := make([]cty.Value, 0, len(vals)+1)
256 args = append(args, format)
257 args = append(args, vals...)
258 return FormatListFunc.Call(args)
259}
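
Similarly, a hypothetical FormatList call mixing a single value with a list argument, per the comment above:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	names := cty.ListVal([]cty.Value{
		cty.StringVal("apple"),
		cty.StringVal("banana"),
	})

	// The non-list argument is reused on every iteration over the list argument.
	out, err := stdlib.FormatList(cty.StringVal("%s: %s"), cty.StringVal("fruit"), names)
	if err != nil {
		panic(err)
	}
	for _, v := range out.AsValueSlice() {
		fmt.Println(v.AsString())
	}
	// Output:
	// fruit: apple
	// fruit: banana
}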
260
261type formatVerb struct {
262 Raw string
263 Offset int
264
265 ArgNum int
266 Mode rune
267
268 Zero bool
269 Sharp bool
270 Plus bool
271 Minus bool
272 Space bool
273
274 HasPrec bool
275 Prec int
276
277 HasWidth bool
278 Width int
279}
280
281// formatAppend is called by formatFSM (generated by format_fsm.rl) for each
282// formatting sequence that is encountered.
283func formatAppend(verb *formatVerb, buf *bytes.Buffer, args []cty.Value) error {
284 argIdx := verb.ArgNum - 1
285 if argIdx >= len(args) {
286 return fmt.Errorf(
287 "not enough arguments for %q at %d: need index %d but have %d total",
288 verb.Raw, verb.Offset,
289 verb.ArgNum, len(args),
290 )
291 }
292 arg := args[argIdx]
293
294 if verb.Mode != 'v' && arg.IsNull() {
295 return fmt.Errorf("unsupported value for %q at %d: null value cannot be formatted", verb.Raw, verb.Offset)
296 }
297
298 // Normalize to make some things easier for downstream formatters
299 if !verb.HasWidth {
300 verb.Width = -1
301 }
302 if !verb.HasPrec {
303 verb.Prec = -1
304 }
305
306 // For our first pass we'll ensure the verb is supported and then fan
307 // out to other functions based on what conversion is needed.
308 switch verb.Mode {
309
310 case 'v':
311 return formatAppendAsIs(verb, buf, arg)
312
313 case 't':
314 return formatAppendBool(verb, buf, arg)
315
316 case 'b', 'd', 'o', 'x', 'X', 'e', 'E', 'f', 'g', 'G':
317 return formatAppendNumber(verb, buf, arg)
318
319 case 's', 'q':
320 return formatAppendString(verb, buf, arg)
321
322 default:
323 return fmt.Errorf("unsupported format verb %q in %q at offset %d", verb.Mode, verb.Raw, verb.Offset)
324 }
325}
326
327func formatAppendAsIs(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error {
328
329 if !verb.Sharp && !arg.IsNull() {
330 // Unless the caller overrode it with the sharp flag, we'll try some
331 // specialized formats before we fall back on JSON.
332 switch arg.Type() {
333 case cty.String:
334 fmted := arg.AsString()
335 fmted = formatPadWidth(verb, fmted)
336 buf.WriteString(fmted)
337 return nil
338 case cty.Number:
339 bf := arg.AsBigFloat()
340 fmted := bf.Text('g', -1)
341 fmted = formatPadWidth(verb, fmted)
342 buf.WriteString(fmted)
343 return nil
344 }
345 }
346
347 jb, err := json.Marshal(arg, arg.Type())
348 if err != nil {
349 return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err)
350 }
351 fmted := formatPadWidth(verb, string(jb))
352 buf.WriteString(fmted)
353
354 return nil
355}
356
357func formatAppendBool(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error {
358 var err error
359 arg, err = convert.Convert(arg, cty.Bool)
360 if err != nil {
361 return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err)
362 }
363
364 if arg.True() {
365 buf.WriteString("true")
366 } else {
367 buf.WriteString("false")
368 }
369 return nil
370}
371
372func formatAppendNumber(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error {
373 var err error
374 arg, err = convert.Convert(arg, cty.Number)
375 if err != nil {
376 return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err)
377 }
378
379 switch verb.Mode {
380 case 'b', 'd', 'o', 'x', 'X':
381 return formatAppendInteger(verb, buf, arg)
382 default:
383 bf := arg.AsBigFloat()
384
385 // For floats our format syntax is a subset of Go's, so it's
386 // safe for us to just lean on the existing Go implementation.
387 fmtstr := formatStripIndexSegment(verb.Raw)
388 fmted := fmt.Sprintf(fmtstr, bf)
389 buf.WriteString(fmted)
390 return nil
391 }
392}
393
394func formatAppendInteger(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error {
395 bf := arg.AsBigFloat()
396 bi, acc := bf.Int(nil)
397 if acc != big.Exact {
398 return fmt.Errorf("unsupported value for %q at %d: an integer is required", verb.Raw, verb.Offset)
399 }
400
401 // For integers our format syntax is a subset of Go's, so it's
402 // safe for us to just lean on the existing Go implementation.
403 fmtstr := formatStripIndexSegment(verb.Raw)
404 fmted := fmt.Sprintf(fmtstr, bi)
405 buf.WriteString(fmted)
406 return nil
407}
408
409func formatAppendString(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error {
410 var err error
411 arg, err = convert.Convert(arg, cty.String)
412 if err != nil {
413 return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err)
414 }
415
416 // We _cannot_ directly use the Go fmt.Sprintf implementation for strings
417 // because it measures widths and precisions in runes rather than grapheme
418 // clusters.
419
420 str := arg.AsString()
421 if verb.Prec > 0 {
422 strB := []byte(str)
423 pos := 0
424 wanted := verb.Prec
425 for i := 0; i < wanted; i++ {
426 next := strB[pos:]
427 if len(next) == 0 {
 428 // ran out of characters before we reached the requested precision
429 break
430 }
431 d, _, _ := textseg.ScanGraphemeClusters(strB[pos:], true)
432 pos += d
433 }
434 str = str[:pos]
435 }
436
437 switch verb.Mode {
438 case 's':
439 fmted := formatPadWidth(verb, str)
440 buf.WriteString(fmted)
441 case 'q':
442 jb, err := json.Marshal(cty.StringVal(str), cty.String)
443 if err != nil {
444 // Should never happen, since we know this is a known, non-null string
445 panic(fmt.Errorf("failed to marshal %#v as JSON: %s", arg, err))
446 }
447 fmted := formatPadWidth(verb, string(jb))
448 buf.WriteString(fmted)
449 default:
450 // Should never happen because formatAppend should've already validated
451 panic(fmt.Errorf("invalid string formatting mode %q", verb.Mode))
452 }
453 return nil
454}
455
456func formatPadWidth(verb *formatVerb, fmted string) string {
457 if verb.Width < 0 {
458 return fmted
459 }
460
461 // Safe to ignore errors because ScanGraphemeClusters cannot produce errors
462 givenLen, _ := textseg.TokenCount([]byte(fmted), textseg.ScanGraphemeClusters)
463 wantLen := verb.Width
464 if givenLen >= wantLen {
465 return fmted
466 }
467
468 padLen := wantLen - givenLen
469 padChar := " "
470 if verb.Zero {
471 padChar = "0"
472 }
473 pads := strings.Repeat(padChar, padLen)
474
475 if verb.Minus {
476 return fmted + pads
477 }
478 return pads + fmted
479}
480
481// formatStripIndexSegment strips out any [nnn] segment present in a verb
482// string so that we can pass it through to Go's fmt.Sprintf with a single
483// argument. This is used in cases where we're just leaning on Go's formatter
484// because it's a superset of ours.
485func formatStripIndexSegment(rawVerb string) string {
486 // We assume the string has already been validated here, since we should
487 // only be using this function with strings that were accepted by our
488 // scanner in formatFSM.
489 start := strings.Index(rawVerb, "[")
490 end := strings.Index(rawVerb, "]")
491 if start == -1 || end == -1 {
492 return rawVerb
493 }
494
495 return rawVerb[:start] + rawVerb[end+1:]
496}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go
new file mode 100644
index 0000000..86876ba
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go
@@ -0,0 +1,358 @@
1// line 1 "format_fsm.rl"
2// This file is generated from format_fsm.rl. DO NOT EDIT.
3
4// line 5 "format_fsm.rl"
5
6package stdlib
7
8import (
9 "bytes"
10 "fmt"
11 "unicode/utf8"
12
13 "github.com/zclconf/go-cty/cty"
14)
15
16// line 20 "format_fsm.go"
17var _formatfsm_actions []byte = []byte{
18 0, 1, 0, 1, 1, 1, 2, 1, 4,
19 1, 5, 1, 6, 1, 7, 1, 8,
20 1, 9, 1, 10, 1, 11, 1, 14,
21 1, 16, 1, 17, 1, 18, 2, 3,
22 4, 2, 12, 10, 2, 12, 16, 2,
23 12, 18, 2, 13, 14, 2, 15, 10,
24 2, 15, 18,
25}
26
27var _formatfsm_key_offsets []byte = []byte{
28 0, 0, 14, 27, 34, 36, 39, 43,
29 51,
30}
31
32var _formatfsm_trans_keys []byte = []byte{
33 32, 35, 37, 43, 45, 46, 48, 91,
34 49, 57, 65, 90, 97, 122, 32, 35,
35 43, 45, 46, 48, 91, 49, 57, 65,
36 90, 97, 122, 91, 48, 57, 65, 90,
37 97, 122, 49, 57, 93, 48, 57, 65,
38 90, 97, 122, 46, 91, 48, 57, 65,
39 90, 97, 122, 37,
40}
41
42var _formatfsm_single_lengths []byte = []byte{
43 0, 8, 7, 1, 0, 1, 0, 2,
44 1,
45}
46
47var _formatfsm_range_lengths []byte = []byte{
48 0, 3, 3, 3, 1, 1, 2, 3,
49 0,
50}
51
52var _formatfsm_index_offsets []byte = []byte{
53 0, 0, 12, 23, 28, 30, 33, 36,
54 42,
55}
56
57var _formatfsm_indicies []byte = []byte{
58 1, 2, 3, 4, 5, 6, 7, 10,
59 8, 9, 9, 0, 1, 2, 4, 5,
60 6, 7, 10, 8, 9, 9, 0, 13,
61 11, 12, 12, 0, 14, 0, 15, 14,
62 0, 9, 9, 0, 16, 19, 17, 18,
63 18, 0, 20, 3,
64}
65
66var _formatfsm_trans_targs []byte = []byte{
67 0, 2, 2, 8, 2, 2, 3, 2,
68 7, 8, 4, 3, 8, 4, 5, 6,
69 3, 7, 8, 4, 1,
70}
71
72var _formatfsm_trans_actions []byte = []byte{
73 7, 17, 9, 3, 15, 13, 25, 11,
74 43, 29, 19, 27, 49, 46, 21, 0,
75 37, 23, 40, 34, 1,
76}
77
78var _formatfsm_eof_actions []byte = []byte{
79 0, 31, 31, 31, 31, 31, 31, 31,
80 5,
81}
82
83const formatfsm_start int = 8
84const formatfsm_first_final int = 8
85const formatfsm_error int = 0
86
87const formatfsm_en_main int = 8
88
89// line 19 "format_fsm.rl"
90
91func formatFSM(format string, a []cty.Value) (string, error) {
92 var buf bytes.Buffer
93 data := format
94 nextArg := 1 // arg numbers are 1-based
95 var verb formatVerb
96
97 // line 153 "format_fsm.rl"
98
99 // Ragel state
100 p := 0 // "Pointer" into data
101 pe := len(data) // End-of-data "pointer"
102 cs := 0 // current state (will be initialized by ragel-generated code)
103 ts := 0
104 te := 0
105 eof := pe
106
107 // Keep Go compiler happy even if generated code doesn't use these
108 _ = ts
109 _ = te
110 _ = eof
111
112 // line 121 "format_fsm.go"
113 {
114 cs = formatfsm_start
115 }
116
117 // line 126 "format_fsm.go"
118 {
119 var _klen int
120 var _trans int
121 var _acts int
122 var _nacts uint
123 var _keys int
124 if p == pe {
125 goto _test_eof
126 }
127 if cs == 0 {
128 goto _out
129 }
130 _resume:
131 _keys = int(_formatfsm_key_offsets[cs])
132 _trans = int(_formatfsm_index_offsets[cs])
133
134 _klen = int(_formatfsm_single_lengths[cs])
135 if _klen > 0 {
136 _lower := int(_keys)
137 var _mid int
138 _upper := int(_keys + _klen - 1)
139 for {
140 if _upper < _lower {
141 break
142 }
143
144 _mid = _lower + ((_upper - _lower) >> 1)
145 switch {
146 case data[p] < _formatfsm_trans_keys[_mid]:
147 _upper = _mid - 1
148 case data[p] > _formatfsm_trans_keys[_mid]:
149 _lower = _mid + 1
150 default:
151 _trans += int(_mid - int(_keys))
152 goto _match
153 }
154 }
155 _keys += _klen
156 _trans += _klen
157 }
158
159 _klen = int(_formatfsm_range_lengths[cs])
160 if _klen > 0 {
161 _lower := int(_keys)
162 var _mid int
163 _upper := int(_keys + (_klen << 1) - 2)
164 for {
165 if _upper < _lower {
166 break
167 }
168
169 _mid = _lower + (((_upper - _lower) >> 1) & ^1)
170 switch {
171 case data[p] < _formatfsm_trans_keys[_mid]:
172 _upper = _mid - 2
173 case data[p] > _formatfsm_trans_keys[_mid+1]:
174 _lower = _mid + 2
175 default:
176 _trans += int((_mid - int(_keys)) >> 1)
177 goto _match
178 }
179 }
180 _trans += _klen
181 }
182
183 _match:
184 _trans = int(_formatfsm_indicies[_trans])
185 cs = int(_formatfsm_trans_targs[_trans])
186
187 if _formatfsm_trans_actions[_trans] == 0 {
188 goto _again
189 }
190
191 _acts = int(_formatfsm_trans_actions[_trans])
192 _nacts = uint(_formatfsm_actions[_acts])
193 _acts++
194 for ; _nacts > 0; _nacts-- {
195 _acts++
196 switch _formatfsm_actions[_acts-1] {
197 case 0:
198 // line 29 "format_fsm.rl"
199
200 verb = formatVerb{
201 ArgNum: nextArg,
202 Prec: -1,
203 Width: -1,
204 }
205 ts = p
206
207 case 1:
208 // line 38 "format_fsm.rl"
209
210 buf.WriteByte(data[p])
211
212 case 4:
213 // line 49 "format_fsm.rl"
214
215 // We'll try to slurp a whole UTF-8 sequence here, to give the user
216 // better feedback.
217 r, _ := utf8.DecodeRuneInString(data[p:])
218 return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p)
219
220 case 5:
221 // line 56 "format_fsm.rl"
222
223 verb.Sharp = true
224
225 case 6:
226 // line 59 "format_fsm.rl"
227
228 verb.Zero = true
229
230 case 7:
231 // line 62 "format_fsm.rl"
232
233 verb.Minus = true
234
235 case 8:
236 // line 65 "format_fsm.rl"
237
238 verb.Plus = true
239
240 case 9:
241 // line 68 "format_fsm.rl"
242
243 verb.Space = true
244
245 case 10:
246 // line 72 "format_fsm.rl"
247
248 verb.ArgNum = 0
249
250 case 11:
251 // line 75 "format_fsm.rl"
252
253 verb.ArgNum = (10 * verb.ArgNum) + (int(data[p]) - '0')
254
255 case 12:
256 // line 79 "format_fsm.rl"
257
258 verb.HasWidth = true
259
260 case 13:
261 // line 82 "format_fsm.rl"
262
263 verb.Width = 0
264
265 case 14:
266 // line 85 "format_fsm.rl"
267
268 verb.Width = (10 * verb.Width) + (int(data[p]) - '0')
269
270 case 15:
271 // line 89 "format_fsm.rl"
272
273 verb.HasPrec = true
274
275 case 16:
276 // line 92 "format_fsm.rl"
277
278 verb.Prec = 0
279
280 case 17:
281 // line 95 "format_fsm.rl"
282
283 verb.Prec = (10 * verb.Prec) + (int(data[p]) - '0')
284
285 case 18:
286 // line 99 "format_fsm.rl"
287
288 verb.Mode = rune(data[p])
289 te = p + 1
290 verb.Raw = data[ts:te]
291 verb.Offset = ts
292
293 err := formatAppend(&verb, &buf, a)
294 if err != nil {
295 return buf.String(), err
296 }
297 nextArg = verb.ArgNum + 1
298
299 // line 324 "format_fsm.go"
300 }
301 }
302
303 _again:
304 if cs == 0 {
305 goto _out
306 }
307 p++
308 if p != pe {
309 goto _resume
310 }
311 _test_eof:
312 {
313 }
314 if p == eof {
315 __acts := _formatfsm_eof_actions[cs]
316 __nacts := uint(_formatfsm_actions[__acts])
317 __acts++
318 for ; __nacts > 0; __nacts-- {
319 __acts++
320 switch _formatfsm_actions[__acts-1] {
321 case 2:
322 // line 42 "format_fsm.rl"
323
324 case 3:
325 // line 45 "format_fsm.rl"
326
327 return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p)
328
329 case 4:
330 // line 49 "format_fsm.rl"
331
332 // We'll try to slurp a whole UTF-8 sequence here, to give the user
333 // better feedback.
334 r, _ := utf8.DecodeRuneInString(data[p:])
335 return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p)
336
337 // line 363 "format_fsm.go"
338 }
339 }
340 }
341
342 _out:
343 {
344 }
345 }
346
347 // line 171 "format_fsm.rl"
348
349 // If we fall out here without being in a final state then we've
350 // encountered something that the scanner can't match, which should
351 // be impossible (the scanner matches all bytes _somehow_) but we'll
352 // flag it anyway rather than just losing data from the end.
353 if cs < formatfsm_first_final {
 354 return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p)
355 }
356
357 return buf.String(), nil
358}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl
new file mode 100644
index 0000000..85d43bb
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl
@@ -0,0 +1,182 @@
1// This file is generated from format_fsm.rl. DO NOT EDIT.
2%%{
 3 # (except you are actually in format_fsm.rl here, so edit away!)
4 machine formatfsm;
5}%%
6
7package stdlib
8
9import (
10 "bytes"
11 "fmt"
12 "unicode/utf8"
13
14 "github.com/zclconf/go-cty/cty"
15)
16
17%%{
18 write data;
19}%%
20
21func formatFSM(format string, a []cty.Value) (string, error) {
22 var buf bytes.Buffer
23 data := format
24 nextArg := 1 // arg numbers are 1-based
25 var verb formatVerb
26
27 %%{
28
29 action begin {
30 verb = formatVerb{
31 ArgNum: nextArg,
32 Prec: -1,
33 Width: -1,
34 }
35 ts = p
36 }
37
38 action emit {
39 buf.WriteByte(fc);
40 }
41
42 action finish_ok {
43 }
44
45 action finish_err {
46 return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p)
47 }
48
49 action err_char {
50 // We'll try to slurp a whole UTF-8 sequence here, to give the user
51 // better feedback.
52 r, _ := utf8.DecodeRuneInString(data[p:])
53 return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p)
54 }
55
56 action flag_sharp {
57 verb.Sharp = true
58 }
59 action flag_zero {
60 verb.Zero = true
61 }
62 action flag_minus {
63 verb.Minus = true
64 }
65 action flag_plus {
66 verb.Plus = true
67 }
68 action flag_space {
69 verb.Space = true
70 }
71
72 action argidx_reset {
73 verb.ArgNum = 0
74 }
75 action argidx_num {
76 verb.ArgNum = (10 * verb.ArgNum) + (int(fc) - '0')
77 }
78
79 action has_width {
80 verb.HasWidth = true
81 }
82 action width_reset {
83 verb.Width = 0
84 }
85 action width_num {
86 verb.Width = (10 * verb.Width) + (int(fc) - '0')
87 }
88
89 action has_prec {
90 verb.HasPrec = true
91 }
92 action prec_reset {
93 verb.Prec = 0
94 }
95 action prec_num {
96 verb.Prec = (10 * verb.Prec) + (int(fc) - '0')
97 }
98
99 action mode {
100 verb.Mode = rune(fc)
101 te = p+1
102 verb.Raw = data[ts:te]
103 verb.Offset = ts
104
105 err := formatAppend(&verb, &buf, a)
106 if err != nil {
107 return buf.String(), err
108 }
109 nextArg = verb.ArgNum + 1
110 }
111
112 # a number that isn't zero and doesn't have a leading zero
113 num = [1-9] [0-9]*;
114
115 flags = (
116 '0' @flag_zero |
117 '#' @flag_sharp |
118 '-' @flag_minus |
119 '+' @flag_plus |
120 ' ' @flag_space
121 )*;
122
123 argidx = ((
124 '[' (num $argidx_num) ']'
125 ) >argidx_reset)?;
126
127 width = (
128 ( num $width_num ) >width_reset %has_width
129 )?;
130
131 precision = (
132 ('.' ( digit* $prec_num )) >prec_reset %has_prec
133 )?;
134
135 # We accept any letter here, but will be more picky in formatAppend
136 mode = ('a'..'z' | 'A'..'Z') @mode;
137
138 fmt_verb = (
139 '%' @begin
140 flags
141 width
142 precision
143 argidx
144 mode
145 );
146
147 main := (
148 [^%] @emit |
149 '%%' @emit |
150 fmt_verb
151 )* @/finish_err %/finish_ok $!err_char;
152
153 }%%
154
155 // Ragel state
156 p := 0 // "Pointer" into data
157 pe := len(data) // End-of-data "pointer"
158 cs := 0 // current state (will be initialized by ragel-generated code)
159 ts := 0
160 te := 0
161 eof := pe
162
163 // Keep Go compiler happy even if generated code doesn't use these
164 _ = ts
165 _ = te
166 _ = eof
167
168 %%{
169 write init;
170 write exec;
171 }%%
172
173 // If we fall out here without being in a final state then we've
174 // encountered something that the scanner can't match, which should
175 // be impossible (the scanner matches all bytes _somehow_) but we'll
176 // flag it anyway rather than just losing data from the end.
177 if cs < formatfsm_first_final {
 178 return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p)
179 }
180
181 return buf.String(), nil
182}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go
new file mode 100644
index 0000000..6b31f26
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go
@@ -0,0 +1,107 @@
1package stdlib
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8 "github.com/zclconf/go-cty/cty/function"
9)
10
11var EqualFunc = function.New(&function.Spec{
12 Params: []function.Parameter{
13 {
14 Name: "a",
15 Type: cty.DynamicPseudoType,
16 AllowUnknown: true,
17 AllowDynamicType: true,
18 AllowNull: true,
19 },
20 {
21 Name: "b",
22 Type: cty.DynamicPseudoType,
23 AllowUnknown: true,
24 AllowDynamicType: true,
25 AllowNull: true,
26 },
27 },
28 Type: function.StaticReturnType(cty.Bool),
29 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
30 return args[0].Equals(args[1]), nil
31 },
32})
33
34var NotEqualFunc = function.New(&function.Spec{
35 Params: []function.Parameter{
36 {
37 Name: "a",
38 Type: cty.DynamicPseudoType,
39 AllowUnknown: true,
40 AllowDynamicType: true,
41 AllowNull: true,
42 },
43 {
44 Name: "b",
45 Type: cty.DynamicPseudoType,
46 AllowUnknown: true,
47 AllowDynamicType: true,
48 AllowNull: true,
49 },
50 },
51 Type: function.StaticReturnType(cty.Bool),
52 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
53 return args[0].Equals(args[1]).Not(), nil
54 },
55})
56
57var CoalesceFunc = function.New(&function.Spec{
58 Params: []function.Parameter{},
59 VarParam: &function.Parameter{
60 Name: "vals",
61 Type: cty.DynamicPseudoType,
62 AllowUnknown: true,
63 AllowDynamicType: true,
64 AllowNull: true,
65 },
66 Type: func(args []cty.Value) (ret cty.Type, err error) {
67 argTypes := make([]cty.Type, len(args))
68 for i, val := range args {
69 argTypes[i] = val.Type()
70 }
71 retType, _ := convert.UnifyUnsafe(argTypes)
72 if retType == cty.NilType {
73 return cty.NilType, fmt.Errorf("all arguments must have the same type")
74 }
75 return retType, nil
76 },
77 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
78 for _, argVal := range args {
79 if !argVal.IsKnown() {
80 return cty.UnknownVal(retType), nil
81 }
82 if argVal.IsNull() {
83 continue
84 }
85
86 return convert.Convert(argVal, retType)
87 }
88 return cty.NilVal, fmt.Errorf("no non-null arguments")
89 },
90})
91
92// Equal determines whether the two given values are equal, returning a
93// bool value.
94func Equal(a cty.Value, b cty.Value) (cty.Value, error) {
95 return EqualFunc.Call([]cty.Value{a, b})
96}
97
98// NotEqual is the opposite of Equal.
99func NotEqual(a cty.Value, b cty.Value) (cty.Value, error) {
100 return NotEqualFunc.Call([]cty.Value{a, b})
101}
102
103// Coalesce returns the first of the given arguments that is not null. If
104// all arguments are null, an error is produced.
105func Coalesce(vals ...cty.Value) (cty.Value, error) {
106 return CoalesceFunc.Call(vals)
107}
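
An illustrative sketch (not part of the diff) of Equal, NotEqual, and Coalesce as documented above:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	eq, _ := stdlib.Equal(cty.NumberIntVal(1), cty.NumberIntVal(1))  // cty.True
	ne, _ := stdlib.NotEqual(cty.StringVal("a"), cty.StringVal("b")) // cty.True

	// Coalesce returns the first non-null argument, converted to the
	// unified type of all the arguments.
	first, _ := stdlib.Coalesce(cty.NullVal(cty.String), cty.StringVal("fallback"))

	fmt.Println(eq.True(), ne.True(), first.AsString()) // true true fallback
}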
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go
new file mode 100644
index 0000000..07901c6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go
@@ -0,0 +1,72 @@
1package stdlib
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 "github.com/zclconf/go-cty/cty/function"
6 "github.com/zclconf/go-cty/cty/json"
7)
8
9var JSONEncodeFunc = function.New(&function.Spec{
10 Params: []function.Parameter{
11 {
12 Name: "val",
13 Type: cty.DynamicPseudoType,
14 AllowDynamicType: true,
15 },
16 },
17 Type: function.StaticReturnType(cty.String),
18 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
19 val := args[0]
20 if !val.IsWhollyKnown() {
21 // We can't serialize unknowns, so if the value is unknown or
22 // contains any _nested_ unknowns then our result must be
23 // unknown.
24 return cty.UnknownVal(retType), nil
25 }
26
27 buf, err := json.Marshal(val, val.Type())
28 if err != nil {
29 return cty.NilVal, err
30 }
31
32 return cty.StringVal(string(buf)), nil
33 },
34})
35
36var JSONDecodeFunc = function.New(&function.Spec{
37 Params: []function.Parameter{
38 {
39 Name: "str",
40 Type: cty.String,
41 },
42 },
43 Type: func(args []cty.Value) (cty.Type, error) {
44 str := args[0]
45 if !str.IsKnown() {
46 return cty.DynamicPseudoType, nil
47 }
48
49 buf := []byte(str.AsString())
50 return json.ImpliedType(buf)
51 },
52 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
53 buf := []byte(args[0].AsString())
54 return json.Unmarshal(buf, retType)
55 },
56})
57
58// JSONEncode returns a JSON serialization of the given value.
59func JSONEncode(val cty.Value) (cty.Value, error) {
60 return JSONEncodeFunc.Call([]cty.Value{val})
61}
62
63// JSONDecode parses the given JSON string and, if it is valid, returns the
64// value it represents.
65//
66// Note that applying JSONDecode to the result of JSONEncode may not produce
67// an identically-typed result, since JSON encoding is lossy for cty Types.
68// The resulting value will consist only of primitive types, object types, and
69// tuple types.
70func JSONDecode(str cty.Value) (cty.Value, error) {
71 return JSONDecodeFunc.Call([]cty.Value{str})
72}
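
A hypothetical round-trip through JSONEncode and JSONDecode, illustrating the lossy-typing note above:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("example"),
		"enabled": cty.True,
	})

	encoded, _ := stdlib.JSONEncode(val)
	fmt.Println(encoded.AsString()) // e.g. {"enabled":true,"name":"example"}

	// Decoding infers a type from the JSON structure; as noted above, the
	// round trip preserves values but not necessarily the original cty type.
	decoded, _ := stdlib.JSONDecode(encoded)
	fmt.Println(decoded.GetAttr("name").AsString()) // example
}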
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go
new file mode 100644
index 0000000..bd9b2e5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go
@@ -0,0 +1,428 @@
1package stdlib
2
3import (
4 "fmt"
5 "math/big"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/function"
9)
10
11var AbsoluteFunc = function.New(&function.Spec{
12 Params: []function.Parameter{
13 {
14 Name: "num",
15 Type: cty.Number,
16 AllowDynamicType: true,
17 },
18 },
19 Type: function.StaticReturnType(cty.Number),
20 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
21 return args[0].Absolute(), nil
22 },
23})
24
25var AddFunc = function.New(&function.Spec{
26 Params: []function.Parameter{
27 {
28 Name: "a",
29 Type: cty.Number,
30 AllowDynamicType: true,
31 },
32 {
33 Name: "b",
34 Type: cty.Number,
35 AllowDynamicType: true,
36 },
37 },
38 Type: function.StaticReturnType(cty.Number),
39 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
40 // big.Float.Add can panic if the input values are opposing infinities,
41 // so we must catch that here in order to remain within
42 // the cty Function abstraction.
43 defer func() {
44 if r := recover(); r != nil {
45 if _, ok := r.(big.ErrNaN); ok {
46 ret = cty.NilVal
47 err = fmt.Errorf("can't compute sum of opposing infinities")
48 } else {
49 // not a panic we recognize
50 panic(r)
51 }
52 }
53 }()
54 return args[0].Add(args[1]), nil
55 },
56})
57
58var SubtractFunc = function.New(&function.Spec{
59 Params: []function.Parameter{
60 {
61 Name: "a",
62 Type: cty.Number,
63 AllowDynamicType: true,
64 },
65 {
66 Name: "b",
67 Type: cty.Number,
68 AllowDynamicType: true,
69 },
70 },
71 Type: function.StaticReturnType(cty.Number),
72 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
 73 // big.Float.Sub can panic if both inputs are the same infinity,
74 // so we must catch that here in order to remain within
75 // the cty Function abstraction.
76 defer func() {
77 if r := recover(); r != nil {
78 if _, ok := r.(big.ErrNaN); ok {
79 ret = cty.NilVal
80 err = fmt.Errorf("can't subtract infinity from itself")
81 } else {
82 // not a panic we recognize
83 panic(r)
84 }
85 }
86 }()
87 return args[0].Subtract(args[1]), nil
88 },
89})
90
91var MultiplyFunc = function.New(&function.Spec{
92 Params: []function.Parameter{
93 {
94 Name: "a",
95 Type: cty.Number,
96 AllowDynamicType: true,
97 },
98 {
99 Name: "b",
100 Type: cty.Number,
101 AllowDynamicType: true,
102 },
103 },
104 Type: function.StaticReturnType(cty.Number),
105 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
 106 // big.Float.Mul can panic if one input is zero and the other is
107 // infinity, so we must catch that here in order to remain within
108 // the cty Function abstraction.
109 defer func() {
110 if r := recover(); r != nil {
111 if _, ok := r.(big.ErrNaN); ok {
112 ret = cty.NilVal
113 err = fmt.Errorf("can't multiply zero by infinity")
114 } else {
115 // not a panic we recognize
116 panic(r)
117 }
118 }
119 }()
120
121 return args[0].Multiply(args[1]), nil
122 },
123})
124
125var DivideFunc = function.New(&function.Spec{
126 Params: []function.Parameter{
127 {
128 Name: "a",
129 Type: cty.Number,
130 AllowDynamicType: true,
131 },
132 {
133 Name: "b",
134 Type: cty.Number,
135 AllowDynamicType: true,
136 },
137 },
138 Type: function.StaticReturnType(cty.Number),
139 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
140 // big.Float.Quo can panic if the input values are both zero or both
141 // infinity, so we must catch that here in order to remain within
142 // the cty Function abstraction.
143 defer func() {
144 if r := recover(); r != nil {
145 if _, ok := r.(big.ErrNaN); ok {
146 ret = cty.NilVal
147 err = fmt.Errorf("can't divide zero by zero or infinity by infinity")
148 } else {
149 // not a panic we recognize
150 panic(r)
151 }
152 }
153 }()
154
155 return args[0].Divide(args[1]), nil
156 },
157})
158
159var ModuloFunc = function.New(&function.Spec{
160 Params: []function.Parameter{
161 {
162 Name: "a",
163 Type: cty.Number,
164 AllowDynamicType: true,
165 },
166 {
167 Name: "b",
168 Type: cty.Number,
169 AllowDynamicType: true,
170 },
171 },
172 Type: function.StaticReturnType(cty.Number),
173 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
 174 // The underlying big.Float operations can panic on certain combinations
 175 // of zero and infinity, so we must catch that here in order to remain within
176 // the cty Function abstraction.
177 defer func() {
178 if r := recover(); r != nil {
179 if _, ok := r.(big.ErrNaN); ok {
180 ret = cty.NilVal
181 err = fmt.Errorf("can't use modulo with zero and infinity")
182 } else {
183 // not a panic we recognize
184 panic(r)
185 }
186 }
187 }()
188
189 return args[0].Modulo(args[1]), nil
190 },
191})
192
193var GreaterThanFunc = function.New(&function.Spec{
194 Params: []function.Parameter{
195 {
196 Name: "a",
197 Type: cty.Number,
198 AllowDynamicType: true,
199 },
200 {
201 Name: "b",
202 Type: cty.Number,
203 AllowDynamicType: true,
204 },
205 },
206 Type: function.StaticReturnType(cty.Bool),
207 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
208 return args[0].GreaterThan(args[1]), nil
209 },
210})
211
212var GreaterThanOrEqualToFunc = function.New(&function.Spec{
213 Params: []function.Parameter{
214 {
215 Name: "a",
216 Type: cty.Number,
217 AllowDynamicType: true,
218 },
219 {
220 Name: "b",
221 Type: cty.Number,
222 AllowDynamicType: true,
223 },
224 },
225 Type: function.StaticReturnType(cty.Bool),
226 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
227 return args[0].GreaterThanOrEqualTo(args[1]), nil
228 },
229})
230
231var LessThanFunc = function.New(&function.Spec{
232 Params: []function.Parameter{
233 {
234 Name: "a",
235 Type: cty.Number,
236 AllowDynamicType: true,
237 },
238 {
239 Name: "b",
240 Type: cty.Number,
241 AllowDynamicType: true,
242 },
243 },
244 Type: function.StaticReturnType(cty.Bool),
245 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
246 return args[0].LessThan(args[1]), nil
247 },
248})
249
250var LessThanOrEqualToFunc = function.New(&function.Spec{
251 Params: []function.Parameter{
252 {
253 Name: "a",
254 Type: cty.Number,
255 AllowDynamicType: true,
256 },
257 {
258 Name: "b",
259 Type: cty.Number,
260 AllowDynamicType: true,
261 },
262 },
263 Type: function.StaticReturnType(cty.Bool),
264 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
265 return args[0].LessThanOrEqualTo(args[1]), nil
266 },
267})
268
269var NegateFunc = function.New(&function.Spec{
270 Params: []function.Parameter{
271 {
272 Name: "num",
273 Type: cty.Number,
274 AllowDynamicType: true,
275 },
276 },
277 Type: function.StaticReturnType(cty.Number),
278 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
279 return args[0].Negate(), nil
280 },
281})
282
283var MinFunc = function.New(&function.Spec{
284 Params: []function.Parameter{},
285 VarParam: &function.Parameter{
286 Name: "numbers",
287 Type: cty.Number,
288 AllowDynamicType: true,
289 },
290 Type: function.StaticReturnType(cty.Number),
291 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
292 if len(args) == 0 {
293 return cty.NilVal, fmt.Errorf("must pass at least one number")
294 }
295
296 min := cty.PositiveInfinity
297 for _, num := range args {
298 if num.LessThan(min).True() {
299 min = num
300 }
301 }
302
303 return min, nil
304 },
305})
306
307var MaxFunc = function.New(&function.Spec{
308 Params: []function.Parameter{},
309 VarParam: &function.Parameter{
310 Name: "numbers",
311 Type: cty.Number,
312 AllowDynamicType: true,
313 },
314 Type: function.StaticReturnType(cty.Number),
315 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
316 if len(args) == 0 {
317 return cty.NilVal, fmt.Errorf("must pass at least one number")
318 }
319
320 max := cty.NegativeInfinity
321 for _, num := range args {
322 if num.GreaterThan(max).True() {
323 max = num
324 }
325 }
326
327 return max, nil
328 },
329})
330
331var IntFunc = function.New(&function.Spec{
332 Params: []function.Parameter{
333 {
334 Name: "num",
335 Type: cty.Number,
336 AllowDynamicType: true,
337 },
338 },
339 Type: function.StaticReturnType(cty.Number),
340 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
341 bf := args[0].AsBigFloat()
342 if bf.IsInt() {
343 return args[0], nil
344 }
345 bi, _ := bf.Int(nil)
346 bf = (&big.Float{}).SetInt(bi)
347 return cty.NumberVal(bf), nil
348 },
349})
350
351// Absolute returns the magnitude of the given number, without its sign.
352// That is, it turns negative values into positive values.
353func Absolute(num cty.Value) (cty.Value, error) {
354 return AbsoluteFunc.Call([]cty.Value{num})
355}
356
357// Add returns the sum of the two given numbers.
358func Add(a cty.Value, b cty.Value) (cty.Value, error) {
359 return AddFunc.Call([]cty.Value{a, b})
360}
361
362// Subtract returns the difference between the two given numbers.
363func Subtract(a cty.Value, b cty.Value) (cty.Value, error) {
364 return SubtractFunc.Call([]cty.Value{a, b})
365}
366
367// Multiply returns the product of the two given numbers.
368func Multiply(a cty.Value, b cty.Value) (cty.Value, error) {
369 return MultiplyFunc.Call([]cty.Value{a, b})
370}
371
372// Divide returns a divided by b, where both a and b are numbers.
373func Divide(a cty.Value, b cty.Value) (cty.Value, error) {
374 return DivideFunc.Call([]cty.Value{a, b})
375}
376
377// Negate returns the given number multiplied by -1.
378func Negate(num cty.Value) (cty.Value, error) {
379 return NegateFunc.Call([]cty.Value{num})
380}
381
382// LessThan returns true if a is less than b.
383func LessThan(a cty.Value, b cty.Value) (cty.Value, error) {
384 return LessThanFunc.Call([]cty.Value{a, b})
385}
386
387// LessThanOrEqualTo returns true if a is less than or equal to b.
388func LessThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) {
389 return LessThanOrEqualToFunc.Call([]cty.Value{a, b})
390}
391
392// GreaterThan returns true if a is greater than b.
393func GreaterThan(a cty.Value, b cty.Value) (cty.Value, error) {
394 return GreaterThanFunc.Call([]cty.Value{a, b})
395}
396
397// GreaterThanOrEqualTo returns true if a is greater than or equal to b.
398func GreaterThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) {
399 return GreaterThanOrEqualToFunc.Call([]cty.Value{a, b})
400}
401
402// Modulo returns the remainder of a divided by b under integer division,
403// where both a and b are numbers.
404func Modulo(a cty.Value, b cty.Value) (cty.Value, error) {
405 return ModuloFunc.Call([]cty.Value{a, b})
406}
407
408// Min returns the minimum number from the given numbers.
409func Min(numbers ...cty.Value) (cty.Value, error) {
410 return MinFunc.Call(numbers)
411}
412
413// Max returns the maximum number from the given numbers.
414func Max(numbers ...cty.Value) (cty.Value, error) {
415 return MaxFunc.Call(numbers)
416}
417
418// Int removes the fractional component of the given number, returning an
419// integer representing the whole number component, rounding towards zero.
420// For example, -1.5 becomes -1.
421//
422// If an infinity is passed to Int, an error is returned.
423func Int(num cty.Value) (cty.Value, error) {
424 if num == cty.PositiveInfinity || num == cty.NegativeInfinity {
425 return cty.NilVal, fmt.Errorf("can't truncate infinity to an integer")
426 }
427 return IntFunc.Call([]cty.Value{num})
428}
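The exported wrappers above are thin shims over the corresponding Func values. A minimal usage sketch follows (illustrative only, not part of the vendored file; the values are arbitrary):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Min and Max are variadic and require at least one argument.
	min, _ := stdlib.Min(cty.NumberIntVal(3), cty.NumberFloatVal(1.5))
	max, _ := stdlib.Max(cty.NumberIntVal(3), cty.NumberFloatVal(1.5))
	fmt.Println(min.AsBigFloat(), max.AsBigFloat()) // 1.5 3

	// Int truncates towards zero, so -1.5 becomes -1.
	i, _ := stdlib.Int(cty.NumberFloatVal(-1.5))
	fmt.Println(i.AsBigFloat()) // -1

	// The comparison wrappers return cty.Bool values.
	lt, _ := stdlib.LessThan(cty.NumberIntVal(1), cty.NumberIntVal(2))
	fmt.Println(lt.True()) // true
}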
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go
new file mode 100644
index 0000000..e2c77c5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go
@@ -0,0 +1,130 @@
1package stdlib
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8 "github.com/zclconf/go-cty/cty/function"
9)
10
11var ConcatFunc = function.New(&function.Spec{
12 Params: []function.Parameter{},
13 VarParam: &function.Parameter{
14 Name: "seqs",
15 Type: cty.DynamicPseudoType,
16 },
17 Type: func(args []cty.Value) (ret cty.Type, err error) {
18 if len(args) == 0 {
19 return cty.NilType, fmt.Errorf("at least one argument is required")
20 }
21
22 if args[0].Type().IsListType() {
23 // Possibly we're going to return a list, if all of our other
24 // args are also lists and we can find a common element type.
25 tys := make([]cty.Type, len(args))
26 for i, val := range args {
27 ty := val.Type()
28 if !ty.IsListType() {
29 tys = nil
30 break
31 }
32 tys[i] = ty
33 }
34
35 if tys != nil {
36 commonType, _ := convert.UnifyUnsafe(tys)
37 if commonType != cty.NilType {
38 return commonType, nil
39 }
40 }
41 }
42
43 etys := make([]cty.Type, 0, len(args))
44 for i, val := range args {
45 ety := val.Type()
46 switch {
47 case ety.IsTupleType():
48 etys = append(etys, ety.TupleElementTypes()...)
49 case ety.IsListType():
50 if !val.IsKnown() {
51 // We need the list to be known so we can count its elements to
52 // build our tuple type, so any concat of an unknown
53 // list can't be typed yet.
54 return cty.DynamicPseudoType, nil
55 }
56
57 l := val.LengthInt()
58 subEty := ety.ElementType()
59 for j := 0; j < l; j++ {
60 etys = append(etys, subEty)
61 }
62 default:
63 return cty.NilType, function.NewArgErrorf(
64 i, "all arguments must be lists or tuples; got %s",
65 ety.FriendlyName(),
66 )
67 }
68 }
69 return cty.Tuple(etys), nil
70 },
71 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
72 switch {
73 case retType.IsListType():
74 // If retType is a list type then we know that all of the
75 // given values will be lists and that they will either be of
76 // retType or of something we can convert to retType.
77 vals := make([]cty.Value, 0, len(args))
78 for i, list := range args {
79 list, err = convert.Convert(list, retType)
80 if err != nil {
81 // Conversion might fail because we used UnifyUnsafe
82 // to choose our return type.
83 return cty.NilVal, function.NewArgError(i, err)
84 }
85
86 it := list.ElementIterator()
87 for it.Next() {
88 _, v := it.Element()
89 vals = append(vals, v)
90 }
91 }
92 if len(vals) == 0 {
93 return cty.ListValEmpty(retType.ElementType()), nil
94 }
95
96 return cty.ListVal(vals), nil
97 case retType.IsTupleType():
98 // If retType is a tuple type then we could have a mixture of
99 // lists and tuples but we know they all have known values
100 // (because our params don't AllowUnknown) and we know that
101 // concatenating them all together will produce a tuple of
102 // retType because of the work we did in the Type function above.
103 vals := make([]cty.Value, 0, len(args))
104
105 for _, seq := range args {
106 // Both lists and tuples support ElementIterator, so this is easy.
107 it := seq.ElementIterator()
108 for it.Next() {
109 _, v := it.Element()
110 vals = append(vals, v)
111 }
112 }
113
114 return cty.TupleVal(vals), nil
115 default:
116 // should never happen if Type is working correctly above
117 panic("unsupported return type")
118 }
119 },
120})
121
122// Concat takes one or more sequences (lists or tuples) and returns the single
123// sequence that results from concatenating them together in order.
124//
125// If all of the given sequences are lists of the same element type then the
126// result is a list of that type. Otherwise, the result is of a tuple type
127// constructed from the given sequence types.
128func Concat(seqs ...cty.Value) (cty.Value, error) {
129 return ConcatFunc.Call(seqs)
130}
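To make the typing rule above concrete, here is an illustrative sketch (not part of the vendored file): concatenating lists of a common element type yields a list, while mixing in a tuple forces a tuple result.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	listA := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	listB := cty.ListVal([]cty.Value{cty.StringVal("c")})

	// Both arguments are lists of string, so the result is a list too.
	asList, err := stdlib.Concat(listA, listB)
	if err != nil {
		panic(err)
	}
	fmt.Println(asList.Type().FriendlyName(), asList.LengthInt()) // list of string 3

	// A tuple argument makes the result a tuple whose element types are
	// taken from the inputs in order.
	tup := cty.TupleVal([]cty.Value{cty.NumberIntVal(1), cty.True})
	asTuple, err := stdlib.Concat(listA, tup)
	if err != nil {
		panic(err)
	}
	fmt.Println(asTuple.Type().IsTupleType(), asTuple.LengthInt()) // true 4
}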
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
new file mode 100644
index 0000000..100078f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
@@ -0,0 +1,195 @@
1package stdlib
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty/convert"
7
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/function"
10)
11
12var SetHasElementFunc = function.New(&function.Spec{
13 Params: []function.Parameter{
14 {
15 Name: "set",
16 Type: cty.Set(cty.DynamicPseudoType),
17 AllowDynamicType: true,
18 },
19 {
20 Name: "elem",
21 Type: cty.DynamicPseudoType,
22 AllowDynamicType: true,
23 },
24 },
25 Type: function.StaticReturnType(cty.Bool),
26 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
27 return args[0].HasElement(args[1]), nil
28 },
29})
30
31var SetUnionFunc = function.New(&function.Spec{
32 Params: []function.Parameter{
33 {
34 Name: "first_set",
35 Type: cty.Set(cty.DynamicPseudoType),
36 AllowDynamicType: true,
37 },
38 },
39 VarParam: &function.Parameter{
40 Name: "other_sets",
41 Type: cty.Set(cty.DynamicPseudoType),
42 AllowDynamicType: true,
43 },
44 Type: setOperationReturnType,
45 Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
46 return s1.Union(s2)
47 }),
48})
49
50var SetIntersectionFunc = function.New(&function.Spec{
51 Params: []function.Parameter{
52 {
53 Name: "first_set",
54 Type: cty.Set(cty.DynamicPseudoType),
55 AllowDynamicType: true,
56 },
57 },
58 VarParam: &function.Parameter{
59 Name: "other_sets",
60 Type: cty.Set(cty.DynamicPseudoType),
61 AllowDynamicType: true,
62 },
63 Type: setOperationReturnType,
64 Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
65 return s1.Intersection(s2)
66 }),
67})
68
69var SetSubtractFunc = function.New(&function.Spec{
70 Params: []function.Parameter{
71 {
72 Name: "a",
73 Type: cty.Set(cty.DynamicPseudoType),
74 AllowDynamicType: true,
75 },
76 {
77 Name: "b",
78 Type: cty.Set(cty.DynamicPseudoType),
79 AllowDynamicType: true,
80 },
81 },
82 Type: setOperationReturnType,
83 Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
84 return s1.Subtract(s2)
85 }),
86})
87
88var SetSymmetricDifferenceFunc = function.New(&function.Spec{
89 Params: []function.Parameter{
90 {
91 Name: "first_set",
92 Type: cty.Set(cty.DynamicPseudoType),
93 AllowDynamicType: true,
94 },
95 },
96 VarParam: &function.Parameter{
97 Name: "other_sets",
98 Type: cty.Set(cty.DynamicPseudoType),
99 AllowDynamicType: true,
100 },
101 Type: setOperationReturnType,
102 Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
103 return s1.SymmetricDifference(s2)
104 }),
105})
106
107// SetHasElement determines whether the given set contains the given value as an
108// element.
109func SetHasElement(set cty.Value, elem cty.Value) (cty.Value, error) {
110 return SetHasElementFunc.Call([]cty.Value{set, elem})
111}
112
113// SetUnion returns a new set containing all of the elements from the given
114// sets, which must have element types that can all be converted to some
115// common type using the standard type unification rules. If conversion
116// is not possible, an error is returned.
117//
118// The union operation is performed after type conversion, which may result
119// in some previously-distinct values being conflated.
120//
121// At least one set must be provided.
122func SetUnion(sets ...cty.Value) (cty.Value, error) {
123 return SetUnionFunc.Call(sets)
124}
125
126// SetIntersection returns a new set containing the elements that exist
127// in all of the given sets, which must have element types that can all be
128// converted to some common type using the standard type unification rules.
129// If conversion is not possible, an error is returned.
130//
131// The intersection operation is performed after type conversion, which may
132// result in some previously-distinct values being conflated.
133//
134// At least one set must be provided.
135func SetIntersection(sets ...cty.Value) (cty.Value, error) {
136 return SetIntersectionFunc.Call(sets)
137}
138
139// SetSubtract returns a new set containing the elements from the
140// first set that are not present in the second set. The sets must have
141// element types that can both be converted to some common type using the
142// standard type unification rules. If conversion is not possible, an error
143// is returned.
144//
145// The subtract operation is performed after type conversion, which may
146// result in some previously-distinct values being conflated.
147func SetSubtract(a, b cty.Value) (cty.Value, error) {
148 return SetSubtractFunc.Call([]cty.Value{a, b})
149}
150
151// SetSymmetricDifference returns a new set containing elements that appear
152// in any one of the given sets but not in more than one. The sets must have
153// element types that can all be converted to some common type using the
154// standard type unification rules. If conversion is not possible, an error
155// is returned.
156//
157// The difference operation is performed after type conversion, which may
158// result in some previously-distinct values being conflated.
159func SetSymmetricDifference(sets ...cty.Value) (cty.Value, error) {
160 return SetSymmetricDifferenceFunc.Call(sets)
161}
162
163func setOperationReturnType(args []cty.Value) (ret cty.Type, err error) {
164 var etys []cty.Type
165 for _, arg := range args {
166 etys = append(etys, arg.Type().ElementType())
167 }
168 newEty, _ := convert.UnifyUnsafe(etys)
169 if newEty == cty.NilType {
170 return cty.NilType, fmt.Errorf("given sets must all have compatible element types")
171 }
172 return cty.Set(newEty), nil
173}
174
175func setOperationImpl(f func(s1, s2 cty.ValueSet) cty.ValueSet) function.ImplFunc {
176 return func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
177 first := args[0]
178 first, err = convert.Convert(first, retType)
179 if err != nil {
180 return cty.NilVal, function.NewArgError(0, err)
181 }
182
183 set := first.AsValueSet()
184 for i, arg := range args[1:] {
185 arg, err := convert.Convert(arg, retType)
186 if err != nil {
187 return cty.NilVal, function.NewArgError(i+1, err)
188 }
189
190 argSet := arg.AsValueSet()
191 set = f(set, argSet)
192 }
193 return cty.SetValFromValueSet(set), nil
194 }
195}
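For orientation, a brief usage sketch of the exported wrappers (illustrative only, not part of the vendored file); note that element types are unified before the operation, as the comments above describe.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	a := cty.SetVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})
	b := cty.SetVal([]cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(3)})

	union, _ := stdlib.SetUnion(a, b)        // {1, 2, 3}
	inter, _ := stdlib.SetIntersection(a, b) // {2}
	diff, _ := stdlib.SetSubtract(a, b)      // {1}
	fmt.Println(union.LengthInt(), inter.LengthInt(), diff.LengthInt()) // 3 1 1

	// A set of numbers and a set of strings unify to a set of strings,
	// which can conflate previously-distinct values.
	c := cty.SetVal([]cty.Value{cty.StringVal("2")})
	mixed, err := stdlib.SetUnion(a, c)
	fmt.Println(mixed.Type().FriendlyName(), err) // set of string <nil>
}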
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go
new file mode 100644
index 0000000..d7c89fa
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go
@@ -0,0 +1,234 @@
1package stdlib
2
3import (
4 "strings"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/gocty"
9 "github.com/apparentlymart/go-textseg/textseg"
10)
11
12var UpperFunc = function.New(&function.Spec{
13 Params: []function.Parameter{
14 {
15 Name: "str",
16 Type: cty.String,
17 AllowDynamicType: true,
18 },
19 },
20 Type: function.StaticReturnType(cty.String),
21 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
22 in := args[0].AsString()
23 out := strings.ToUpper(in)
24 return cty.StringVal(out), nil
25 },
26})
27
28var LowerFunc = function.New(&function.Spec{
29 Params: []function.Parameter{
30 {
31 Name: "str",
32 Type: cty.String,
33 AllowDynamicType: true,
34 },
35 },
36 Type: function.StaticReturnType(cty.String),
37 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
38 in := args[0].AsString()
39 out := strings.ToLower(in)
40 return cty.StringVal(out), nil
41 },
42})
43
44var ReverseFunc = function.New(&function.Spec{
45 Params: []function.Parameter{
46 {
47 Name: "str",
48 Type: cty.String,
49 AllowDynamicType: true,
50 },
51 },
52 Type: function.StaticReturnType(cty.String),
53 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
54 in := []byte(args[0].AsString())
55 out := make([]byte, len(in))
56 pos := len(out)
57
58 inB := []byte(in)
59 for i := 0; i < len(in); {
60 d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true)
61 cluster := in[i : i+d]
62 pos -= len(cluster)
63 copy(out[pos:], cluster)
64 i += d
65 }
66
67 return cty.StringVal(string(out)), nil
68 },
69})
70
71var StrlenFunc = function.New(&function.Spec{
72 Params: []function.Parameter{
73 {
74 Name: "str",
75 Type: cty.String,
76 AllowDynamicType: true,
77 },
78 },
79 Type: function.StaticReturnType(cty.Number),
80 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
81 in := args[0].AsString()
82 l := 0
83
84 inB := []byte(in)
85 for i := 0; i < len(in); {
86 d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true)
87 l++
88 i += d
89 }
90
91 return cty.NumberIntVal(int64(l)), nil
92 },
93})
94
95var SubstrFunc = function.New(&function.Spec{
96 Params: []function.Parameter{
97 {
98 Name: "str",
99 Type: cty.String,
100 AllowDynamicType: true,
101 },
102 {
103 Name: "offset",
104 Type: cty.Number,
105 AllowDynamicType: true,
106 },
107 {
108 Name: "length",
109 Type: cty.Number,
110 AllowDynamicType: true,
111 },
112 },
113 Type: function.StaticReturnType(cty.String),
114 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
115 in := []byte(args[0].AsString())
116 var offset, length int
117
118 var err error
119 err = gocty.FromCtyValue(args[1], &offset)
120 if err != nil {
121 return cty.NilVal, err
122 }
123 err = gocty.FromCtyValue(args[2], &length)
124 if err != nil {
125 return cty.NilVal, err
126 }
127
128 if offset < 0 {
129 totalLenNum, err := Strlen(args[0])
130 if err != nil {
131 // should never happen
132 panic("Strlen returned an error")
133 }
134
135 var totalLen int
136 err = gocty.FromCtyValue(totalLenNum, &totalLen)
137 if err != nil {
138 // should never happen
139 panic("Strlen returned a non-int number")
140 }
141
142 offset += totalLen
143 }
144
145 sub := in
146 pos := 0
147 var i int
148
149 // First we'll seek forward to our offset
150 if offset > 0 {
151 for i = 0; i < len(sub); {
152 d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
153 i += d
154 pos++
155 if pos == offset {
156 break
157 }
158 if i >= len(in) {
159 return cty.StringVal(""), nil
160 }
161 }
162
163 sub = sub[i:]
164 }
165
166 if length < 0 {
167 // Taking the remainder of the string is a fast path since
168 // we can just return the rest of the buffer verbatim.
169 return cty.StringVal(string(sub)), nil
170 }
171
172 // Otherwise we need to start seeking forward again until we
173 // reach the length we want.
174 pos = 0
175 for i = 0; i < len(sub); {
176 d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
177 i += d
178 pos++
179 if pos == length {
180 break
181 }
182 }
183
184 sub = sub[:i]
185
186 return cty.StringVal(string(sub)), nil
187 },
188})
189
190// Upper is a Function that converts a given string to uppercase.
191func Upper(str cty.Value) (cty.Value, error) {
192 return UpperFunc.Call([]cty.Value{str})
193}
194
195// Lower is a Function that converts a given string to lowercase.
196func Lower(str cty.Value) (cty.Value, error) {
197 return LowerFunc.Call([]cty.Value{str})
198}
199
200// Reverse is a Function that reverses the order of the characters in the
201// given string.
202//
203// As usual, "character" for the sake of this function is a grapheme cluster,
204// so combining diacritics (for example) will be considered together as a
205// single character.
206func Reverse(str cty.Value) (cty.Value, error) {
207 return ReverseFunc.Call([]cty.Value{str})
208}
209
210// Strlen is a Function that returns the length of the given string in
211// characters.
212//
213// As usual, "character" for the sake of this function is a grapheme cluster,
214// so combining diacritics (for example) will be considered together as a
215// single character.
216func Strlen(str cty.Value) (cty.Value, error) {
217 return StrlenFunc.Call([]cty.Value{str})
218}
219
220// Substr is a Function that extracts a sequence of characters from another
221// string and creates a new string.
222//
223// As usual, "character" for the sake of this function is a grapheme cluster,
224// so combining diacritics (for example) will be considered together as a
225// single character.
226//
227// The "offset" index may be negative, in which case it is relative to the
228// end of the given string.
229//
230// The "length" may be -1, in which case the remainder of the string after
231// the given offset will be returned.
232func Substr(str cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) {
233 return SubstrFunc.Call([]cty.Value{str, offset, length})
234}
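A short sketch of the grapheme-cluster behaviour documented above (illustrative only, not part of the vendored file); the sample string uses an explicit combining diaeresis so that bytes, runes, and characters all differ.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// "noël", written as 'e' plus U+0308 COMBINING DIAERESIS: 6 bytes,
	// 5 runes, but 4 grapheme clusters.
	s := cty.StringVal("noe\u0308l")

	n, _ := stdlib.Strlen(s)
	fmt.Println(n.AsBigFloat()) // 4

	// A negative offset counts from the end; length -1 means "the rest".
	tail, _ := stdlib.Substr(s, cty.NumberIntVal(-2), cty.NumberIntVal(-1))
	fmt.Println(tail.AsString()) // ël

	// Reverse keeps the diaeresis attached to its base character.
	rev, _ := stdlib.Reverse(s)
	fmt.Println(rev.AsString()) // lëon
}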
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go
new file mode 100644
index 0000000..3495550
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go
@@ -0,0 +1,31 @@
1package function
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// Unpredictable wraps a given function such that it retains the same arguments
8// and type checking behavior but will return an unknown value when called.
9//
10// It is recommended that most functions be "pure", which is to say that they
11// will always produce the same value given particular input. However,
12// sometimes it is necessary to offer functions whose behavior depends on
13// some external state, such as reading a file or determining the current time.
14// In such cases, an unpredictable wrapper might be used to stand in for
15// the function during some sort of prior "checking" phase in order to delay
16// the actual effect until later.
17//
18// While Unpredictable can support a function that isn't pure in its
19// implementation, it still expects a function to be pure in its type checking
20// behavior, except for the special case of returning cty.DynamicPseudoType
21// if it is not yet able to predict its return value based on current argument
22// information.
23func Unpredictable(f Function) Function {
24 newSpec := *f.spec // shallow copy
25 newSpec.Impl = unpredictableImpl
26 return New(&newSpec)
27}
28
29func unpredictableImpl(args []cty.Value, retType cty.Type) (cty.Value, error) {
30 return cty.UnknownVal(retType), nil
31}
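As a minimal sketch of how the wrapper might be used (illustrative only, not part of the vendored file), wrapping one of the stdlib functions defined earlier in this diff:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// The wrapped function keeps UpperFunc's parameter and return-type
	// checking, but every successful call yields an unknown value.
	f := function.Unpredictable(stdlib.UpperFunc)

	v, err := f.Call([]cty.Value{cty.StringVal("hello")})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.IsKnown(), v.Type().Equals(cty.String)) // false true
}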
diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go
new file mode 100644
index 0000000..3d73199
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gob.go
@@ -0,0 +1,125 @@
1package cty
2
3import (
4 "bytes"
5 "encoding/gob"
6 "fmt"
7 "math/big"
8)
9
10// GobEncode is an implementation of the gob.GobEncoder interface, which
11// allows Values to be included in structures encoded with encoding/gob.
12//
13// Currently it is not possible to represent values of capsule types in gob,
14// because the types themselves cannot be represented.
15func (val Value) GobEncode() ([]byte, error) {
16 buf := &bytes.Buffer{}
17 enc := gob.NewEncoder(buf)
18
19 gv := gobValue{
20 Version: 0,
21 Ty: val.ty,
22 V: val.v,
23 }
24
25 err := enc.Encode(gv)
26 if err != nil {
27 return nil, fmt.Errorf("error encoding cty.Value: %s", err)
28 }
29
30 return buf.Bytes(), nil
31}
32
33// GobDecode is an implementation of the gob.GobDecoder interface, which
34// inverts the operation performed by GobEncode. See the documentation of
35// GobEncode for considerations when using cty.Value instances with gob.
36func (val *Value) GobDecode(buf []byte) error {
37 r := bytes.NewReader(buf)
38 dec := gob.NewDecoder(r)
39
40 var gv gobValue
41 err := dec.Decode(&gv)
42 if err != nil {
43 return fmt.Errorf("error decoding cty.Value: %s", err)
44 }
45 if gv.Version != 0 {
46 return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
47 }
48
49 // big.Float seems to, for some reason, lose its "pointerness" when we
50 // round-trip it, so we'll fix that here.
51 if bf, ok := gv.V.(big.Float); ok {
52 gv.V = &bf
53 }
54
55 val.ty = gv.Ty
56 val.v = gv.V
57
58 return nil
59}
60
61// GobEncode is an implementation of the gob.GobEncoder interface, which
62// allows Types to be included in structures encoded with encoding/gob.
63//
64// Currently it is not possible to represent capsule types in gob.
65func (t Type) GobEncode() ([]byte, error) {
66 buf := &bytes.Buffer{}
67 enc := gob.NewEncoder(buf)
68
69 gt := gobType{
70 Version: 0,
71 Impl: t.typeImpl,
72 }
73
74 err := enc.Encode(gt)
75 if err != nil {
76 return nil, fmt.Errorf("error encoding cty.Type: %s", err)
77 }
78
79 return buf.Bytes(), nil
80}
81
82// GobDecode is an implementation of the gob.GobDecoder interface, which
83// reverses the encoding performed by GobEncode to allow types to be recovered
84// from gob buffers.
85func (t *Type) GobDecode(buf []byte) error {
86 r := bytes.NewReader(buf)
87 dec := gob.NewDecoder(r)
88
89 var gt gobType
90 err := dec.Decode(&gt)
91 if err != nil {
92 return fmt.Errorf("error decoding cty.Type: %s", err)
93 }
94 if gt.Version != 0 {
95 return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
96 }
97
98 t.typeImpl = gt.Impl
99
100 return nil
101}
102
103// Capsule types cannot currently be gob-encoded, because they rely on pointer
104// equality and we have no way to recover the original pointer on decode.
105func (t *capsuleType) GobEncode() ([]byte, error) {
106 return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName())
107}
108
109func (t *capsuleType) GobDecode(buf []byte) error {
110 return fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName())
111}
112
113type gobValue struct {
114 Version int
115 Ty Type
116 V interface{}
117}
118
119type gobType struct {
120 Version int
121 Impl typeImpl
122}
123
124type gobCapsuleTypeImpl struct {
125}
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
new file mode 100644
index 0000000..a5177d2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
@@ -0,0 +1,7 @@
1// Package gocty deals with converting between cty Values and native go
2// values.
3//
4// It operates under a similar principle to the encoding/json and
5// encoding/xml packages in the standard library, using reflection to
6// populate native Go data structures from cty values and vice-versa.
7package gocty
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
new file mode 100644
index 0000000..94ffd2f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
@@ -0,0 +1,43 @@
1package gocty
2
3import (
4 "math/big"
5 "reflect"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/set"
9)
10
11var valueType = reflect.TypeOf(cty.Value{})
12var typeType = reflect.TypeOf(cty.Type{})
13
14var setType = reflect.TypeOf(set.Set{})
15
16var bigFloatType = reflect.TypeOf(big.Float{})
17var bigIntType = reflect.TypeOf(big.Int{})
18
19var emptyInterfaceType = reflect.TypeOf(interface{}(nil))
20
21var stringType = reflect.TypeOf("")
22
23// structTagIndices interrogates the fields of the given type (which must
24// be a struct type, or we'll panic) and returns a map from the cty
25// attribute names declared via struct tags to the indices of the
26// fields holding those tags.
27//
28// This function will panic if two fields within the struct are tagged with
29// the same cty attribute name.
30func structTagIndices(st reflect.Type) map[string]int {
31 ct := st.NumField()
32 ret := make(map[string]int, ct)
33
34 for i := 0; i < ct; i++ {
35 field := st.Field(i)
36 attrName := field.Tag.Get("cty")
37 if attrName != "" {
38 ret[attrName] = i
39 }
40 }
41
42 return ret
43}
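The cty struct tags interrogated by structTagIndices are what drive the struct handling in the in.go and out.go conversions that follow. A hypothetical sketch (the Endpoint type and its values are invented purely for illustration):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

// Endpoint maps its fields to object attribute names via cty struct tags.
type Endpoint struct {
	Host string `cty:"host"`
	Port int    `cty:"port"`
}

func main() {
	objTy := cty.Object(map[string]cty.Type{
		"host": cty.String,
		"port": cty.Number,
	})

	v, err := gocty.ToCtyValue(Endpoint{Host: "example.com", Port: 8080}, objTy)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("host").AsString()) // example.com
}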
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go
new file mode 100644
index 0000000..642501b
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go
@@ -0,0 +1,528 @@
1package gocty
2
3import (
4 "math/big"
5 "reflect"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/set"
9)
10
11// ToCtyValue produces a cty.Value from a Go value. The result will conform
12// to the given type, or an error will be returned if this is not possible.
13//
14// The target type serves as a hint to resolve ambiguities in the mapping.
15// For example, the Go type set.Set tells us that the value is a set but
16// does not describe the set's element type. This also allows for convenient
17// conversions, such as populating a set from a slice rather than having to
18// first explicitly instantiate a set.Set.
19//
20// The audience of this function is assumed to be the developers of Go code
21// that is integrating with cty, and thus the error messages it returns are
22// presented from Go's perspective. These messages are thus not appropriate
23// for display to end-users. An error returned from ToCtyValue represents a
24// bug in the calling program, not user error.
25func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) {
26 // 'path' starts off as empty but will grow for each level of recursive
27 // call we make, so by the time toCtyValue returns it is likely to have
28 // unused capacity on the end of it, depending on how deeply-recursive
29 // the given Type is.
30 path := make(cty.Path, 0)
31 return toCtyValue(reflect.ValueOf(val), ty, path)
32}
33
34func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) {
35
36 switch ty {
37 case cty.Bool:
38 return toCtyBool(val, path)
39 case cty.Number:
40 return toCtyNumber(val, path)
41 case cty.String:
42 return toCtyString(val, path)
43 case cty.DynamicPseudoType:
44 return toCtyDynamic(val, path)
45 }
46
47 switch {
48 case ty.IsListType():
49 return toCtyList(val, ty.ElementType(), path)
50 case ty.IsMapType():
51 return toCtyMap(val, ty.ElementType(), path)
52 case ty.IsSetType():
53 return toCtySet(val, ty.ElementType(), path)
54 case ty.IsObjectType():
55 return toCtyObject(val, ty.AttributeTypes(), path)
56 case ty.IsTupleType():
57 return toCtyTuple(val, ty.TupleElementTypes(), path)
58 case ty.IsCapsuleType():
59 return toCtyCapsule(val, ty, path)
60 }
61
62 // We should never fall out here
63 return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty)
64}
65
66func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) {
67 if val = toCtyUnwrapPointer(val); !val.IsValid() {
68 return cty.NullVal(cty.Bool), nil
69 }
70
71 switch val.Kind() {
72
73 case reflect.Bool:
74 return cty.BoolVal(val.Bool()), nil
75
76 default:
77 return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind())
78
79 }
80
81}
82
83func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) {
84 if val = toCtyUnwrapPointer(val); !val.IsValid() {
85 return cty.NullVal(cty.Number), nil
86 }
87
88 switch val.Kind() {
89
90 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
91 return cty.NumberIntVal(val.Int()), nil
92
93 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
94 return cty.NumberUIntVal(val.Uint()), nil
95
96 case reflect.Float32, reflect.Float64:
97 return cty.NumberFloatVal(val.Float()), nil
98
99 case reflect.Struct:
100 if val.Type().AssignableTo(bigIntType) {
101 bigInt := val.Interface().(big.Int)
102 bigFloat := (&big.Float{}).SetInt(&bigInt)
103 val = reflect.ValueOf(*bigFloat)
104 }
105
106 if val.Type().AssignableTo(bigFloatType) {
107 bigFloat := val.Interface().(big.Float)
108 return cty.NumberVal(&bigFloat), nil
109 }
110
111 fallthrough
112 default:
113 return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind())
114
115 }
116
117}
118
119func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) {
120 if val = toCtyUnwrapPointer(val); !val.IsValid() {
121 return cty.NullVal(cty.String), nil
122 }
123
124 switch val.Kind() {
125
126 case reflect.String:
127 return cty.StringVal(val.String()), nil
128
129 default:
130 return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind())
131
132 }
133
134}
135
136func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
137 if val = toCtyUnwrapPointer(val); !val.IsValid() {
138 return cty.NullVal(cty.List(ety)), nil
139 }
140
141 switch val.Kind() {
142
143 case reflect.Slice:
144 if val.IsNil() {
145 return cty.NullVal(cty.List(ety)), nil
146 }
147 fallthrough
148 case reflect.Array:
149 if val.Len() == 0 {
150 return cty.ListValEmpty(ety), nil
151 }
152
153 // While we work on our elements we'll temporarily grow
154 // path to give us a place to put our index step.
155 path = append(path, cty.PathStep(nil))
156
157 vals := make([]cty.Value, val.Len())
158 for i := range vals {
159 var err error
160 path[len(path)-1] = cty.IndexStep{
161 Key: cty.NumberIntVal(int64(i)),
162 }
163 vals[i], err = toCtyValue(val.Index(i), ety, path)
164 if err != nil {
165 return cty.NilVal, err
166 }
167 }
168
169 // Discard our extra path segment, retaining it as extra capacity
170 // for future appending to the path.
171 path = path[:len(path)-1]
172
173 return cty.ListVal(vals), nil
174
175 default:
176 return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety))
177
178 }
179}
180
181func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
182 if val = toCtyUnwrapPointer(val); !val.IsValid() {
183 return cty.NullVal(cty.Map(ety)), nil
184 }
185
186 switch val.Kind() {
187
188 case reflect.Map:
189 if val.IsNil() {
190 return cty.NullVal(cty.Map(ety)), nil
191 }
192
193 if val.Len() == 0 {
194 return cty.MapValEmpty(ety), nil
195 }
196
197 keyType := val.Type().Key()
198 if keyType.Kind() != reflect.String {
199 return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType)
200 }
201
202 // While we work on our elements we'll temporarily grow
203 // path to give us a place to put our index step.
204 path = append(path, cty.PathStep(nil))
205
206 vals := make(map[string]cty.Value, val.Len())
207 for _, kv := range val.MapKeys() {
208 k := kv.String()
209 var err error
210 path[len(path)-1] = cty.IndexStep{
211 Key: cty.StringVal(k),
212 }
213 vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path)
214 if err != nil {
215 return cty.NilVal, err
216 }
217 }
218
219 // Discard our extra path segment, retaining it as extra capacity
220 // for future appending to the path.
221 path = path[:len(path)-1]
222
223 return cty.MapVal(vals), nil
224
225 default:
226 return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety))
227
228 }
229}
230
231func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
232 if val = toCtyUnwrapPointer(val); !val.IsValid() {
233 return cty.NullVal(cty.Set(ety)), nil
234 }
235
236 var vals []cty.Value
237
238 switch val.Kind() {
239
240 case reflect.Slice:
241 if val.IsNil() {
242 return cty.NullVal(cty.Set(ety)), nil
243 }
244 fallthrough
245 case reflect.Array:
246 if val.Len() == 0 {
247 return cty.SetValEmpty(ety), nil
248 }
249
250 vals = make([]cty.Value, val.Len())
251 for i := range vals {
252 var err error
253 vals[i], err = toCtyValue(val.Index(i), ety, path)
254 if err != nil {
255 return cty.NilVal, err
256 }
257 }
258
259 case reflect.Struct:
260
261 if !val.Type().AssignableTo(setType) {
262 return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety))
263 }
264
265 rawSet := val.Interface().(set.Set)
266 inVals := rawSet.Values()
267
268 if len(inVals) == 0 {
269 return cty.SetValEmpty(ety), nil
270 }
271
272 vals = make([]cty.Value, len(inVals))
273 for i := range inVals {
274 var err error
275 vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path)
276 if err != nil {
277 return cty.NilVal, err
278 }
279 }
280
281 default:
282 return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety))
283
284 }
285
286 return cty.SetVal(vals), nil
287}
288
289func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) {
290 if val = toCtyUnwrapPointer(val); !val.IsValid() {
291 return cty.NullVal(cty.Object(attrTypes)), nil
292 }
293
294 switch val.Kind() {
295
296 case reflect.Map:
297 if val.IsNil() {
298 return cty.NullVal(cty.Object(attrTypes)), nil
299 }
300
301 keyType := val.Type().Key()
302 if keyType.Kind() != reflect.String {
303 return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType)
304 }
305
306 if len(attrTypes) == 0 {
307 return cty.EmptyObjectVal, nil
308 }
309
310 // While we work on our elements we'll temporarily grow
311 // path to give us a place to put our GetAttr step.
312 path = append(path, cty.PathStep(nil))
313
314 haveKeys := make(map[string]struct{}, val.Len())
315 for _, kv := range val.MapKeys() {
316 haveKeys[kv.String()] = struct{}{}
317 }
318
319 vals := make(map[string]cty.Value, len(attrTypes))
320 for k, at := range attrTypes {
321 var err error
322 path[len(path)-1] = cty.GetAttrStep{
323 Name: k,
324 }
325
326 if _, have := haveKeys[k]; !have {
327 vals[k] = cty.NullVal(at)
328 continue
329 }
330
331 vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path)
332 if err != nil {
333 return cty.NilVal, err
334 }
335 }
336
337 // Discard our extra path segment, retaining it as extra capacity
338 // for future appending to the path.
339 path = path[:len(path)-1]
340
341 return cty.ObjectVal(vals), nil
342
343 case reflect.Struct:
344 if len(attrTypes) == 0 {
345 return cty.EmptyObjectVal, nil
346 }
347
348 // While we work on our elements we'll temporarily grow
349 // path to give us a place to put our GetAttr step.
350 path = append(path, cty.PathStep(nil))
351
352 attrFields := structTagIndices(val.Type())
353
354 vals := make(map[string]cty.Value, len(attrTypes))
355 for k, at := range attrTypes {
356 path[len(path)-1] = cty.GetAttrStep{
357 Name: k,
358 }
359
360 if fieldIdx, have := attrFields[k]; have {
361 var err error
362 vals[k], err = toCtyValue(val.Field(fieldIdx), at, path)
363 if err != nil {
364 return cty.NilVal, err
365 }
366 } else {
367 vals[k] = cty.NullVal(at)
368 }
369 }
370
371 // Discard our extra path segment, retaining it as extra capacity
372 // for future appending to the path.
373 path = path[:len(path)-1]
374
375 return cty.ObjectVal(vals), nil
376
377 default:
378 return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes))
379
380 }
381}
382
383func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) {
384 if val = toCtyUnwrapPointer(val); !val.IsValid() {
385 return cty.NullVal(cty.Tuple(elemTypes)), nil
386 }
387
388 switch val.Kind() {
389
390 case reflect.Slice:
391 if val.IsNil() {
392 return cty.NullVal(cty.Tuple(elemTypes)), nil
393 }
394
395 if val.Len() != len(elemTypes) {
396 return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes))
397 }
398
399 if len(elemTypes) == 0 {
400 return cty.EmptyTupleVal, nil
401 }
402
403 // While we work on our elements we'll temporarily grow
404 // path to give us a place to put our Index step.
405 path = append(path, cty.PathStep(nil))
406
407 vals := make([]cty.Value, len(elemTypes))
408 for i, ety := range elemTypes {
409 var err error
410
411 path[len(path)-1] = cty.IndexStep{
412 Key: cty.NumberIntVal(int64(i)),
413 }
414
415 vals[i], err = toCtyValue(val.Index(i), ety, path)
416 if err != nil {
417 return cty.NilVal, err
418 }
419 }
420
421 // Discard our extra path segment, retaining it as extra capacity
422 // for future appending to the path.
423 path = path[:len(path)-1]
424
425 return cty.TupleVal(vals), nil
426
427 case reflect.Struct:
428 fieldCount := val.Type().NumField()
429 if fieldCount != len(elemTypes) {
430 return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes))
431 }
432
433 if len(elemTypes) == 0 {
434 return cty.EmptyTupleVal, nil
435 }
436
437 // While we work on our elements we'll temporarily grow
438 // path to give us a place to put our Index step.
439 path = append(path, cty.PathStep(nil))
440
441 vals := make([]cty.Value, len(elemTypes))
442 for i, ety := range elemTypes {
443 var err error
444
445 path[len(path)-1] = cty.IndexStep{
446 Key: cty.NumberIntVal(int64(i)),
447 }
448
449 vals[i], err = toCtyValue(val.Field(i), ety, path)
450 if err != nil {
451 return cty.NilVal, err
452 }
453 }
454
455 // Discard our extra path segment, retaining it as extra capacity
456 // for future appending to the path.
457 path = path[:len(path)-1]
458
459 return cty.TupleVal(vals), nil
460
461 default:
462 return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes))
463
464 }
465}
466
467func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) {
468 if val = toCtyUnwrapPointer(val); !val.IsValid() {
469 return cty.NullVal(capsuleType), nil
470 }
471
472 if val.Kind() != reflect.Ptr {
473 if !val.CanAddr() {
474 return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType)
475 }
476
477 val = val.Addr()
478 }
479
480 if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) {
481 return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType)
482 }
483
484 return cty.CapsuleVal(capsuleType, val.Interface()), nil
485}
486
487func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) {
488 if val = toCtyUnwrapPointer(val); !val.IsValid() {
489 return cty.NullVal(cty.DynamicPseudoType), nil
490 }
491
492 switch val.Kind() {
493
494 case reflect.Struct:
495 if !val.Type().AssignableTo(valueType) {
496 return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type())
497 }
498
499 return val.Interface().(cty.Value), nil
500
501 default:
502 return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind())
503
504 }
505
506}
507
508// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three
509// possible outcomes:
510//
511// - Given value isn't a pointer, so it's just returned as-is.
512// - Given value is a non-nil pointer, in which case it is dereferenced
513// and the result returned.
514// - Given value is a nil pointer, in which case an invalid value is returned.
515//
516// For nested pointer types, like **int, they are all dereferenced in turn
517// until a non-pointer value is found, or until a nil pointer is encountered.
518func toCtyUnwrapPointer(val reflect.Value) reflect.Value {
519 for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface {
520 if val.IsNil() {
521 return reflect.Value{}
522 }
523
524 val = val.Elem()
525 }
526
527 return val
528}
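A brief round-trip sketch tying ToCtyValue to its counterpart FromCtyValue in out.go below (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

func main() {
	// The target type resolves ambiguity: a Go slice could become a list,
	// set, or tuple, so we say which one we want.
	listVal, err := gocty.ToCtyValue([]string{"a", "b"}, cty.List(cty.String))
	if err != nil {
		panic(err)
	}
	fmt.Println(listVal.LengthInt()) // 2

	// FromCtyValue requires a pointer target and reports errors in cty
	// terms rather than Go terms.
	var n int
	if err := gocty.FromCtyValue(cty.NumberIntVal(12), &n); err != nil {
		panic(err)
	}
	fmt.Println(n) // 12

	// Values that don't fit the target are rejected rather than silently
	// truncated.
	var small int8
	err = gocty.FromCtyValue(cty.NumberIntVal(1000), &small)
	fmt.Println(err != nil) // true
}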
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/out.go b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go
new file mode 100644
index 0000000..99b65a7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go
@@ -0,0 +1,705 @@
1package gocty
2
3import (
4 "math/big"
5 "reflect"
6
7 "math"
8
9 "github.com/zclconf/go-cty/cty"
10)
11
12// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer,
13// using a fixed set of conversion rules.
14//
15// This function considers its audience to be the creator of the cty Value
16// given, and thus the error messages it generates are (unlike with ToCtyValue)
17// presented in cty terminology that is generally appropriate to return to
18// end-users in applications where cty data structures are built from
19// user-provided configuration. In particular this means that if incorrect
20// target types are provided by the calling application the resulting error
21// messages are likely to be confusing, since we assume that the given target
22// type is correct and the cty.Value is where the error lies.
23//
24// If an error is returned, the target data structure may have been partially
25// populated, but the degree to which this is true is an implementation
26// detail that the calling application should not rely on.
27//
28// The function will panic if given a non-pointer as the Go value target,
29// since that is considered to be a bug in the calling program.
30func FromCtyValue(val cty.Value, target interface{}) error {
31 tVal := reflect.ValueOf(target)
32 if tVal.Kind() != reflect.Ptr {
33 panic("target value is not a pointer")
34 }
35 if tVal.IsNil() {
36 panic("target value is nil pointer")
37 }
38
39 // 'path' starts off as empty but will grow for each level of recursive
40 // call we make, so by the time fromCtyValue returns it is likely to have
41 // unused capacity on the end of it, depending on how deeply-recursive
42 // the given cty.Value is.
43 path := make(cty.Path, 0)
44 return fromCtyValue(val, tVal, path)
45}
46
47func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error {
48 ty := val.Type()
49
50 deepTarget := fromCtyPopulatePtr(target, false)
51
52 // If we're decoding into a cty.Value then we just pass through the
53 // value as-is, to enable partial decoding. This is the only situation
54 // where unknown values are permitted.
55 if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) {
56 deepTarget.Set(reflect.ValueOf(val))
57 return nil
58 }
59
60 // Lists and maps can be nil without indirection, but everything else
61 // requires a pointer and we set it immediately to nil.
62 // We also make an exception for capsule types because we want to handle
63 // pointers specially for these.
64 // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while
65 // other types can assume no nulls after this point.)
66 if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() {
67 target = fromCtyPopulatePtr(target, true)
68 if target.Kind() != reflect.Ptr {
69 return path.NewErrorf("null value is not allowed")
70 }
71
72 target.Set(reflect.Zero(target.Type()))
73 return nil
74 }
75
76 target = deepTarget
77
78 if !val.IsKnown() {
79 return path.NewErrorf("value must be known")
80 }
81
82 switch ty {
83 case cty.Bool:
84 return fromCtyBool(val, target, path)
85 case cty.Number:
86 return fromCtyNumber(val, target, path)
87 case cty.String:
88 return fromCtyString(val, target, path)
89 }
90
91 switch {
92 case ty.IsListType():
93 return fromCtyList(val, target, path)
94 case ty.IsMapType():
95 return fromCtyMap(val, target, path)
96 case ty.IsSetType():
97 return fromCtySet(val, target, path)
98 case ty.IsObjectType():
99 return fromCtyObject(val, target, path)
100 case ty.IsTupleType():
101 return fromCtyTuple(val, target, path)
102 case ty.IsCapsuleType():
103 return fromCtyCapsule(val, target, path)
104 }
105
106 // We should never fall out here; reaching here indicates a bug in this
107 // function.
108 return path.NewErrorf("unsupported source type %#v", ty)
109}
110
111func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error {
112 switch target.Kind() {
113
114 case reflect.Bool:
115 if val.True() {
116 target.Set(reflect.ValueOf(true))
117 } else {
118 target.Set(reflect.ValueOf(false))
119 }
120 return nil
121
122 default:
123 return likelyRequiredTypesError(path, target)
124
125 }
126}
127
128func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error {
129 bf := val.AsBigFloat()
130
131 switch target.Kind() {
132
133 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
134 return fromCtyNumberInt(bf, target, path)
135
136 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
137 return fromCtyNumberUInt(bf, target, path)
138
139 case reflect.Float32, reflect.Float64:
140 return fromCtyNumberFloat(bf, target, path)
141
142 case reflect.Struct:
143 return fromCtyNumberBig(bf, target, path)
144
145 default:
146 return likelyRequiredTypesError(path, target)
147
148 }
149}
150
151func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error {
152 // Doing this with switch rather than << arithmetic because << with
153 // result >32-bits is not portable to 32-bit systems.
154 var min int64
155 var max int64
156 switch target.Type().Bits() {
157 case 8:
158 min = math.MinInt8
159 max = math.MaxInt8
160 case 16:
161 min = math.MinInt16
162 max = math.MaxInt16
163 case 32:
164 min = math.MinInt32
165 max = math.MaxInt32
166 case 64:
167 min = math.MinInt64
168 max = math.MaxInt64
169 default:
170 panic("weird number of bits in target int")
171 }
172
173 iv, accuracy := bf.Int64()
174 if accuracy != big.Exact || iv < min || iv > max {
175 return path.NewErrorf("value must be a whole number, between %d and %d", min, max)
176 }
177
178 target.Set(reflect.ValueOf(iv).Convert(target.Type()))
179
180 return nil
181}
182
183func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error {
184 // Doing this with switch rather than << arithmetic because << with
185 // result >32-bits is not portable to 32-bit systems.
186 var max uint64
187 switch target.Type().Bits() {
188 case 8:
189 max = math.MaxUint8
190 case 16:
191 max = math.MaxUint16
192 case 32:
193 max = math.MaxUint32
194 case 64:
195 max = math.MaxUint64
196 default:
197 panic("weird number of bits in target uint")
198 }
199
200 iv, accuracy := bf.Uint64()
201 if accuracy != big.Exact || iv > max {
202 return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max)
203 }
204
205 target.Set(reflect.ValueOf(iv).Convert(target.Type()))
206
207 return nil
208}
209
210func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error {
211 switch target.Kind() {
212 case reflect.Float32:
213 fv, accuracy := bf.Float32()
214 if accuracy != big.Exact {
215 // We allow the precision to be truncated as part of our conversion,
216 // but we don't want to silently introduce infinities.
217 if math.IsInf(float64(fv), 0) {
218 return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat32, math.MaxFloat32)
219 }
220 }
221 target.Set(reflect.ValueOf(fv))
222 return nil
223 case reflect.Float64:
224 fv, accuracy := bf.Float64()
225 if accuracy != big.Exact {
226 // We allow the precision to be truncated as part of our conversion,
227 // but we don't want to silently introduce infinities.
228 if math.IsInf(fv, 0) {
229 return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64)
230 }
231 }
232 target.Set(reflect.ValueOf(fv))
233 return nil
234 default:
235 panic("unsupported kind of float")
236 }
237}
238
239func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error {
240 switch {
241
242 case bigFloatType.AssignableTo(target.Type()):
243 // Easy!
244 target.Set(reflect.ValueOf(bf).Elem())
245 return nil
246
247 case bigIntType.AssignableTo(target.Type()):
248 bi, accuracy := bf.Int(nil)
249 if accuracy != big.Exact {
250 return path.NewErrorf("value must be a whole number")
251 }
252 target.Set(reflect.ValueOf(bi).Elem())
253 return nil
254
255 default:
256 return likelyRequiredTypesError(path, target)
257 }
258}
259
260func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error {
261 switch target.Kind() {
262
263 case reflect.String:
264 target.Set(reflect.ValueOf(val.AsString()))
265 return nil
266
267 default:
268 return likelyRequiredTypesError(path, target)
269
270 }
271}
272
273func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error {
274 switch target.Kind() {
275
276 case reflect.Slice:
277 if val.IsNull() {
278 target.Set(reflect.Zero(target.Type()))
279 return nil
280 }
281
282 length := val.LengthInt()
283 tv := reflect.MakeSlice(target.Type(), length, length)
284
285 path = append(path, nil)
286
287 i := 0
288 var err error
289 val.ForEachElement(func(key cty.Value, val cty.Value) bool {
290 path[len(path)-1] = cty.IndexStep{
291 Key: cty.NumberIntVal(int64(i)),
292 }
293
294 targetElem := tv.Index(i)
295 err = fromCtyValue(val, targetElem, path)
296 if err != nil {
297 return true
298 }
299
300 i++
301 return false
302 })
303 if err != nil {
304 return err
305 }
306
307 path = path[:len(path)-1]
308
309 target.Set(tv)
310 return nil
311
312 case reflect.Array:
313 if val.IsNull() {
314 return path.NewErrorf("null value is not allowed")
315 }
316
317 length := val.LengthInt()
318 if length != target.Len() {
319 return path.NewErrorf("must be a list of length %d", target.Len())
320 }
321
322 path = append(path, nil)
323
324 i := 0
325 var err error
326 val.ForEachElement(func(key cty.Value, val cty.Value) bool {
327 path[len(path)-1] = cty.IndexStep{
328 Key: cty.NumberIntVal(int64(i)),
329 }
330
331 targetElem := target.Index(i)
332 err = fromCtyValue(val, targetElem, path)
333 if err != nil {
334 return true
335 }
336
337 i++
338 return false
339 })
340 if err != nil {
341 return err
342 }
343
344 path = path[:len(path)-1]
345
346 return nil
347
348 default:
349 return likelyRequiredTypesError(path, target)
350
351 }
352}
353
354func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error {
355
356 switch target.Kind() {
357
358 case reflect.Map:
359 if val.IsNull() {
360 target.Set(reflect.Zero(target.Type()))
361 return nil
362 }
363
364 tv := reflect.MakeMap(target.Type())
365 et := target.Type().Elem()
366
367 path = append(path, nil)
368
369 var err error
370 val.ForEachElement(func(key cty.Value, val cty.Value) bool {
371 path[len(path)-1] = cty.IndexStep{
372 Key: key,
373 }
374
375 ks := key.AsString()
376
377 targetElem := reflect.New(et)
378 err = fromCtyValue(val, targetElem, path)
379
380 tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem())
381
382 return err != nil
383 })
384 if err != nil {
385 return err
386 }
387
388 path = path[:len(path)-1]
389
390 target.Set(tv)
391 return nil
392
393 default:
394 return likelyRequiredTypesError(path, target)
395
396 }
397}
398
399func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error {
400 switch target.Kind() {
401
402 case reflect.Slice:
403 if val.IsNull() {
404 target.Set(reflect.Zero(target.Type()))
405 return nil
406 }
407
408 length := val.LengthInt()
409 tv := reflect.MakeSlice(target.Type(), length, length)
410
411 i := 0
412 var err error
413 val.ForEachElement(func(key cty.Value, val cty.Value) bool {
414 targetElem := tv.Index(i)
415 err = fromCtyValue(val, targetElem, path)
416 if err != nil {
417 return true
418 }
419
420 i++
421 return false
422 })
423 if err != nil {
424 return err
425 }
426
427 target.Set(tv)
428 return nil
429
430 case reflect.Array:
431 if val.IsNull() {
432 return path.NewErrorf("null value is not allowed")
433 }
434
435 length := val.LengthInt()
436 if length != target.Len() {
437 return path.NewErrorf("must be a set of length %d", target.Len())
438 }
439
440 i := 0
441 var err error
442 val.ForEachElement(func(key cty.Value, val cty.Value) bool {
443 targetElem := target.Index(i)
444 err = fromCtyValue(val, targetElem, path)
445 if err != nil {
446 return true
447 }
448
449 i++
450 return false
451 })
452 if err != nil {
453 return err
454 }
455
456 return nil
457
458 // TODO: decode into set.Set instance
459
460 default:
461 return likelyRequiredTypesError(path, target)
462
463 }
464}
465
466func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error {
467
468 switch target.Kind() {
469
470 case reflect.Struct:
471
472 attrTypes := val.Type().AttributeTypes()
473 targetFields := structTagIndices(target.Type())
474
475 path = append(path, nil)
476
477 for k, i := range targetFields {
478 if _, exists := attrTypes[k]; !exists {
479 // If the field in question isn't able to represent nil,
480 // that's an error.
481 fk := target.Field(i).Kind()
482 switch fk {
483 case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface:
484 // okay
485 default:
486 return path.NewErrorf("missing required attribute %q", k)
487 }
488 }
489 }
490
491 for k := range attrTypes {
492 path[len(path)-1] = cty.GetAttrStep{
493 Name: k,
494 }
495
496 fieldIdx, exists := targetFields[k]
497 if !exists {
498 return path.NewErrorf("unsupported attribute %q", k)
499 }
500
501 ev := val.GetAttr(k)
502
503 targetField := target.Field(fieldIdx)
504 err := fromCtyValue(ev, targetField, path)
505 if err != nil {
506 return err
507 }
508 }
509
510 path = path[:len(path)-1]
511
512 return nil
513
514 default:
515 return likelyRequiredTypesError(path, target)
516
517 }
518}
519
520func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error {
521
522 switch target.Kind() {
523
524 case reflect.Struct:
525
526 elemTypes := val.Type().TupleElementTypes()
527 fieldCount := target.Type().NumField()
528
529 if fieldCount != len(elemTypes) {
530 return path.NewErrorf("a tuple of %d elements is required", fieldCount)
531 }
532
533 path = append(path, nil)
534
535 for i := range elemTypes {
536 path[len(path)-1] = cty.IndexStep{
537 Key: cty.NumberIntVal(int64(i)),
538 }
539
540 ev := val.Index(cty.NumberIntVal(int64(i)))
541
542 targetField := target.Field(i)
543 err := fromCtyValue(ev, targetField, path)
544 if err != nil {
545 return err
546 }
547 }
548
549 path = path[:len(path)-1]
550
551 return nil
552
553 default:
554 return likelyRequiredTypesError(path, target)
555
556 }
557}
558
559func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error {
560
561 if target.Kind() == reflect.Ptr {
562 // Walk through indirection until we get to the last pointer,
563 // which we might set to null below.
564 target = fromCtyPopulatePtr(target, true)
565
566 if val.IsNull() {
567 target.Set(reflect.Zero(target.Type()))
568 return nil
569 }
570
571 // Since a capsule contains a pointer to an object, we'll preserve
572 // that pointer on the way out and thus allow the caller to recover
573 // the original object, rather than a copy of it.
574
575 eType := val.Type().EncapsulatedType()
576
577 if !eType.AssignableTo(target.Elem().Type()) {
578 // Our interface contract promises that we won't expose Go
579 // implementation details in error messages, so we need to keep
580 // this vague. This can only arise if a calling application has
581 // more than one capsule type in play and a user mixes them up.
582 return path.NewErrorf("incorrect type %s", val.Type().FriendlyName())
583 }
584
585 target.Set(reflect.ValueOf(val.EncapsulatedValue()))
586
587 return nil
588 } else {
589 if val.IsNull() {
590 return path.NewErrorf("null value is not allowed")
591 }
592
593 // If our target isn't a pointer then we will attempt to copy
594 // the encapsulated value into it.
595
596 eType := val.Type().EncapsulatedType()
597
598 if !eType.AssignableTo(target.Type()) {
599 // Our interface contract promises that we won't expose Go
600 // implementation details in error messages, so we need to keep
601 // this vague. This can only arise if a calling application has
602 // more than one capsule type in play and a user mixes them up.
603 return path.NewErrorf("incorrect type %s", val.Type().FriendlyName())
604 }
605
606 // We know that EncapsulatedValue is always a pointer, so we
607 // can safely call .Elem on its reflect.Value.
608 target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem())
609
610 return nil
611 }
612
613}
614
615// fromCtyPopulatePtr recognizes when target is a pointer type and allocates
616// a value to assign to that pointer, which it returns.
617//
618// If the given value has multiple levels of indirection, like **int, these
619// will be processed in turn so that the return value is guaranteed to be
620// a non-pointer.
621//
622// As an exception, if decodingNull is true then the returned value will be
623// the final level of pointer, if any, so that the caller can assign it
624// as nil to represent a null value. If the given target value is not a pointer
625// at all then the returned value will be just the given target, so the caller
626// must test if the returned value is a pointer before trying to assign nil
627// to it.
628func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value {
629 for {
630 if target.Kind() == reflect.Interface && !target.IsNil() {
631 e := target.Elem()
632 if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
633 target = e
634 }
635 }
636
637 if target.Kind() != reflect.Ptr {
638 break
639 }
640
641 // Stop early if we're decodingNull and we've found our last indirection
642 if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() {
643 break
644 }
645
646 if target.IsNil() {
647 target.Set(reflect.New(target.Type().Elem()))
648 }
649
650 target = target.Elem()
651 }
652 return target
653}
654
655// likelyRequiredTypesError returns an error that states which types are
656// acceptable by making some assumptions about what types we support for
657// each target Go kind. It's not a precise science but it allows us to return
658// an error message that is cty-user-oriented rather than Go-oriented.
659//
660// Generally these error messages should be a matter of last resort, since
661// the calling application should be validating user-provided value types
662// before decoding anyway.
663func likelyRequiredTypesError(path cty.Path, target reflect.Value) error {
664 switch target.Kind() {
665
666 case reflect.Bool:
667 return path.NewErrorf("bool value is required")
668
669 case reflect.String:
670 return path.NewErrorf("string value is required")
671
672 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
673 fallthrough
674 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
675 fallthrough
676 case reflect.Float32, reflect.Float64:
677 return path.NewErrorf("number value is required")
678
679 case reflect.Slice, reflect.Array:
680 return path.NewErrorf("list or set value is required")
681
682 case reflect.Map:
683 return path.NewErrorf("map or object value is required")
684
685 case reflect.Struct:
686 switch {
687
688 case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType):
689 return path.NewErrorf("number value is required")
690
691 case target.Type().AssignableTo(setType):
692 return path.NewErrorf("set or list value is required")
693
694 default:
695 return path.NewErrorf("object or tuple value is required")
696
697 }
698
699 default:
700 // We should avoid getting into this path, since this error
701 // message is rather useless.
702 return path.NewErrorf("incorrect type")
703
704 }
705}
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go
new file mode 100644
index 0000000..ce4c8f1
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go
@@ -0,0 +1,108 @@
1package gocty
2
3import (
4 "reflect"
5
6 "github.com/zclconf/go-cty/cty"
7)
8
9// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
10// to find a suitable cty.Type instance that could be used for a conversion
11// with ToCtyValue.
12//
13// This allows -- for simple situations at least -- types to be defined just
14// once in Go and the cty types derived from the Go types, but in the process
15// it makes some assumptions that may be undesirable so applications are
16// encouraged to build their cty types directly if exacting control is
17// required.
18//
19// Not all Go types can be represented as cty types, so an error may be
20// returned which is usually considered to be a bug in the calling program.
21// In particular, ImpliedType will never use capsule types in its returned
22// type, because it cannot know the capsule types supported by the calling
23// program.
24func ImpliedType(gv interface{}) (cty.Type, error) {
25 rt := reflect.TypeOf(gv)
26 var path cty.Path
27 return impliedType(rt, path)
28}
29
30func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
31 switch rt.Kind() {
32
33 case reflect.Ptr:
34 return impliedType(rt.Elem(), path)
35
36 // Primitive types
37 case reflect.Bool:
38 return cty.Bool, nil
39 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
40 return cty.Number, nil
41 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
42 return cty.Number, nil
43 case reflect.Float32, reflect.Float64:
44 return cty.Number, nil
45 case reflect.String:
46 return cty.String, nil
47
48 // Collection types
49 case reflect.Slice:
50 path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
51 ety, err := impliedType(rt.Elem(), path)
52 if err != nil {
53 return cty.NilType, err
54 }
55 return cty.List(ety), nil
56 case reflect.Map:
57 if !stringType.AssignableTo(rt.Key()) {
58 return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
59 }
60 path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
61 ety, err := impliedType(rt.Elem(), path)
62 if err != nil {
63 return cty.NilType, err
64 }
65 return cty.Map(ety), nil
66
67 // Structural types
68 case reflect.Struct:
69 return impliedStructType(rt, path)
70
71 default:
72 return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
73 }
74}
75
76func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
77 if valueType.AssignableTo(rt) {
78 // Special case: cty.Value represents cty.DynamicPseudoType, for
79 // type conformance checking.
80 return cty.DynamicPseudoType, nil
81 }
82
83 fieldIdxs := structTagIndices(rt)
84 if len(fieldIdxs) == 0 {
85 return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
86 }
87
88 atys := make(map[string]cty.Type, len(fieldIdxs))
89
90 {
91 // Temporary extension of path for attributes
92 path := append(path, nil)
93
94 for k, fi := range fieldIdxs {
95 path[len(path)-1] = cty.GetAttrStep{Name: k}
96
97 ft := rt.Field(fi).Type
98 aty, err := impliedType(ft, path)
99 if err != nil {
100 return cty.NilType, err
101 }
102
103 atys[k] = aty
104 }
105 }
106
107 return cty.Object(atys), nil
108}
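
ImpliedType in use — a minimal sketch, not part of the vendored source. It assumes the conventional cty struct field tags that structTagIndices reads; the printed representation is approximate.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/gocty"
)

// Instance is a hypothetical application type; only fields carrying a
// cty tag participate in the implied object type.
type Instance struct {
	Name  string   `cty:"name"`
	Count int      `cty:"count"`
	Tags  []string `cty:"tags"`
}

func main() {
	ty, err := gocty.ImpliedType(Instance{})
	if err != nil {
		panic(err)
	}
	// Expected to print roughly:
	// cty.Object(map[string]cty.Type{"count":cty.Number, "name":cty.String, "tags":cty.List(cty.String)})
	fmt.Printf("%#v\n", ty)
}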
diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go
new file mode 100644
index 0000000..1b88e9f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/helper.go
@@ -0,0 +1,99 @@
1package cty
2
3import (
4 "fmt"
5)
6
7// anyUnknown is a helper to easily check if a set of values contains any
8// unknowns, for operations that short-circuit to return unknown in that case.
9func anyUnknown(values ...Value) bool {
10 for _, val := range values {
11 if val.v == unknown {
12 return true
13 }
14 }
15 return false
16}
17
18// typeCheck tests whether all of the given values belong to the given type.
19// If the given types are a mixture of the given type and the dynamic
20// pseudo-type then a short-circuit dynamic value is returned. If the given
21// values are all of the correct type but at least one is unknown then
22// a short-circuit unknown value is returned. If any other types appear then
23// an error is returned. Otherwise (finally!) the result is nil, nil.
24func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) {
25 hasDynamic := false
26 hasUnknown := false
27
28 for i, val := range values {
29 if val.ty == DynamicPseudoType {
30 hasDynamic = true
31 continue
32 }
33
34 if !val.Type().Equals(required) {
35 return nil, fmt.Errorf(
36 "type mismatch: want %s but value %d is %s",
37 required.FriendlyName(),
38 i, val.ty.FriendlyName(),
39 )
40 }
41
42 if val.v == unknown {
43 hasUnknown = true
44 }
45 }
46
47 if hasDynamic {
48 return &DynamicVal, nil
49 }
50
51 if hasUnknown {
52 ret := UnknownVal(ret)
53 return &ret, nil
54 }
55
56 return nil, nil
57}
58
59// mustTypeCheck is a wrapper around typeCheck that immediately panics if
60// any error is returned.
61func mustTypeCheck(required Type, ret Type, values ...Value) *Value {
62 shortCircuit, err := typeCheck(required, ret, values...)
63 if err != nil {
64 panic(err)
65 }
66 return shortCircuit
67}
68
69// forceShortCircuitType takes the return value from mustTypeCheck and
70// replaces it with an unknown of the given type if the original value was
71// DynamicVal.
72//
73// This is useful for operations that are specified to always return a
74// particular type, since then a dynamic result can safely be "upgraded" to
75// a strongly-typed unknown, which then allows subsequent operations to
76// be actually type-checked.
77//
78// It is safe to use this only if the operation in question is defined as
79// returning either a value of the given type or panicking, since we know
80// then that subsequent operations won't run if the operation panics.
81//
82// If the given short-circuit value is *not* DynamicVal then it must be
83// of the given type, or this function will panic.
84func forceShortCircuitType(shortCircuit *Value, ty Type) *Value {
85 if shortCircuit == nil {
86 return nil
87 }
88
89 if shortCircuit.ty == DynamicPseudoType {
90 ret := UnknownVal(ty)
91 return &ret
92 }
93
94 if !shortCircuit.ty.Equals(ty) {
95 panic("forceShortCircuitType got value of wrong type")
96 }
97
98 return shortCircuit
99}
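
To make the short-circuit pattern concrete, here is a rough sketch (not in the vendored source) of how a binary operation inside package cty can use these helpers; it assumes "math/big" is imported and that both operands are known, non-null numbers once the slow path is reached.

// addSketch type-checks both operands, returns the short-circuit result
// for unknown or dynamic inputs, and only then does the real arithmetic.
func addSketch(a, b Value) Value {
	shortCircuit := mustTypeCheck(Number, Number, a, b)
	if shortCircuit != nil {
		// An unknown or dynamic operand: force the documented return type
		// so that subsequent operations can still be type-checked.
		shortCircuit = forceShortCircuitType(shortCircuit, Number)
		return *shortCircuit
	}

	sum := new(big.Float).Add(a.v.(*big.Float), b.v.(*big.Float))
	return NumberVal(sum)
}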
diff --git a/vendor/github.com/zclconf/go-cty/cty/json.go b/vendor/github.com/zclconf/go-cty/cty/json.go
new file mode 100644
index 0000000..c421a62
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json.go
@@ -0,0 +1,176 @@
1package cty
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7)
8
9// MarshalJSON is an implementation of json.Marshaler that allows Type
10// instances to be serialized as JSON.
11//
12// All standard types can be serialized, but capsule types cannot since there
13// is no way to automatically recover the original pointer and capsule types
14// compare by equality.
15func (t Type) MarshalJSON() ([]byte, error) {
16 switch impl := t.typeImpl.(type) {
17 case primitiveType:
18 switch impl.Kind {
19 case primitiveTypeBool:
20 return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil
21 case primitiveTypeNumber:
22 return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil
23 case primitiveTypeString:
24 return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil
25 default:
26 panic("unknown primitive type kind")
27 }
28 case typeList, typeMap, typeSet:
29 buf := &bytes.Buffer{}
30 etyJSON, err := t.ElementType().MarshalJSON()
31 if err != nil {
32 return nil, err
33 }
34 buf.WriteRune('[')
35 switch impl.(type) {
36 case typeList:
37 buf.WriteString(`"list"`)
38 case typeMap:
39 buf.WriteString(`"map"`)
40 case typeSet:
41 buf.WriteString(`"set"`)
42 }
43 buf.WriteRune(',')
44 buf.Write(etyJSON)
45 buf.WriteRune(']')
46 return buf.Bytes(), nil
47 case typeObject:
48 buf := &bytes.Buffer{}
49 atysJSON, err := json.Marshal(t.AttributeTypes())
50 if err != nil {
51 return nil, err
52 }
53 buf.WriteString(`["object",`)
54 buf.Write(atysJSON)
55 buf.WriteRune(']')
56 return buf.Bytes(), nil
57 case typeTuple:
58 buf := &bytes.Buffer{}
59 etysJSON, err := json.Marshal(t.TupleElementTypes())
60 if err != nil {
61 return nil, err
62 }
63 buf.WriteString(`["tuple",`)
64 buf.Write(etysJSON)
65 buf.WriteRune(']')
66 return buf.Bytes(), nil
67 case pseudoTypeDynamic:
68 return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil
69 case *capsuleType:
70 return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName())
71 default:
72 // should never happen
73 panic("unknown type implementation")
74 }
75}
76
77// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of
78// MarshalJSON for information on the limitations of JSON serialization of
79// types.
80func (t *Type) UnmarshalJSON(buf []byte) error {
81 r := bytes.NewReader(buf)
82 dec := json.NewDecoder(r)
83
84 tok, err := dec.Token()
85 if err != nil {
86 return err
87 }
88
89 switch v := tok.(type) {
90 case string:
91 switch v {
92 case "bool":
93 *t = Bool
94 case "number":
95 *t = Number
96 case "string":
97 *t = String
98 case "dynamic":
99 *t = DynamicPseudoType
100 default:
101 return fmt.Errorf("invalid primitive type name %q", v)
102 }
103
104 if dec.More() {
105 return fmt.Errorf("extraneous data after type description")
106 }
107 return nil
108 case json.Delim:
109 if rune(v) != '[' {
110 return fmt.Errorf("invalid complex type description")
111 }
112
113 tok, err = dec.Token()
114 if err != nil {
115 return err
116 }
117
118 kind, ok := tok.(string)
119 if !ok {
120 return fmt.Errorf("invalid complex type kind name")
121 }
122
123 switch kind {
124 case "list":
125 var ety Type
126 err = dec.Decode(&ety)
127 if err != nil {
128 return err
129 }
130 *t = List(ety)
131 case "map":
132 var ety Type
133 err = dec.Decode(&ety)
134 if err != nil {
135 return err
136 }
137 *t = Map(ety)
138 case "set":
139 var ety Type
140 err = dec.Decode(&ety)
141 if err != nil {
142 return err
143 }
144 *t = Set(ety)
145 case "object":
146 var atys map[string]Type
147 err = dec.Decode(&atys)
148 if err != nil {
149 return err
150 }
151 *t = Object(atys)
152 case "tuple":
153 var etys []Type
154 err = dec.Decode(&etys)
155 if err != nil {
156 return err
157 }
158 *t = Tuple(etys)
159 default:
160 return fmt.Errorf("invalid complex type kind name")
161 }
162
163 tok, err = dec.Token()
164 if err != nil {
165 return err
166 }
167 if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() {
168 return fmt.Errorf("unexpected extra data in type description")
169 }
170
171 return nil
172
173 default:
174 return fmt.Errorf("invalid type description")
175 }
176}
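
A small, self-contained sketch (not part of the vendored diff) of the type serialization described above; the output shown in the comments follows from the MarshalJSON cases for this type.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.List(cty.String),
	})

	// Should produce ["object",{"name":"string","tags":["list","string"]}]
	buf, err := json.Marshal(ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))

	// Decoding the same buffer yields an equal cty.Type.
	var got cty.Type
	if err := json.Unmarshal(buf, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.Equals(ty)) // true
}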
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/doc.go b/vendor/github.com/zclconf/go-cty/cty/json/doc.go
new file mode 100644
index 0000000..8916513
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/doc.go
@@ -0,0 +1,11 @@
1// Package json provides functions for serializing cty types and values in
2// JSON format, and for decoding them again.
3//
4// Since the cty type system is a superset of the JSON type system,
5// round-tripping through JSON is lossy unless type information is provided
6// both at encoding time and decoding time. Callers of this package are
7// therefore suggested to define their expected structure as a cty.Type
8// and pass it in consistently both when encoding and when decoding, though
9// default (type-lossy) behavior is provided for situations where the precise
10// representation of the data is not significant.
11package json
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go
new file mode 100644
index 0000000..f7bea1a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go
@@ -0,0 +1,189 @@
1package json
2
3import (
4 "bytes"
5 "encoding/json"
6 "sort"
7
8 "github.com/zclconf/go-cty/cty"
9)
10
11func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error {
12 // If we're going to decode as DynamicPseudoType then we need to save
13 // dynamic type information to recover the real type.
14 if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
15 return marshalDynamic(val, path, b)
16 }
17
18 if val.IsNull() {
19 b.WriteString("null")
20 return nil
21 }
22
23 if !val.IsKnown() {
24 return path.NewErrorf("value is not known")
25 }
26
27 // The caller should've guaranteed that the given val is conformant with
28 // the given type t, so we'll proceed under that assumption here.
29
30 switch {
31 case t.IsPrimitiveType():
32 switch t {
33 case cty.String:
34 json, err := json.Marshal(val.AsString())
35 if err != nil {
36 return path.NewErrorf("failed to serialize value: %s", err)
37 }
38 b.Write(json)
39 return nil
40 case cty.Number:
41 if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) {
42 return path.NewErrorf("cannot serialize infinity as JSON")
43 }
44 b.WriteString(val.AsBigFloat().Text('f', -1))
45 return nil
46 case cty.Bool:
47 if val.True() {
48 b.WriteString("true")
49 } else {
50 b.WriteString("false")
51 }
52 return nil
53 default:
54 panic("unsupported primitive type")
55 }
56 case t.IsListType(), t.IsSetType():
57 b.WriteRune('[')
58 first := true
59 ety := t.ElementType()
60 it := val.ElementIterator()
61 path := append(path, nil) // local override of 'path' with extra element
62 for it.Next() {
63 if !first {
64 b.WriteRune(',')
65 }
66 ek, ev := it.Element()
67 path[len(path)-1] = cty.IndexStep{
68 Key: ek,
69 }
70 err := marshal(ev, ety, path, b)
71 if err != nil {
72 return err
73 }
74 first = false
75 }
76 b.WriteRune(']')
77 return nil
78 case t.IsMapType():
79 b.WriteRune('{')
80 first := true
81 ety := t.ElementType()
82 it := val.ElementIterator()
83 path := append(path, nil) // local override of 'path' with extra element
84 for it.Next() {
85 if !first {
86 b.WriteRune(',')
87 }
88 ek, ev := it.Element()
89 path[len(path)-1] = cty.IndexStep{
90 Key: ek,
91 }
92 var err error
93 err = marshal(ek, ek.Type(), path, b)
94 if err != nil {
95 return err
96 }
97 b.WriteRune(':')
98 err = marshal(ev, ety, path, b)
99 if err != nil {
100 return err
101 }
102 first = false
103 }
104 b.WriteRune('}')
105 return nil
106 case t.IsTupleType():
107 b.WriteRune('[')
108 etys := t.TupleElementTypes()
109 it := val.ElementIterator()
110 path := append(path, nil) // local override of 'path' with extra element
111 i := 0
112 for it.Next() {
113 if i > 0 {
114 b.WriteRune(',')
115 }
116 ety := etys[i]
117 ek, ev := it.Element()
118 path[len(path)-1] = cty.IndexStep{
119 Key: ek,
120 }
121 err := marshal(ev, ety, path, b)
122 if err != nil {
123 return err
124 }
125 i++
126 }
127 b.WriteRune(']')
128 return nil
129 case t.IsObjectType():
130 b.WriteRune('{')
131 atys := t.AttributeTypes()
132 path := append(path, nil) // local override of 'path' with extra element
133
134 names := make([]string, 0, len(atys))
135 for k := range atys {
136 names = append(names, k)
137 }
138 sort.Strings(names)
139
140 for i, k := range names {
141 aty := atys[k]
142 if i > 0 {
143 b.WriteRune(',')
144 }
145 av := val.GetAttr(k)
146 path[len(path)-1] = cty.GetAttrStep{
147 Name: k,
148 }
149 var err error
150 err = marshal(cty.StringVal(k), cty.String, path, b)
151 if err != nil {
152 return err
153 }
154 b.WriteRune(':')
155 err = marshal(av, aty, path, b)
156 if err != nil {
157 return err
158 }
159 }
160 b.WriteRune('}')
161 return nil
162 case t.IsCapsuleType():
163 rawVal := val.EncapsulatedValue()
164 jsonVal, err := json.Marshal(rawVal)
165 if err != nil {
166 return path.NewError(err)
167 }
168 b.Write(jsonVal)
169 return nil
170 default:
171 // should never happen
172 return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName())
173 }
174}
175
176// marshalDynamic adds an extra wrapping object containing dynamic type
177// information for the given value.
178func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error {
179 typeJSON, err := MarshalType(val.Type())
180 if err != nil {
181 return path.NewErrorf("failed to serialize type: %s", err)
182 }
183 b.WriteString(`{"value":`)
184 if err := marshal(val, val.Type(), path, b); err != nil { return err }
185 b.WriteString(`,"type":`)
186 b.Write(typeJSON)
187 b.WriteRune('}')
188 return nil
189}
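
For reference, a short sketch (not part of the vendored source) of what the exported Marshal in this package emits for a fully typed value; the output comment follows from the object case above, which writes attributes in sorted key order.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("web"),
		"count": cty.NumberIntVal(2),
	})

	buf, err := ctyjson.Marshal(val, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"count":2,"name":"web"}
}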
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/simple.go b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
new file mode 100644
index 0000000..507c9cc
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
@@ -0,0 +1,41 @@
1package json
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// SimpleJSONValue is a wrapper around cty.Value that adds implementations of
8// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic
9// encoding and decoding of values.
10//
11// The couplet Marshal and Unmarshal both take extra type information to
12// inform the encoding and decoding process so that all of the cty types
13// can be represented even though JSON's type system is a subset.
14//
15// SimpleJSONValue instead takes the approach of discarding the value's type
16// information and then deriving a new type from the stored structure when
17// decoding. This results in the same data being returned but not necessarily
18// with exactly the same type.
19//
20// For information on how types are inferred when decoding, see the
21// documentation of the function ImpliedType.
22type SimpleJSONValue struct {
23 cty.Value
24}
25
26// MarshalJSON is an implementation of json.Marshaler. See the documentation
27// of SimpleJSONValue for more information.
28func (v SimpleJSONValue) MarshalJSON() ([]byte, error) {
29 return Marshal(v.Value, v.Type())
30}
31
32// UnmarshalJSON is an implementation of json.Unmarshaler. See the
33// documentation of SimpleJSONValue for more information.
34func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error {
35 t, err := ImpliedType(buf)
36 if err != nil {
37 return err
38 }
39 v.Value, err = Unmarshal(buf, t)
40 return err
41}
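
A sketch (not vendored code) of embedding SimpleJSONValue in an ordinary encoding/json workflow; as the doc comment warns, the decoded type is re-inferred from the JSON structure, so a list typically comes back as a tuple.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

// payload is a hypothetical wrapper struct used only for this example.
type payload struct {
	Value ctyjson.SimpleJSONValue `json:"value"`
}

func main() {
	in := payload{Value: ctyjson.SimpleJSONValue{Value: cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
	})}}

	buf, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"value":["a","b"]}

	var out payload
	if err := json.Unmarshal(buf, &out); err != nil {
		panic(err)
	}
	// Prints the friendly name of the re-inferred (type-lossy) type.
	fmt.Println(out.Value.Type().FriendlyName())
}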
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type.go b/vendor/github.com/zclconf/go-cty/cty/json/type.go
new file mode 100644
index 0000000..9131c6c
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type.go
@@ -0,0 +1,23 @@
1package json
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// MarshalType returns a JSON serialization of the given type.
8//
9// This is just a thin wrapper around t.MarshalJSON, for symmetry with
10// UnmarshalType.
11func MarshalType(t cty.Type) ([]byte, error) {
12 return t.MarshalJSON()
13}
14
15// UnmarshalType decodes a JSON serialization of the given type as produced
16// by either Type.MarshalJSON or MarshalType.
17//
18// This is a convenience wrapper around Type.UnmarshalJSON.
19func UnmarshalType(buf []byte) (cty.Type, error) {
20 var t cty.Type
21 err := t.UnmarshalJSON(buf)
22 return t, err
23}
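
And a trivially small sketch (not vendored code) of the wrappers in use:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	buf, err := ctyjson.MarshalType(cty.Map(cty.Bool))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ["map","bool"]

	ty, err := ctyjson.UnmarshalType(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(ty.Equals(cty.Map(cty.Bool))) // true
}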
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
new file mode 100644
index 0000000..1a97306
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
@@ -0,0 +1,171 @@
1package json
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7
8 "github.com/zclconf/go-cty/cty"
9)
10
11// ImpliedType returns the cty Type implied by the structure of the given
12// JSON-compliant buffer. This function implements the default type mapping
13// behavior used when decoding arbitrary JSON without explicit cty Type
14// information.
15//
16// The rules are as follows:
17//
18// JSON strings, numbers and bools map to their equivalent primitive type in
19// cty.
20//
21// JSON objects map to cty object types, with the attributes defined by the
22// object keys and the types of their values.
23//
24// JSON arrays map to cty tuple types, with the elements defined by the
25// types of the array members.
26//
27// Any nulls are typed as DynamicPseudoType, so callers of this function
28// must be prepared to deal with this. Callers that do not wish to deal with
29// dynamic typing should not use this function and should instead describe
30// their required types explicitly with a cty.Type instance when decoding.
31//
32// Any JSON syntax errors will be returned as an error, and the type will
33// be the invalid value cty.NilType.
34func ImpliedType(buf []byte) (cty.Type, error) {
35 r := bytes.NewReader(buf)
36 dec := json.NewDecoder(r)
37 dec.UseNumber()
38
39 ty, err := impliedType(dec)
40 if err != nil {
41 return cty.NilType, err
42 }
43
44 if dec.More() {
45 return cty.NilType, fmt.Errorf("extraneous data after JSON object")
46 }
47
48 return ty, nil
49}
50
51func impliedType(dec *json.Decoder) (cty.Type, error) {
52 tok, err := dec.Token()
53 if err != nil {
54 return cty.NilType, err
55 }
56
57 return impliedTypeForTok(tok, dec)
58}
59
60func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) {
61 if tok == nil {
62 return cty.DynamicPseudoType, nil
63 }
64
65 switch ttok := tok.(type) {
66 case bool:
67 return cty.Bool, nil
68
69 case json.Number:
70 return cty.Number, nil
71
72 case string:
73 return cty.String, nil
74
75 case json.Delim:
76
77 switch rune(ttok) {
78 case '{':
79 return impliedObjectType(dec)
80 case '[':
81 return impliedTupleType(dec)
82 default:
83 return cty.NilType, fmt.Errorf("unexpected token %q", ttok)
84 }
85
86 default:
87 return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok)
88 }
89}
90
91func impliedObjectType(dec *json.Decoder) (cty.Type, error) {
92 // By the time we get in here, we've already consumed the { delimiter
93 // and so our next token should be the first object key.
94
95 var atys map[string]cty.Type
96
97 for {
98 // Read the object key first
99 tok, err := dec.Token()
100 if err != nil {
101 return cty.NilType, err
102 }
103
104 if ttok, ok := tok.(json.Delim); ok {
105 if rune(ttok) != '}' {
106 return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok)
107 }
108 break
109 }
110
111 key, ok := tok.(string)
112 if !ok {
113 return cty.NilType, fmt.Errorf("expected string but found %T", tok)
114 }
115
116 // Now read the value
117 tok, err = dec.Token()
118 if err != nil {
119 return cty.NilType, err
120 }
121
122 aty, err := impliedTypeForTok(tok, dec)
123 if err != nil {
124 return cty.NilType, err
125 }
126
127 if atys == nil {
128 atys = make(map[string]cty.Type)
129 }
130 atys[key] = aty
131 }
132
133 if len(atys) == 0 {
134 return cty.EmptyObject, nil
135 }
136
137 return cty.Object(atys), nil
138}
139
140func impliedTupleType(dec *json.Decoder) (cty.Type, error) {
141 // By the time we get in here, we've already consumed the [ delimiter
142 // and so our next token should be the first value.
143
144 var etys []cty.Type
145
146 for {
147 tok, err := dec.Token()
148 if err != nil {
149 return cty.NilType, err
150 }
151
152 if ttok, ok := tok.(json.Delim); ok {
153 if rune(ttok) != ']' {
154 return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok)
155 }
156 break
157 }
158
159 ety, err := impliedTypeForTok(tok, dec)
160 if err != nil {
161 return cty.NilType, err
162 }
163 etys = append(etys, ety)
164 }
165
166 if len(etys) == 0 {
167 return cty.EmptyTuple, nil
168 }
169
170 return cty.Tuple(etys), nil
171}
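
The mapping rules above, applied to a concrete document (a sketch, not vendored code; the printed form is approximate):

package main

import (
	"fmt"

	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	buf := []byte(`{"name":"web","count":2,"tags":["a","b"],"extra":null}`)

	ty, err := ctyjson.ImpliedType(buf)
	if err != nil {
		panic(err)
	}
	// The object becomes an object type, the array a tuple type, and the
	// null is typed as DynamicPseudoType, roughly:
	// cty.Object(map[string]cty.Type{"count":cty.Number, "extra":cty.DynamicPseudoType,
	//     "name":cty.String, "tags":cty.Tuple([]cty.Type{cty.String, cty.String})})
	fmt.Printf("%#v\n", ty)
}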
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go
new file mode 100644
index 0000000..155f0b8
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go
@@ -0,0 +1,459 @@
1package json
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7 "reflect"
8
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/convert"
11)
12
13func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) {
14 dec := bufDecoder(buf)
15
16 tok, err := dec.Token()
17 if err != nil {
18 return cty.NilVal, path.NewError(err)
19 }
20
21 if tok == nil {
22 return cty.NullVal(t), nil
23 }
24
25 if t == cty.DynamicPseudoType {
26 return unmarshalDynamic(buf, path)
27 }
28
29 switch {
30 case t.IsPrimitiveType():
31 val, err := unmarshalPrimitive(tok, t, path)
32 if err != nil {
33 return cty.NilVal, err
34 }
35 return val, nil
36 case t.IsListType():
37 return unmarshalList(buf, t.ElementType(), path)
38 case t.IsSetType():
39 return unmarshalSet(buf, t.ElementType(), path)
40 case t.IsMapType():
41 return unmarshalMap(buf, t.ElementType(), path)
42 case t.IsTupleType():
43 return unmarshalTuple(buf, t.TupleElementTypes(), path)
44 case t.IsObjectType():
45 return unmarshalObject(buf, t.AttributeTypes(), path)
46 case t.IsCapsuleType():
47 return unmarshalCapsule(buf, t, path)
48 default:
49 return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName())
50 }
51}
52
53func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) {
54
55 switch t {
56 case cty.Bool:
57 switch v := tok.(type) {
58 case bool:
59 return cty.BoolVal(v), nil
60 case string:
61 val, err := convert.Convert(cty.StringVal(v), t)
62 if err != nil {
63 return cty.NilVal, path.NewError(err)
64 }
65 return val, nil
66 default:
67 return cty.NilVal, path.NewErrorf("bool is required")
68 }
69 case cty.Number:
70 if v, ok := tok.(json.Number); ok {
71 tok = string(v)
72 }
73 switch v := tok.(type) {
74 case string:
75 val, err := convert.Convert(cty.StringVal(v), t)
76 if err != nil {
77 return cty.NilVal, path.NewError(err)
78 }
79 return val, nil
80 default:
81 return cty.NilVal, path.NewErrorf("number is required")
82 }
83 case cty.String:
84 switch v := tok.(type) {
85 case string:
86 return cty.StringVal(v), nil
87 case json.Number:
88 return cty.StringVal(string(v)), nil
89 case bool:
90 val, err := convert.Convert(cty.BoolVal(v), t)
91 if err != nil {
92 return cty.NilVal, path.NewError(err)
93 }
94 return val, nil
95 default:
96 return cty.NilVal, path.NewErrorf("string is required")
97 }
98 default:
99 // should never happen
100 panic("unsupported primitive type")
101 }
102}
103
104func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) {
105 dec := bufDecoder(buf)
106 if err := requireDelim(dec, '['); err != nil {
107 return cty.NilVal, path.NewError(err)
108 }
109
110 var vals []cty.Value
111
112 {
113 path := append(path, nil)
114 var idx int64
115
116 for dec.More() {
117 path[len(path)-1] = cty.IndexStep{
118 Key: cty.NumberIntVal(idx),
119 }
120 idx++
121
122 rawVal, err := readRawValue(dec)
123 if err != nil {
124 return cty.NilVal, path.NewErrorf("failed to read list value: %s", err)
125 }
126
127 el, err := unmarshal(rawVal, ety, path)
128 if err != nil {
129 return cty.NilVal, err
130 }
131
132 vals = append(vals, el)
133 }
134 }
135
136 if err := requireDelim(dec, ']'); err != nil {
137 return cty.NilVal, path.NewError(err)
138 }
139
140 if len(vals) == 0 {
141 return cty.ListValEmpty(ety), nil
142 }
143
144 return cty.ListVal(vals), nil
145}
146
147func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) {
148 dec := bufDecoder(buf)
149 if err := requireDelim(dec, '['); err != nil {
150 return cty.NilVal, path.NewError(err)
151 }
152
153 var vals []cty.Value
154
155 {
156 path := append(path, nil)
157
158 for dec.More() {
159 path[len(path)-1] = cty.IndexStep{
160 Key: cty.UnknownVal(ety),
161 }
162
163 rawVal, err := readRawValue(dec)
164 if err != nil {
165 return cty.NilVal, path.NewErrorf("failed to read set value: %s", err)
166 }
167
168 el, err := unmarshal(rawVal, ety, path)
169 if err != nil {
170 return cty.NilVal, err
171 }
172
173 vals = append(vals, el)
174 }
175 }
176
177 if err := requireDelim(dec, ']'); err != nil {
178 return cty.NilVal, path.NewError(err)
179 }
180
181 if len(vals) == 0 {
182 return cty.SetValEmpty(ety), nil
183 }
184
185 return cty.SetVal(vals), nil
186}
187
188func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) {
189 dec := bufDecoder(buf)
190 if err := requireDelim(dec, '{'); err != nil {
191 return cty.NilVal, path.NewError(err)
192 }
193
194 vals := make(map[string]cty.Value)
195
196 {
197 path := append(path, nil)
198
199 for dec.More() {
200 path[len(path)-1] = cty.IndexStep{
201 Key: cty.UnknownVal(cty.String),
202 }
203
204 var err error
205
206 k, err := requireObjectKey(dec)
207 if err != nil {
208 return cty.NilVal, path.NewErrorf("failed to read map key: %s", err)
209 }
210
211 path[len(path)-1] = cty.IndexStep{
212 Key: cty.StringVal(k),
213 }
214
215 rawVal, err := readRawValue(dec)
216 if err != nil {
217 return cty.NilVal, path.NewErrorf("failed to read map value: %s", err)
218 }
219
220 el, err := unmarshal(rawVal, ety, path)
221 if err != nil {
222 return cty.NilVal, err
223 }
224
225 vals[k] = el
226 }
227 }
228
229 if err := requireDelim(dec, '}'); err != nil {
230 return cty.NilVal, path.NewError(err)
231 }
232
233 if len(vals) == 0 {
234 return cty.MapValEmpty(ety), nil
235 }
236
237 return cty.MapVal(vals), nil
238}
239
240func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) {
241 dec := bufDecoder(buf)
242 if err := requireDelim(dec, '['); err != nil {
243 return cty.NilVal, path.NewError(err)
244 }
245
246 var vals []cty.Value
247
248 {
249 path := append(path, nil)
250 var idx int
251
252 for dec.More() {
253 if idx >= len(etys) {
254 return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys))
255 }
256
257 path[len(path)-1] = cty.IndexStep{
258 Key: cty.NumberIntVal(int64(idx)),
259 }
260 ety := etys[idx]
261 idx++
262
263 rawVal, err := readRawValue(dec)
264 if err != nil {
265 return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err)
266 }
267
268 el, err := unmarshal(rawVal, ety, path)
269 if err != nil {
270 return cty.NilVal, err
271 }
272
273 vals = append(vals, el)
274 }
275 }
276
277 if err := requireDelim(dec, ']'); err != nil {
278 return cty.NilVal, path.NewError(err)
279 }
280
281 if len(vals) != len(etys) {
282 return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys))
283 }
284
285 if len(vals) == 0 {
286 return cty.EmptyTupleVal, nil
287 }
288
289 return cty.TupleVal(vals), nil
290}
291
292func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) {
293 dec := bufDecoder(buf)
294 if err := requireDelim(dec, '{'); err != nil {
295 return cty.NilVal, path.NewError(err)
296 }
297
298 vals := make(map[string]cty.Value)
299
300 {
301 objPath := path // some errors report from the object's perspective
302 path := append(path, nil) // path to a specific attribute
303
304 for dec.More() {
305
306 var err error
307
308 k, err := requireObjectKey(dec)
309 if err != nil {
310 return cty.NilVal, path.NewErrorf("failed to read object key: %s", err)
311 }
312
313 aty, ok := atys[k]
314 if !ok {
315 return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k)
316 }
317
318 path[len(path)-1] = cty.GetAttrStep{
319 Name: k,
320 }
321
322 rawVal, err := readRawValue(dec)
323 if err != nil {
324 return cty.NilVal, path.NewErrorf("failed to read object value: %s", err)
325 }
326
327 el, err := unmarshal(rawVal, aty, path)
328 if err != nil {
329 return cty.NilVal, err
330 }
331
332 vals[k] = el
333 }
334 }
335
336 if err := requireDelim(dec, '}'); err != nil {
337 return cty.NilVal, path.NewError(err)
338 }
339
340 // Make sure we have a value for every attribute
341 for k, aty := range atys {
342 if _, exists := vals[k]; !exists {
343 vals[k] = cty.NullVal(aty)
344 }
345 }
346
347 if len(vals) == 0 {
348 return cty.EmptyObjectVal, nil
349 }
350
351 return cty.ObjectVal(vals), nil
352}
353
354func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) {
355 rawType := t.EncapsulatedType()
356 ptrPtr := reflect.New(reflect.PtrTo(rawType))
357 ptrPtr.Elem().Set(reflect.New(rawType))
358 ptr := ptrPtr.Elem().Interface()
359 err := json.Unmarshal(buf, ptr)
360 if err != nil {
361 return cty.NilVal, path.NewError(err)
362 }
363
364 return cty.CapsuleVal(t, ptr), nil
365}
366
367func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) {
368 dec := bufDecoder(buf)
369 if err := requireDelim(dec, '{'); err != nil {
370 return cty.NilVal, path.NewError(err)
371 }
372
373 var t cty.Type
374 var valBody []byte // defer actual decoding until we know the type
375
376 for dec.More() {
377 var err error
378
379 key, err := requireObjectKey(dec)
380 if err != nil {
381 return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err)
382 }
383
384 rawVal, err := readRawValue(dec)
385 if err != nil {
386 return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err)
387 }
388
389 switch key {
390 case "type":
391 err := json.Unmarshal(rawVal, &t)
392 if err != nil {
393 return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err)
394 }
395 case "value":
396 valBody = rawVal
397 default:
398 return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key)
399 }
400
401 }
402
403 if err := requireDelim(dec, '}'); err != nil {
404 return cty.NilVal, path.NewError(err)
405 }
406
407 if t == cty.NilType {
408 return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value")
409 }
410 if valBody == nil {
411 return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value")
412 }
413
414 val, err := Unmarshal([]byte(valBody), t)
415 if err != nil {
416 return cty.NilVal, path.NewError(err)
417 }
418 return val, nil
419}
420
421func requireDelim(dec *json.Decoder, d rune) error {
422 tok, err := dec.Token()
423 if err != nil {
424 return err
425 }
426
427 if tok != json.Delim(d) {
428 return fmt.Errorf("missing expected %c", d)
429 }
430
431 return nil
432}
433
434func requireObjectKey(dec *json.Decoder) (string, error) {
435 tok, err := dec.Token()
436 if err != nil {
437 return "", err
438 }
439 if s, ok := tok.(string); ok {
440 return s, nil
441 }
442 return "", fmt.Errorf("missing expected object key")
443}
444
445func readRawValue(dec *json.Decoder) ([]byte, error) {
446 var rawVal json.RawMessage
447 err := dec.Decode(&rawVal)
448 if err != nil {
449 return nil, err
450 }
451 return []byte(rawVal), nil
452}
453
454func bufDecoder(buf []byte) *json.Decoder {
455 r := bytes.NewReader(buf)
456 dec := json.NewDecoder(r)
457 dec.UseNumber()
458 return dec
459}
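
One behaviour worth calling out from unmarshalObject above: attributes declared in the type but absent from the document are filled in as nulls of the declared type. A minimal sketch (not vendored code):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.List(cty.String),
	})

	// "tags" is not present in the JSON, so it decodes as a null list.
	val, err := ctyjson.Unmarshal([]byte(`{"name":"web"}`), ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(val.GetAttr("name").AsString()) // web
	fmt.Println(val.GetAttr("tags").IsNull())   // true
}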
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/value.go b/vendor/github.com/zclconf/go-cty/cty/json/value.go
new file mode 100644
index 0000000..f2f7dd5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/value.go
@@ -0,0 +1,65 @@
1package json
2
3import (
4 "bytes"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8)
9
10// Marshal produces a JSON representation of the given value that can later
11// be decoded into a value of the given type.
12//
13// A type is specified separately to allow for the given type to include
14// cty.DynamicPseudoType to represent situations where any type is permitted
15// and so type information must be included to allow recovery of the stored
16// structure when decoding.
17//
18// The given type will also be used to attempt automatic conversions of any
19// non-conformant types in the given value, although this will not always
20// be possible. If the value cannot be made to be conformant then an error is
21// returned, which may be a cty.PathError.
22//
23// Capsule-typed values can be marshalled, but with some caveats. Since
24// capsule values are compared by pointer equality, it is impossible to recover
25// a value that will compare equal to the original value. Additionally,
26// it's not possible to JSON-serialize the capsule type itself, so it's not
27// valid to use capsule types within parts of the value that are conformed to
28// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as
29// the encapsulated type itself is serializable with the Marshal function
30// in encoding/json.
31func Marshal(val cty.Value, t cty.Type) ([]byte, error) {
32 errs := val.Type().TestConformance(t)
33 if errs != nil {
34 // Attempt a conversion
35 var err error
36 val, err = convert.Convert(val, t)
37 if err != nil {
38 return nil, err
39 }
40 }
41
42 // From this point onward, val can be assumed to be conforming to t.
43
44 buf := &bytes.Buffer{}
45 var path cty.Path
46 err := marshal(val, t, path, buf)
47
48 if err != nil {
49 return nil, err
50 }
51
52 return buf.Bytes(), nil
53}
54
55// Unmarshal decodes a JSON representation of the given value into a cty Value
56// conforming to the given type.
57//
58// While decoding, type conversions will be done where possible to make
59// the result conformant even if the types given in JSON are not exactly
60// correct. If conversion isn't possible then an error is returned, which
61// may be a cty.PathError.
62func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) {
63 var path cty.Path
64 return unmarshal(buf, t, path)
65}
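
A sketch (not vendored code) of the dynamic wrapping that Marshal and Unmarshal perform when the declared type is cty.DynamicPseudoType, matching the marshalDynamic/unmarshalDynamic pair above:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	val := cty.StringVal("hello")

	// The real type is stored alongside the value so it can be recovered.
	buf, err := ctyjson.Marshal(val, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"value":"hello","type":"string"}

	got, err := ctyjson.Unmarshal(buf, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(val)) // true
}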
diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go
new file mode 100644
index 0000000..eadc865
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go
@@ -0,0 +1,68 @@
1package cty
2
3import (
4 "fmt"
5)
6
7// TypeList instances represent specific list types. Each distinct ElementType
8// creates a distinct, non-equal list type.
9type typeList struct {
10 typeImplSigil
11 ElementTypeT Type
12}
13
14// List creates a list type with the given element Type.
15//
16// List types are CollectionType implementations.
17func List(elem Type) Type {
18 return Type{
19 typeList{
20 ElementTypeT: elem,
21 },
22 }
23}
24
25// Equals returns true if the other Type is a list whose element type is
26// equal to that of the receiver.
27func (t typeList) Equals(other Type) bool {
28 ot, isList := other.typeImpl.(typeList)
29 if !isList {
30 return false
31 }
32
33 return t.ElementTypeT.Equals(ot.ElementTypeT)
34}
35
36func (t typeList) FriendlyName() string {
37 return "list of " + t.ElementTypeT.FriendlyName()
38}
39
40func (t typeList) ElementType() Type {
41 return t.ElementTypeT
42}
43
44func (t typeList) GoString() string {
45 return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
46}
47
48// IsListType returns true if the given type is a list type, regardless of its
49// element type.
50func (t Type) IsListType() bool {
51 _, ok := t.typeImpl.(typeList)
52 return ok
53}
54
55// ListElementType is a convenience method that checks if the given type is
56// a list type, returning a pointer to its element type if so and nil
57// otherwise. This is intended to allow convenient conditional branches,
58// like so:
59//
60// if et := t.ListElementType(); et != nil {
61// // Do something with *et
62// }
63func (t Type) ListElementType() *Type {
64 if lt, ok := t.typeImpl.(typeList); ok {
65 return &lt.ElementTypeT
66 }
67 return nil
68}
diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go
new file mode 100644
index 0000000..ae9abae
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go
@@ -0,0 +1,68 @@
1package cty
2
3import (
4 "fmt"
5)
6
7// typeMap instances represent specific map types. Each distinct ElementType
8// creates a distinct, non-equal map type.
9type typeMap struct {
10 typeImplSigil
11 ElementTypeT Type
12}
13
14// Map creates a map type with the given element Type.
15//
16// Map types are CollectionType implementations.
17func Map(elem Type) Type {
18 return Type{
19 typeMap{
20 ElementTypeT: elem,
21 },
22 }
23}
24
25// Equals returns true if the other Type is a map whose element type is
26// equal to that of the receiver.
27func (t typeMap) Equals(other Type) bool {
28 ot, isMap := other.typeImpl.(typeMap)
29 if !isMap {
30 return false
31 }
32
33 return t.ElementTypeT.Equals(ot.ElementTypeT)
34}
35
36func (t typeMap) FriendlyName() string {
37 return "map of " + t.ElementTypeT.FriendlyName()
38}
39
40func (t typeMap) ElementType() Type {
41 return t.ElementTypeT
42}
43
44func (t typeMap) GoString() string {
45 return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
46}
47
48// IsMapType returns true if the given type is a map type, regardless of its
49// element type.
50func (t Type) IsMapType() bool {
51 _, ok := t.typeImpl.(typeMap)
52 return ok
53}
54
55// MapElementType is a convenience method that checks if the given type is
56// a map type, returning a pointer to its element type if so and nil
57// otherwise. This is intended to allow convenient conditional branches,
58// like so:
59//
60// if et := t.MapElementType(); et != nil {
61// // Do something with *et
62// }
63func (t Type) MapElementType() *Type {
64 if lt, ok := t.typeImpl.(typeMap); ok {
65 return &lt.ElementTypeT
66 }
67 return nil
68}
diff --git a/vendor/github.com/zclconf/go-cty/cty/null.go b/vendor/github.com/zclconf/go-cty/cty/null.go
new file mode 100644
index 0000000..d58d028
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/null.go
@@ -0,0 +1,14 @@
1package cty
2
3// NullVal returns a null value of the given type. A null can be created of any
4// type, but operations on such values will always panic. Calling applications
5// are encouraged to use nulls only sparingly, particularly when user-provided
6// expressions are to be evaluated, since the presence of nulls creates a
7// much higher chance of evaluation errors that can't be caught by a type
8// checker.
9func NullVal(t Type) Value {
10 return Value{
11 ty: t,
12 v: nil,
13 }
14}
diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go
new file mode 100644
index 0000000..2540883
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go
@@ -0,0 +1,135 @@
1package cty
2
3import (
4 "fmt"
5)
6
7type typeObject struct {
8 typeImplSigil
9 AttrTypes map[string]Type
10}
11
12// Object creates an object type with the given attribute types.
13//
14// After a map is passed to this function the caller must no longer access it,
15// since ownership is transferred to this library.
16func Object(attrTypes map[string]Type) Type {
17 attrTypesNorm := make(map[string]Type, len(attrTypes))
18 for k, v := range attrTypes {
19 attrTypesNorm[NormalizeString(k)] = v
20 }
21
22 return Type{
23 typeObject{
24 AttrTypes: attrTypesNorm,
25 },
26 }
27}
28
29func (t typeObject) Equals(other Type) bool {
30 if ot, ok := other.typeImpl.(typeObject); ok {
31 if len(t.AttrTypes) != len(ot.AttrTypes) {
32 // Fast path: if we don't have the same number of attributes
33 // then we can't possibly be equal. This also avoids the need
34 // to test attributes in both directions below, since we know
35 // there can't be extras in "other".
36 return false
37 }
38
39 for attr, ty := range t.AttrTypes {
40 oty, ok := ot.AttrTypes[attr]
41 if !ok {
42 return false
43 }
44 if !oty.Equals(ty) {
45 return false
46 }
47 }
48
49 return true
50 }
51 return false
52}
53
54func (t typeObject) FriendlyName() string {
55 // There isn't really a friendly way to write an object type due to its
56 // complexity, so we'll just do something English-ish. Callers will
57 // probably want to make some extra effort to avoid ever printing out
58// an object type FriendlyName in its entirety. For example, a caller could
59// produce an error message by diffing two object types and saying
60 // something like "Expected attribute foo to be string, but got number".
61 // TODO: Finish this
62 return "object"
63}
64
65func (t typeObject) GoString() string {
66 if len(t.AttrTypes) == 0 {
67 return "cty.EmptyObject"
68 }
69 return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes)
70}
71
72// EmptyObject is a shorthand for Object(map[string]Type{}), to more
73// easily talk about the empty object type.
74var EmptyObject Type
75
76// EmptyObjectVal is the only possible non-null, non-unknown value of type
77// EmptyObject.
78var EmptyObjectVal Value
79
80func init() {
81 EmptyObject = Object(map[string]Type{})
82 EmptyObjectVal = Value{
83 ty: EmptyObject,
84 v: map[string]interface{}{},
85 }
86}
87
88// IsObjectType returns true if the given type is an object type, regardless
89// of its element type.
90func (t Type) IsObjectType() bool {
91 _, ok := t.typeImpl.(typeObject)
92 return ok
93}
94
95// HasAttribute returns true if the receiver has an attribute with the given
96// name, regardless of its type. Will panic if the receiver isn't an object
97// type; use IsObjectType to determine whether this operation will succeed.
98func (t Type) HasAttribute(name string) bool {
99 name = NormalizeString(name)
100 if ot, ok := t.typeImpl.(typeObject); ok {
101 _, hasAttr := ot.AttrTypes[name]
102 return hasAttr
103 }
104 panic("HasAttribute on non-object Type")
105}
106
107// AttributeType returns the type of the attribute with the given name. Will
108// panic if the receiver is not an object type (use IsObjectType to confirm)
109// or if the object type has no such attribute (use HasAttribute to confirm).
110func (t Type) AttributeType(name string) Type {
111 name = NormalizeString(name)
112 if ot, ok := t.typeImpl.(typeObject); ok {
113 aty, hasAttr := ot.AttrTypes[name]
114 if !hasAttr {
115 panic("no such attribute")
116 }
117 return aty
118 }
119 panic("AttributeType on non-object Type")
120}
121
122// AttributeTypes returns a map from attribute names to their associated
123// types. Will panic if the receiver is not an object type (use IsObjectType
124// to confirm).
125//
126// The returned map is part of the internal state of the type, and is provided
127// for read access only. It is forbidden for any caller to modify the returned
128// map. For many purposes the attribute-related methods of Value are more
129// appropriate and more convenient to use.
130func (t Type) AttributeTypes() map[string]Type {
131 if ot, ok := t.typeImpl.(typeObject); ok {
132 return ot.AttrTypes
133 }
134 panic("AttributeTypes on non-object Type")
135}
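
A short sketch (not vendored code) of the introspection methods defined above:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})

	fmt.Println(ty.IsObjectType())                            // true
	fmt.Println(ty.HasAttribute("name"))                      // true
	fmt.Println(ty.HasAttribute("missing"))                   // false
	fmt.Println(ty.AttributeType("count").Equals(cty.Number)) // true

	// AttributeTypes exposes the type's internal map; treat it as read-only.
	for name := range ty.AttributeTypes() {
		fmt.Println(name)
	}
}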
diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go
new file mode 100644
index 0000000..84a9de0
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/path.go
@@ -0,0 +1,186 @@
1package cty
2
3import (
4 "errors"
5 "fmt"
6)
7
8// A Path is a sequence of operations to locate a nested value within a
9// data structure.
10//
11// The empty Path represents the given item. Any PathSteps within represent
12// taking a single step down into a data structure.
13//
14// Path has some convenience methods for gradually constructing a path,
15// but callers can also feel free to just produce a slice of PathStep manually
16// and convert to this type, which may be more appropriate in environments
17// where memory pressure is a concern.
18type Path []PathStep
19
20// PathStep represents a single step down into a data structure, as part
21// of a Path. PathStep is a closed interface, meaning that the only
22// permitted implementations are those within this package.
23type PathStep interface {
24 pathStepSigil() pathStepImpl
25 Apply(Value) (Value, error)
26}
27
28// embed pathImpl into a struct to declare it a PathStep implementation
29type pathStepImpl struct{}
30
31func (p pathStepImpl) pathStepSigil() pathStepImpl {
32 return p
33}
34
35// Index returns a new Path that is the receiver with an IndexStep appended
36// to the end.
37//
38// This is provided as a convenient way to construct paths, but each call
39// will create garbage so it should not be used where memory pressure is a
40// concern.
41func (p Path) Index(v Value) Path {
42 ret := make(Path, len(p)+1)
43 copy(ret, p)
44 ret[len(p)] = IndexStep{
45 Key: v,
46 }
47 return ret
48}
49
50// GetAttr returns a new Path that is the receiver with a GetAttrStep appended
51// to the end.
52//
53// This is provided as a convenient way to construct paths, but each call
54// will create garbage so it should not be used where memory pressure is a
55// concern.
56func (p Path) GetAttr(name string) Path {
57 ret := make(Path, len(p)+1)
58 copy(ret, p)
59 ret[len(p)] = GetAttrStep{
60 Name: name,
61 }
62 return ret
63}
64
65// Apply applies each of the steps in turn to successive values starting with
66// the given value, and returns the result. If any step returns an error,
67// the whole operation returns an error.
68func (p Path) Apply(val Value) (Value, error) {
69 var err error
70 for i, step := range p {
71 val, err = step.Apply(val)
72 if err != nil {
73 return NilVal, fmt.Errorf("at step %d: %s", i, err)
74 }
75 }
76 return val, nil
77}
78
79// LastStep applies the given path up to the last step and then returns
80// the resulting value and the final step.
81//
82// This is useful when dealing with assignment operations, since in that
83// case the *value* of the last step is not important (and may not, in fact,
84// be present at all) and we care only about its location.
85//
86// Since LastStep applies all steps except the last, it will return errors
87// for those steps in the same way as Apply does.
88//
89// If the path has *no* steps then the returned PathStep will be nil,
90// representing that any operation should be applied directly to the
91// given value.
92func (p Path) LastStep(val Value) (Value, PathStep, error) {
93 var err error
94
95 if len(p) == 0 {
96 return val, nil, nil
97 }
98
99 journey := p[:len(p)-1]
100 val, err = journey.Apply(val)
101 if err != nil {
102 return NilVal, nil, err
103 }
104 return val, p[len(p)-1], nil
105}
106
107// Copy makes a shallow copy of the receiver. Often when paths are passed to
108// caller code they come with the constraint that they are valid only until
109// the caller returns, due to how they are constructed internally. Callers
110// can use Copy to conveniently produce a copy of the value that _they_ control
111// the validity of.
112func (p Path) Copy() Path {
113 ret := make(Path, len(p))
114 copy(ret, p)
115 return ret
116}
117
118// IndexStep is a Step implementation representing applying the index operation
119// to a value, which must be of either a list, map, or set type.
120//
121// When describing a path through a *type* rather than a concrete value,
122// the Key may be an unknown value, indicating that the step applies to
123// *any* key of the given type.
124//
125// When indexing into a set, the Key is actually the element being accessed
126// itself, since in sets elements are their own identity.
127type IndexStep struct {
128 pathStepImpl
129 Key Value
130}
131
132// Apply returns the value resulting from indexing the given value with
133// our key value.
134func (s IndexStep) Apply(val Value) (Value, error) {
135 switch s.Key.Type() {
136 case Number:
137 if !val.Type().IsListType() {
138 return NilVal, errors.New("not a list type")
139 }
140 case String:
141 if !val.Type().IsMapType() {
142 return NilVal, errors.New("not a map type")
143 }
144 default:
145 return NilVal, errors.New("key value not number or string")
146 }
147
148 has := val.HasIndex(s.Key)
149 if !has.IsKnown() {
150 return UnknownVal(val.Type().ElementType()), nil
151 }
152 if !has.True() {
153 return NilVal, errors.New("value does not have given index key")
154 }
155
156 return val.Index(s.Key), nil
157}
158
159func (s IndexStep) GoString() string {
160 return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key)
161}
162
163// GetAttrStep is a Step implementation representing retrieving an attribute
164// from a value, which must be of an object type.
165type GetAttrStep struct {
166 pathStepImpl
167 Name string
168}
169
170// Apply returns the value of our named attribute from the given value, which
171// must be of an object type that has a value of that name.
172func (s GetAttrStep) Apply(val Value) (Value, error) {
173 if !val.Type().IsObjectType() {
174 return NilVal, errors.New("not an object type")
175 }
176
177 if !val.Type().HasAttribute(s.Name) {
178 return NilVal, fmt.Errorf("object has no attribute %q", s.Name)
179 }
180
181 return val.GetAttr(s.Name), nil
182}
183
184func (s GetAttrStep) GoString() string {
185 return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name)
186}
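
Putting the pieces above together — building a path with the convenience methods and applying it to a value (a sketch, not vendored code):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"tags": cty.ListVal([]cty.Value{
			cty.StringVal("a"),
			cty.StringVal("b"),
		}),
	})

	// Equivalent to writing the []cty.PathStep slice out by hand.
	path := cty.Path{}.GetAttr("tags").Index(cty.NumberIntVal(1))

	got, err := path.Apply(val)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.AsString()) // b
}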
diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go
new file mode 100644
index 0000000..b8682dd
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go
@@ -0,0 +1,122 @@
1package cty
2
3import "math/big"
4
5// primitiveType is the hidden implementation of the various primitive types
6// that are exposed as variables in this package.
7type primitiveType struct {
8 typeImplSigil
9 Kind primitiveTypeKind
10}
11
12type primitiveTypeKind byte
13
14const (
15 primitiveTypeBool primitiveTypeKind = 'B'
16 primitiveTypeNumber primitiveTypeKind = 'N'
17 primitiveTypeString primitiveTypeKind = 'S'
18)
19
20func (t primitiveType) Equals(other Type) bool {
21 if otherP, ok := other.typeImpl.(primitiveType); ok {
22 return otherP.Kind == t.Kind
23 }
24 return false
25}
26
27func (t primitiveType) FriendlyName() string {
28 switch t.Kind {
29 case primitiveTypeBool:
30 return "bool"
31 case primitiveTypeNumber:
32 return "number"
33 case primitiveTypeString:
34 return "string"
35 default:
36 // should never happen
37 panic("invalid primitive type")
38 }
39}
40
41func (t primitiveType) GoString() string {
42 switch t.Kind {
43 case primitiveTypeBool:
44 return "cty.Bool"
45 case primitiveTypeNumber:
46 return "cty.Number"
47 case primitiveTypeString:
48 return "cty.String"
49 default:
50 // should never happen
51 panic("invalid primitive type")
52 }
53}
54
55// Number is the numeric type. Number values are arbitrary-precision
56// decimal numbers, which can then be converted into Go's various numeric
57// types only if they are in the appropriate range.
58var Number Type
59
60// String is the string type. String values are sequences of unicode codepoints
61// encoded internally as UTF-8.
62var String Type
63
64// Bool is the boolean type. The two values of this type are True and False.
65var Bool Type
66
67// True is the truthy value of type Bool
68var True Value
69
70// False is the falsey value of type Bool
71var False Value
72
73// Zero is a number value representing exactly zero.
74var Zero Value
75
76// PositiveInfinity is a Number value representing positive infinity
77var PositiveInfinity Value
78
79// NegativeInfinity is a Number value representing negative infinity
80var NegativeInfinity Value
81
82func init() {
83 Number = Type{
84 primitiveType{Kind: primitiveTypeNumber},
85 }
86 String = Type{
87 primitiveType{Kind: primitiveTypeString},
88 }
89 Bool = Type{
90 primitiveType{Kind: primitiveTypeBool},
91 }
92 True = Value{
93 ty: Bool,
94 v: true,
95 }
96 False = Value{
97 ty: Bool,
98 v: false,
99 }
100 Zero = Value{
101 ty: Number,
102 v: big.NewFloat(0),
103 }
104 PositiveInfinity = Value{
105 ty: Number,
106 v: (&big.Float{}).SetInf(false),
107 }
108 NegativeInfinity = Value{
109 ty: Number,
110 v: (&big.Float{}).SetInf(true),
111 }
112}
113
114// IsPrimitiveType returns true if and only if the receiver is a primitive
115// type, which means it's either number, string, or bool. Any two primitive
116// types can be safely compared for equality using the standard == operator
117// without panic, which is not a guarantee that holds for all types. Primitive
118// types can therefore also be used in switch statements.
119func (t Type) IsPrimitiveType() bool {
120 _, ok := t.typeImpl.(primitiveType)
121 return ok
122}
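
Because the primitive types are comparable singletons, they can drive a switch directly, as the IsPrimitiveType comment notes. A small sketch, not part of the vendored file; the describe helper is hypothetical.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// describe is a hypothetical helper: primitive types compare safely with ==,
// so switching over them cannot panic.
func describe(t cty.Type) string {
	if !t.IsPrimitiveType() {
		return "not primitive: " + t.FriendlyName()
	}
	switch t {
	case cty.Number:
		return "a number"
	case cty.String:
		return "a string"
	case cty.Bool:
		return "a bool"
	default:
		return "unreachable"
	}
}

func main() {
	fmt.Println(describe(cty.Number))         // a number
	fmt.Println(describe(cty.List(cty.Bool))) // not primitive: ...
}
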
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/zclconf/go-cty/cty/set/gob.go
new file mode 100644
index 0000000..da2978f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/gob.go
@@ -0,0 +1,76 @@
1package set
2
3import (
4 "bytes"
5 "encoding/gob"
6 "fmt"
7)
8
9// GobEncode is an implementation of the interface gob.GobEncoder, allowing
10// sets to be included in structures encoded via gob.
11//
12// The set rules are included in the serialized value, so the caller must
13// register its concrete rules type with gob.Register before using a
14// set in a gob, and possibly also implement GobEncode/GobDecode to customize
15// how any parameters are persisted.
16//
17// The set elements are also included, so if they are of non-primitive types
18// they too must be registered with gob.
19//
20// If the produced gob values will persist for a long time, the caller must
21// ensure compatibility of the rules implementation. In particular, if the
22// definition of element equivalence changes between encoding and decoding
23// then two distinct stored elements may be considered equivalent on decoding,
24// causing the recovered set to have fewer elements than when it was stored.
25func (s Set) GobEncode() ([]byte, error) {
26 gs := gobSet{
27 Version: 0,
28 Rules: s.rules,
29 Values: s.Values(),
30 }
31
32 buf := &bytes.Buffer{}
33 enc := gob.NewEncoder(buf)
34 err := enc.Encode(gs)
35 if err != nil {
36 return nil, fmt.Errorf("error encoding set.Set: %s", err)
37 }
38
39 return buf.Bytes(), nil
40}
41
42// GobDecode is the opposite of GobEncode. See GobEncode for information
43// on the requirements for and caveats of including set values in gobs.
44func (s *Set) GobDecode(buf []byte) error {
45 r := bytes.NewReader(buf)
46 dec := gob.NewDecoder(r)
47
48 var gs gobSet
49 err := dec.Decode(&gs)
50 if err != nil {
51 return fmt.Errorf("error decoding set.Set: %s", err)
52 }
53 if gs.Version != 0 {
54 return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version)
55 }
56
57 victim := NewSetFromSlice(gs.Rules, gs.Values)
58 s.vals = victim.vals
59 s.rules = victim.rules
60 return nil
61}
62
63type gobSet struct {
64 Version int
65 Rules Rules
66
67 // The bucket-based representation is for efficient in-memory access, but
68 // for serialization it's enough to just retain the values themselves,
69 // which we can re-bucket using the rules (which may have changed!) when
70 // we re-inflate.
71 Values []interface{}
72}
73
74func init() {
75 gob.Register([]interface{}(nil))
76}
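
A sketch of the round trip described above, not part of the vendored file: the intRules type and the surrounding program are hypothetical, and the concrete rules type is registered with gob exactly as the comment on GobEncode requires.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/zclconf/go-cty/cty/set"
)

// intRules is a hypothetical Rules implementation for plain int elements.
// It carries an exported field so that gob can serialize it.
type intRules struct {
	Label string
}

func (intRules) Hash(v interface{}) int           { return v.(int) }
func (intRules) Equivalent(a, b interface{}) bool { return a == b }

func main() {
	// Register the concrete rules type so the Rules interface field inside
	// the encoded set can be reconstructed on decode. Also make sure int is
	// known to gob for transmission inside the []interface{} element list.
	gob.Register(intRules{})
	gob.Register(int(0))

	s := set.NewSet(intRules{Label: "ints"})
	s.Add(1)
	s.Add(2)

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(s); err != nil {
		panic(err)
	}

	var got set.Set
	if err := gob.NewDecoder(&buf).Decode(&got); err != nil {
		panic(err)
	}
	fmt.Println(got.Length()) // 2
}
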
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go
new file mode 100644
index 0000000..f15498e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go
@@ -0,0 +1,36 @@
1package set
2
3type Iterator struct {
4 bucketIds []int
5 vals map[int][]interface{}
6 bucketIdx int
7 valIdx int
8}
9
10func (it *Iterator) Value() interface{} {
11 return it.currentBucket()[it.valIdx]
12}
13
14func (it *Iterator) Next() bool {
15 if it.bucketIdx == -1 {
16 // init
17 if len(it.bucketIds) == 0 {
18 return false
19 }
20
21 it.valIdx = 0
22 it.bucketIdx = 0
23 return true
24 }
25
26 it.valIdx++
27 if it.valIdx >= len(it.currentBucket()) {
28 it.valIdx = 0
29 it.bucketIdx++
30 }
31 return it.bucketIdx < len(it.bucketIds)
32}
33
34func (it *Iterator) currentBucket() []interface{} {
35 return it.vals[it.bucketIds[it.bucketIdx]]
36}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go
new file mode 100644
index 0000000..726e707
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go
@@ -0,0 +1,199 @@
1package set
2
3import (
4 "sort"
5)
6
7// Add inserts the given value into the receiving Set.
8//
9// This mutates the set in-place. This operation is not thread-safe.
10func (s Set) Add(val interface{}) {
11 hv := s.rules.Hash(val)
12 if _, ok := s.vals[hv]; !ok {
13 s.vals[hv] = make([]interface{}, 0, 1)
14 }
15 bucket := s.vals[hv]
16
17 // See if an equivalent value is already present
18 for _, ev := range bucket {
19 if s.rules.Equivalent(val, ev) {
20 return
21 }
22 }
23
24 s.vals[hv] = append(bucket, val)
25}
26
27// Remove deletes the given value from the receiving set, if indeed it was
28// there in the first place. If the value is not present, this is a no-op.
29func (s Set) Remove(val interface{}) {
30 hv := s.rules.Hash(val)
31 bucket, ok := s.vals[hv]
32 if !ok {
33 return
34 }
35
36 for i, ev := range bucket {
37 if s.rules.Equivalent(val, ev) {
38 newBucket := make([]interface{}, 0, len(bucket)-1)
39 newBucket = append(newBucket, bucket[:i]...)
40 newBucket = append(newBucket, bucket[i+1:]...)
41 if len(newBucket) > 0 {
42 s.vals[hv] = newBucket
43 } else {
44 delete(s.vals, hv)
45 }
46 return
47 }
48 }
49}
50
51// Has returns true if the given value is in the receiving set, or false if
52// it is not.
53func (s Set) Has(val interface{}) bool {
54 hv := s.rules.Hash(val)
55 bucket, ok := s.vals[hv]
56 if !ok {
57 return false
58 }
59
60 for _, ev := range bucket {
61 if s.rules.Equivalent(val, ev) {
62 return true
63 }
64 }
65 return false
66}
67
68// Copy performs a shallow copy of the receiving set, returning a new set
69// with the same rules and elements.
70func (s Set) Copy() Set {
71 ret := NewSet(s.rules)
72 for k, v := range s.vals {
73 ret.vals[k] = v
74 }
75 return ret
76}
77
78// Iterator returns an iterator over values in the set, in an undefined order
79// that callers should not depend on.
80//
81// The pattern for using the returned iterator is:
82//
83// it := set.Iterator()
84// for it.Next() {
85// val := it.Value()
86// // ...
87// }
88//
89// Once an iterator has been created for a set, the set *must not* be mutated
90// until the iterator is no longer in use.
91func (s Set) Iterator() *Iterator {
92 // Sort the bucketIds to ensure that we always traverse in a
93 // consistent order.
94 bucketIds := make([]int, 0, len(s.vals))
95 for id := range s.vals {
96 bucketIds = append(bucketIds, id)
97 }
98 sort.Ints(bucketIds)
99
100 return &Iterator{
101 bucketIds: bucketIds,
102 vals: s.vals,
103 bucketIdx: -1,
104 }
105}
106
107// EachValue calls the given callback once for each value in the set, in an
108// undefined order that callers should not depend on.
109func (s Set) EachValue(cb func(interface{})) {
110 it := s.Iterator()
111 for it.Next() {
112 cb(it.Value())
113 }
114}
115
116// Values returns a slice of all of the values in the set in no particular
117// order. This is just a wrapper around EachValue that accumulates the results
118// in a slice for caller convenience.
119//
120// The returned slice will be nil if there are no values in the set.
121func (s Set) Values() []interface{} {
122 var ret []interface{}
123 s.EachValue(func(v interface{}) {
124 ret = append(ret, v)
125 })
126 return ret
127}
128
129// Length returns the number of values in the set.
130func (s Set) Length() int {
131 var count int
132 for _, bucket := range s.vals {
133 count = count + len(bucket)
134 }
135 return count
136}
137
138// Union returns a new set that contains all of the members of both the
139// receiving set and the given set. Both sets must have the same rules, or
140// else this function will panic.
141func (s1 Set) Union(s2 Set) Set {
142 mustHaveSameRules(s1, s2)
143 rs := NewSet(s1.rules)
144 s1.EachValue(func(v interface{}) {
145 rs.Add(v)
146 })
147 s2.EachValue(func(v interface{}) {
148 rs.Add(v)
149 })
150 return rs
151}
152
153// Intersection returns a new set that contains the values that both the
154// receiver and given sets have in common. Both sets must have the same rules,
155// or else this function will panic.
156func (s1 Set) Intersection(s2 Set) Set {
157 mustHaveSameRules(s1, s2)
158 rs := NewSet(s1.rules)
159 s1.EachValue(func(v interface{}) {
160 if s2.Has(v) {
161 rs.Add(v)
162 }
163 })
164 return rs
165}
166
167// Subtract returns a new set that contains all of the values from the receiver
168// that are not also in the given set. Both sets must have the same rules,
169// or else this function will panic.
170func (s1 Set) Subtract(s2 Set) Set {
171 mustHaveSameRules(s1, s2)
172 rs := NewSet(s1.rules)
173 s1.EachValue(func(v interface{}) {
174 if !s2.Has(v) {
175 rs.Add(v)
176 }
177 })
178 return rs
179}
180
181// SymmetricDifference returns a new set that contains all of the values from
182// both the receiver and given sets, except those that both sets have in
183// common. Both sets must have the same rules, or else this function will
184// panic.
185func (s1 Set) SymmetricDifference(s2 Set) Set {
186 mustHaveSameRules(s1, s2)
187 rs := NewSet(s1.rules)
188 s1.EachValue(func(v interface{}) {
189 if !s2.Has(v) {
190 rs.Add(v)
191 }
192 })
193 s2.EachValue(func(v interface{}) {
194 if !s1.Has(v) {
195 rs.Add(v)
196 }
197 })
198 return rs
199}
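
A brief usage sketch of the operations above, not part of the vendored file; the intRules type and the sorted helper are hypothetical.

package main

import (
	"fmt"
	"sort"

	"github.com/zclconf/go-cty/cty/set"
)

// intRules is a hypothetical Rules implementation for plain int elements.
type intRules struct{}

func (intRules) Hash(v interface{}) int           { return v.(int) }
func (intRules) Equivalent(a, b interface{}) bool { return a == b }

// sorted collects a set's values into a sorted slice, since iteration order
// is undefined.
func sorted(s set.Set) []int {
	var out []int
	s.EachValue(func(v interface{}) {
		out = append(out, v.(int))
	})
	sort.Ints(out)
	return out
}

func main() {
	a := set.NewSetFromSlice(intRules{}, []interface{}{1, 2, 3})
	b := set.NewSetFromSlice(intRules{}, []interface{}{2, 3, 4})

	fmt.Println(sorted(a.Union(b)))        // [1 2 3 4]
	fmt.Println(sorted(a.Intersection(b))) // [2 3]
	fmt.Println(sorted(a.Subtract(b)))     // [1]
}
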
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go
new file mode 100644
index 0000000..7200184
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go
@@ -0,0 +1,25 @@
1package set
2
3// Rules represents the operations that define membership for a Set.
4//
5// Each Set has a Rules instance, whose methods must satisfy the interface
6// contracts given below for any value that will be added to the set.
7type Rules interface {
8 // Hash returns an int that somewhat-uniquely identifies the given value.
9 //
10 // A good hash function will minimize collisions for values that will be
11 // added to the set, though collisions *are* permitted. Collisions will
12 // simply reduce the efficiency of operations on the set.
13 Hash(interface{}) int
14
15 // Equivalent returns true if and only if the two values are considered
16 // equivalent for the sake of set membership. Two values that are
17 // equivalent cannot exist in the set at the same time, and if two
18 // equivalent values are added it is undefined which one will be
19 // returned when enumerating all of the set members.
20 //
21 // Two values that are equivalent *must* result in the same hash value,
22 // though it is *not* required that two values with the same hash value
23 // be equivalent.
24 Equivalent(interface{}, interface{}) bool
25}
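
As a hedged illustration of the contract above: a hypothetical rules implementation that treats strings case-insensitively must hash the folded form of each value, so that any two Equivalent values always share a hash.

package main

import (
	"fmt"
	"hash/crc32"
	"strings"

	"github.com/zclconf/go-cty/cty/set"
)

// foldRules is a hypothetical Rules implementation with case-insensitive
// string equivalence.
type foldRules struct{}

// Hash folds the string first so that equivalent values hash identically.
func (foldRules) Hash(v interface{}) int {
	folded := strings.ToLower(v.(string))
	return int(crc32.ChecksumIEEE([]byte(folded)))
}

func (foldRules) Equivalent(a, b interface{}) bool {
	return strings.EqualFold(a.(string), b.(string))
}

func main() {
	s := set.NewSet(foldRules{})
	s.Add("Hello")
	s.Add("HELLO")          // equivalent to the existing element, so not added
	fmt.Println(s.Length()) // 1
}
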
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go
new file mode 100644
index 0000000..b4fb316
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go
@@ -0,0 +1,62 @@
1package set
2
3import (
4 "fmt"
5)
6
7// Set is an implementation of the concept of a set: a collection where all
8// values are conceptually either in or out of the set, but the members are
9// not ordered.
10//
11// This type primarily exists to be the internal type of sets in cty, but
12// it is considered to be at the same level of abstraction as Go's built in
13// slice and map collection types, and so should make no cty-specific
14// assumptions.
15//
16// Set operations are not thread safe. It is the caller's responsibility to
17// provide mutex guarantees where necessary.
18//
19// Set operations are not optimized to minimize memory pressure. Mutating
20// a set will generally create garbage and so should perhaps be avoided in
21// tight loops where memory pressure is a concern.
22type Set struct {
23 vals map[int][]interface{}
24 rules Rules
25}
26
27// NewSet returns an empty set with the membership rules given.
28func NewSet(rules Rules) Set {
29 return Set{
30 vals: map[int][]interface{}{},
31 rules: rules,
32 }
33}
34
35func NewSetFromSlice(rules Rules, vals []interface{}) Set {
36 s := NewSet(rules)
37 for _, v := range vals {
38 s.Add(v)
39 }
40 return s
41}
42
43func sameRules(s1 Set, s2 Set) bool {
44 return s1.rules == s2.rules
45}
46
47func mustHaveSameRules(s1 Set, s2 Set) {
48 if !sameRules(s1, s2) {
49 panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules))
50 }
51}
52
53// HasRules returns true if and only if the receiving set has the given rules
54// instance as its rules.
55func (s Set) HasRules(rules Rules) bool {
56 return s.rules == rules
57}
58
59// Rules returns the receiving set's rules instance.
60func (s Set) Rules() Rules {
61 return s.rules
62}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/vendor/github.com/zclconf/go-cty/cty/set_helper.go
new file mode 100644
index 0000000..a88ddaf
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_helper.go
@@ -0,0 +1,126 @@
1package cty
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty/set"
7)
8
9// ValueSet is to cty.Set what []cty.Value is to cty.List and
10// map[string]cty.Value is to cty.Map. It's provided to allow callers a
11// convenient interface for manipulating sets before wrapping them in cty.Set
12// values using cty.SetValFromValueSet.
13//
14// Unlike value slices and value maps, ValueSet instances have a single
15// homogeneous element type because that is a requirement of the underlying
16// set implementation, which uses the element type to select a suitable
17// hashing function.
18//
19// Set mutations are not concurrency-safe.
20type ValueSet struct {
21 // ValueSet is just a thin wrapper around a set.Set with our value-oriented
22 // "rules" applied. We do this so that the caller can work in terms of
23 // cty.Value objects even though the set internals use the raw values.
24 s set.Set
25}
26
27// NewValueSet creates and returns a new ValueSet with the given element type.
28func NewValueSet(ety Type) ValueSet {
29 return newValueSet(set.NewSet(setRules{Type: ety}))
30}
31
32func newValueSet(s set.Set) ValueSet {
33 return ValueSet{
34 s: s,
35 }
36}
37
38// ElementType returns the element type for the receiving ValueSet.
39func (s ValueSet) ElementType() Type {
40 return s.s.Rules().(setRules).Type
41}
42
43// Add inserts the given value into the receiving set.
44func (s ValueSet) Add(v Value) {
45 s.requireElementType(v)
46 s.s.Add(v.v)
47}
48
49// Remove deletes the given value from the receiving set, if indeed it was
50// there in the first place. If the value is not present, this is a no-op.
51func (s ValueSet) Remove(v Value) {
52 s.requireElementType(v)
53 s.s.Remove(v.v)
54}
55
56// Has returns true if the given value is in the receiving set, or false if
57// it is not.
58func (s ValueSet) Has(v Value) bool {
59 s.requireElementType(v)
60 return s.s.Has(v.v)
61}
62
63// Copy performs a shallow copy of the receiving set, returning a new set
64// with the same rules and elements.
65func (s ValueSet) Copy() ValueSet {
66 return newValueSet(s.s.Copy())
67}
68
69// Length returns the number of values in the set.
70func (s ValueSet) Length() int {
71 return s.s.Length()
72}
73
74// Values returns a slice of all of the values in the set in no particular
75// order.
76func (s ValueSet) Values() []Value {
77 l := s.s.Length()
78 if l == 0 {
79 return nil
80 }
81 ret := make([]Value, 0, l)
82 ety := s.ElementType()
83 for it := s.s.Iterator(); it.Next(); {
84 ret = append(ret, Value{
85 ty: ety,
86 v: it.Value(),
87 })
88 }
89 return ret
90}
91
92// Union returns a new set that contains all of the members of both the
93// receiving set and the given set. Both sets must have the same element type,
94// or else this function will panic.
95func (s ValueSet) Union(other ValueSet) ValueSet {
96 return newValueSet(s.s.Union(other.s))
97}
98
99// Intersection returns a new set that contains the values that both the
100// receiver and given sets have in common. Both sets must have the same element
101// type, or else this function will panic.
102func (s ValueSet) Intersection(other ValueSet) ValueSet {
103 return newValueSet(s.s.Intersection(other.s))
104}
105
106// Subtract returns a new set that contains all of the values from the receiver
107// that are not also in the given set. Both sets must have the same element
108// type, or else this function will panic.
109func (s ValueSet) Subtract(other ValueSet) ValueSet {
110 return newValueSet(s.s.Subtract(other.s))
111}
112
113// SymmetricDifference returns a new set that contains all of the values from
114// both the receiver and given sets, except those that both sets have in
115// common. Both sets must have the same element type, or else this function
116// will panic.
117func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet {
118 return newValueSet(s.s.SymmetricDifference(other.s))
119}
120
121// requireElementType panics if the given value is not of the set's element type.
122func (s ValueSet) requireElementType(v Value) {
123 if !v.Type().Equals(s.ElementType()) {
124 panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType()))
125 }
126}
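
A short sketch of the intended workflow, not part of the vendored file: build up a ValueSet and then freeze it into a cty value with SetValFromValueSet.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	vs := cty.NewValueSet(cty.String)
	vs.Add(cty.StringVal("a"))
	vs.Add(cty.StringVal("b"))
	vs.Add(cty.StringVal("a")) // duplicate of an existing element, ignored

	fmt.Println(vs.Length()) // 2

	// Wrap the accumulated elements as a cty set value; the underlying set
	// is copied, so later mutations of vs do not affect v.
	v := cty.SetValFromValueSet(vs)
	fmt.Println(v.Type().ElementType().Equals(cty.String)) // true
}
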
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go
new file mode 100644
index 0000000..ce738db
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go
@@ -0,0 +1,146 @@
1package cty
2
3import (
4 "bytes"
5 "fmt"
6 "hash/crc32"
7 "math/big"
8 "sort"
9)
10
11// setRules provides a Rules implementation for the ./set package that
12// respects the equality rules for cty values of the given type.
13//
14// This implementation expects that values added to the set will be
15// valid internal values for the given Type, which is to say that wrapping
16// the given value in a Value struct along with the ruleset's type should
17// produce a valid, working Value.
18type setRules struct {
19 Type Type
20}
21
22func (r setRules) Hash(v interface{}) int {
23 hashBytes := makeSetHashBytes(Value{
24 ty: r.Type,
25 v: v,
26 })
27 return int(crc32.ChecksumIEEE(hashBytes))
28}
29
30func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool {
31 v1v := Value{
32 ty: r.Type,
33 v: v1,
34 }
35 v2v := Value{
36 ty: r.Type,
37 v: v2,
38 }
39
40 eqv := v1v.Equals(v2v)
41
42 // By comparing the result to true we ensure that an Unknown result,
43 // which will result if either value is unknown, will be considered
44 // as non-equivalent. Two unknown values are not equivalent for the
45 // sake of set membership.
46 return eqv.v == true
47}
48
49func makeSetHashBytes(val Value) []byte {
50 var buf bytes.Buffer
51 appendSetHashBytes(val, &buf)
52 return buf.Bytes()
53}
54
55func appendSetHashBytes(val Value, buf *bytes.Buffer) {
56 // Exactly what bytes we generate here doesn't matter as long as the following
57 // constraints hold:
58 // - Unknown and null values all generate distinct strings from
59 // each other and from any normal value of the given type.
60 // - The delimiter used to separate items in a compound structure can
61 // never appear literally in any of its elements.
62 // Since we don't support heterogeneous lists we don't need to worry about
63 // collisions between values of different types, apart from
64 // DynamicPseudoType.
65 // If in practice we *do* get a collision then it's not a big deal because
66 // the Equivalent function will still distinguish values, but set
67 // performance will be best if we are able to produce a distinct string
68 // for each distinct value, unknown values notwithstanding.
69 if !val.IsKnown() {
70 buf.WriteRune('?')
71 return
72 }
73 if val.IsNull() {
74 buf.WriteRune('~')
75 return
76 }
77
78 switch val.ty {
79 case Number:
80 buf.WriteString(val.v.(*big.Float).String())
81 return
82 case Bool:
83 if val.v.(bool) {
84 buf.WriteRune('T')
85 } else {
86 buf.WriteRune('F')
87 }
88 return
89 case String:
90 buf.WriteString(fmt.Sprintf("%q", val.v.(string)))
91 return
92 }
93
94 if val.ty.IsMapType() {
95 buf.WriteRune('{')
96 val.ForEachElement(func(keyVal, elementVal Value) bool {
97 appendSetHashBytes(keyVal, buf)
98 buf.WriteRune(':')
99 appendSetHashBytes(elementVal, buf)
100 buf.WriteRune(';')
101 return false
102 })
103 buf.WriteRune('}')
104 return
105 }
106
107 if val.ty.IsListType() || val.ty.IsSetType() {
108 buf.WriteRune('[')
109 val.ForEachElement(func(keyVal, elementVal Value) bool {
110 appendSetHashBytes(elementVal, buf)
111 buf.WriteRune(';')
112 return false
113 })
114 buf.WriteRune(']')
115 return
116 }
117
118 if val.ty.IsObjectType() {
119 buf.WriteRune('<')
120 attrNames := make([]string, 0, len(val.ty.AttributeTypes()))
121 for attrName := range val.ty.AttributeTypes() {
122 attrNames = append(attrNames, attrName)
123 }
124 sort.Strings(attrNames)
125 for _, attrName := range attrNames {
126 appendSetHashBytes(val.GetAttr(attrName), buf)
127 buf.WriteRune(';')
128 }
129 buf.WriteRune('>')
130 return
131 }
132
133 if val.ty.IsTupleType() {
134 buf.WriteRune('<')
135 val.ForEachElement(func(keyVal, elementVal Value) bool {
136 appendSetHashBytes(elementVal, buf)
137 buf.WriteRune(';')
138 return false
139 })
140 buf.WriteRune('>')
141 return
142 }
143
144 // should never get down here
145 panic("unsupported type in set hash")
146}
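
As a worked illustration derived from the cases above (not stated elsewhere in the library): a known, non-null list of the strings "a" and "b" contributes the bytes ["a";"b";] to the hash input, since each string is rendered with %q and followed by the ';' delimiter inside '[' and ']', while an unknown value of any type contributes only '?'. The CRC32-IEEE checksum of those bytes then becomes the bucket key returned by setRules.Hash.
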
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go
new file mode 100644
index 0000000..952a2d2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go
@@ -0,0 +1,66 @@
1package cty
2
3import (
4 "fmt"
5)
6
7type typeSet struct {
8 typeImplSigil
9 ElementTypeT Type
10}
11
12// Set creates a set type with the given element Type.
13//
14// Set types are CollectionType implementations.
15func Set(elem Type) Type {
16 return Type{
17 typeSet{
18 ElementTypeT: elem,
19 },
20 }
21}
22
23// Equals returns true if the other Type is a set whose element type is
24// equal to that of the receiver.
25func (t typeSet) Equals(other Type) bool {
26 ot, isSet := other.typeImpl.(typeSet)
27 if !isSet {
28 return false
29 }
30
31 return t.ElementTypeT.Equals(ot.ElementTypeT)
32}
33
34func (t typeSet) FriendlyName() string {
35 return "set of " + t.ElementTypeT.FriendlyName()
36}
37
38func (t typeSet) ElementType() Type {
39 return t.ElementTypeT
40}
41
42func (t typeSet) GoString() string {
43 return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
44}
45
46// IsSetType returns true if the given type is a set type, regardless of its
47// element type.
48func (t Type) IsSetType() bool {
49 _, ok := t.typeImpl.(typeSet)
50 return ok
51}
52
53// SetElementType is a convenience method that checks if the given type is
54// a set type, returning a pointer to its element type if so and nil
55// otherwise. This is intended to allow convenient conditional branches,
56// like so:
57//
58// if et := t.SetElementType(); et != nil {
59// // Do something with *et
60// }
61func (t Type) SetElementType() *Type {
62 if lt, ok := t.typeImpl.(typeSet); ok {
63 return &lt.ElementTypeT
64 }
65 return nil
66}
diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
new file mode 100644
index 0000000..b98349e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
1package cty
2
3import (
4 "fmt"
5)
6
7type typeTuple struct {
8 typeImplSigil
9 ElemTypes []Type
10}
11
12// Tuple creates a tuple type with the given element types.
13//
14// After a slice is passed to this function the caller must no longer access
15// the underlying array, since ownership is transferred to this library.
16func Tuple(elemTypes []Type) Type {
17 return Type{
18 typeTuple{
19 ElemTypes: elemTypes,
20 },
21 }
22}
23
24func (t typeTuple) Equals(other Type) bool {
25 if ot, ok := other.typeImpl.(typeTuple); ok {
26 if len(t.ElemTypes) != len(ot.ElemTypes) {
27 // Fast path: if we don't have the same number of elements
28 // then we can't possibly be equal.
29 return false
30 }
31
32 for i, ty := range t.ElemTypes {
33 oty := ot.ElemTypes[i]
34 if !ok {
35 return false
36 }
37 if !oty.Equals(ty) {
38 return false
39 }
40 }
41
42 return true
43 }
44 return false
45}
46
47func (t typeTuple) FriendlyName() string {
48 // There isn't really a friendly way to write a tuple type due to its
49 // complexity, so we'll just do something English-ish. Callers will
50 // probably want to make some extra effort to avoid ever printing out
51// a tuple type FriendlyName in its entirety. For example, a caller could
52// produce an error message by diffing two object types and saying
53 // something like "Expected attribute foo to be string, but got number".
54 // TODO: Finish this
55 return "tuple"
56}
57
58func (t typeTuple) GoString() string {
59 if len(t.ElemTypes) == 0 {
60 return "cty.EmptyTuple"
61 }
62 return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
63}
64
65// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
66// the empty tuple type.
67var EmptyTuple Type
68
69// EmptyTupleVal is the only possible non-null, non-unknown value of type
70// EmptyTuple.
71var EmptyTupleVal Value
72
73func init() {
74 EmptyTuple = Tuple([]Type{})
75 EmptyTupleVal = Value{
76 ty: EmptyTuple,
77 v: []interface{}{},
78 }
79}
80
81// IsTupleType returns true if the given type is a tuple type, regardless
82// of its element types.
83func (t Type) IsTupleType() bool {
84 _, ok := t.typeImpl.(typeTuple)
85 return ok
86}
87
88// Length returns the number of elements of the receiving tuple type.
89// Will panic if the receiver isn't a tuple type; use IsTupleType to determine
90// whether this operation will succeed.
91func (t Type) Length() int {
92 if ot, ok := t.typeImpl.(typeTuple); ok {
93 return len(ot.ElemTypes)
94 }
95 panic("Length on non-tuple Type")
96}
97
98// TupleElementType returns the type of the element with the given index. Will
99// panic if the receiver is not a tuple type (use IsTupleType to confirm)
100// or if the index is out of range (use Length to confirm).
101func (t Type) TupleElementType(idx int) Type {
102 if ot, ok := t.typeImpl.(typeTuple); ok {
103 return ot.ElemTypes[idx]
104 }
105 panic("TupleElementType on non-tuple Type")
106}
107
108// TupleElementTypes returns a slice of the receiving tuple type's element
109// types. Will panic if the receiver is not a tuple type (use IsTupleType
110// to confirm).
111//
112// The returned slice is part of the internal state of the type, and is provided
113// for read access only. It is forbidden for any caller to modify the
114// underlying array. For many purposes the element-related methods of Value
115// are more appropriate and more convenient to use.
116func (t Type) TupleElementTypes() []Type {
117 if ot, ok := t.typeImpl.(typeTuple); ok {
118 return ot.ElemTypes
119 }
120 panic("TupleElementTypes on non-tuple Type")
121}
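
A minimal sketch of the tuple-type API above, not part of the vendored file.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	t := cty.Tuple([]cty.Type{cty.String, cty.Number})

	fmt.Println(t.IsTupleType())               // true
	fmt.Println(t.Length())                    // 2
	fmt.Printf("%#v\n", t.TupleElementType(1)) // cty.Number
	fmt.Println(t.Equals(cty.EmptyTuple))      // false
}
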
diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go
new file mode 100644
index 0000000..ae5f1c8
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/type.go
@@ -0,0 +1,95 @@
1package cty
2
3// Type represents value types within the type system.
4//
5// This is a closed interface type, meaning that only the concrete
6// implementations provided within this package are considered valid.
7type Type struct {
8 typeImpl
9}
10
11type typeImpl interface {
12 // isTypeImpl is a do-nothing method that exists only to express
13 // that a type is an implementation of typeImpl.
14 isTypeImpl() typeImplSigil
15
16 // Equals returns true if the other given Type exactly equals the
17 // receiver Type.
18 Equals(other Type) bool
19
20 // FriendlyName returns a human-friendly *English* name for the given
21 // type.
22 FriendlyName() string
23
24 // GoString implements the GoStringer interface from package fmt.
25 GoString() string
26}
27
28// Base implementation of Type to embed into concrete implementations
29// to signal that they are implementations of Type.
30type typeImplSigil struct{}
31
32func (t typeImplSigil) isTypeImpl() typeImplSigil {
33 return typeImplSigil{}
34}
35
36// Equals returns true if the other given Type exactly equals the receiver
37// type.
38func (t Type) Equals(other Type) bool {
39 return t.typeImpl.Equals(other)
40}
41
42// FriendlyName returns a human-friendly *English* name for the given type.
43func (t Type) FriendlyName() string {
44 return t.typeImpl.FriendlyName()
45}
46
47// GoString returns a string approximating how the receiver type would be
48// expressed in Go source code.
49func (t Type) GoString() string {
50 if t.typeImpl == nil {
51 return "cty.NilType"
52 }
53
54 return t.typeImpl.GoString()
55}
56
57// NilType is an invalid type used when a function is returning an error
58// and has no useful type to return. It should not be used and any methods
59// called on it will panic.
60var NilType = Type{}
61
62// HasDynamicTypes returns true either if the receiver is itself
63// DynamicPseudoType or if it is a compound type any of whose descendant
64// elements are DynamicPseudoType.
65func (t Type) HasDynamicTypes() bool {
66 switch {
67 case t == DynamicPseudoType:
68 return true
69 case t.IsPrimitiveType():
70 return false
71 case t.IsCollectionType():
72 return false
73 case t.IsObjectType():
74 attrTypes := t.AttributeTypes()
75 for _, at := range attrTypes {
76 if at.HasDynamicTypes() {
77 return true
78 }
79 }
80 return false
81 case t.IsTupleType():
82 elemTypes := t.TupleElementTypes()
83 for _, et := range elemTypes {
84 if et.HasDynamicTypes() {
85 return true
86 }
87 }
88 return false
89 case t.IsCapsuleType():
90 return false
91 default:
92 // Should never happen, since above should be exhaustive
93 panic("HasDynamicTypes does not support the given type")
94 }
95}
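
A small sketch, not part of the vendored file, showing HasDynamicTypes distinguishing a fully concrete object type from one with a dynamically typed attribute.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	concrete := cty.Object(map[string]cty.Type{
		"name": cty.String,
	})
	partial := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"meta": cty.DynamicPseudoType,
	})

	fmt.Println(concrete.HasDynamicTypes()) // false
	fmt.Println(partial.HasDynamicTypes())  // true
}
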
diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go
new file mode 100644
index 0000000..b417dc7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go
@@ -0,0 +1,142 @@
1package cty
2
3// TestConformance recursively walks the receiver and the given other type and
4// returns nil if the receiver *conforms* to the given type.
5//
6// Type conformance is similar to type equality but has one crucial difference:
7// DynamicPseudoType can be used within the given type to represent that
8// *any* type is allowed.
9//
10// If any non-conformities are found, the returned slice will be non-nil and
11// contain at least one error value. It will be nil if the type is entirely
12// conformant.
13//
14// Note that the special behavior of DynamicPseudoType is the *only* exception
15// to normal type equality. Calling applications may wish to apply their own
16// automatic conversion logic to the given data structure to create a more
17// liberal notion of conformance to a type.
18//
19// Returned errors are usually (but not always) PathError instances that
20// indicate where in the structure the error was found. If a returned error
21// is of that type then the error message is written for (English-speaking)
22// end-users working within the cty type system, not mentioning any Go-oriented
23// implementation details.
24func (t Type) TestConformance(other Type) []error {
25 path := make(Path, 0)
26 var errs []error
27 testConformance(t, other, path, &errs)
28 return errs
29}
30
31func testConformance(given Type, want Type, path Path, errs *[]error) {
32 if want.Equals(DynamicPseudoType) {
33 // anything goes!
34 return
35 }
36
37 if given.Equals(want) {
38 // Any equal types are always conformant
39 return
40 }
41
42 // The remainder of this function is concerned with detecting
43 // and reporting the specific non-conformance, since we wouldn't
44 // have got here if the types were not divergent.
45 // We treat compound structures as special so that we can report
46 // specifically what is non-conforming, rather than simply returning
47 // the entire type names and letting the user puzzle it out.
48
49 if given.IsObjectType() && want.IsObjectType() {
50 givenAttrs := given.AttributeTypes()
51 wantAttrs := want.AttributeTypes()
52
53 if len(givenAttrs) != len(wantAttrs) {
54 // Something is missing from one of them.
55 for k := range givenAttrs {
56 if _, exists := wantAttrs[k]; !exists {
57 *errs = append(
58 *errs,
59 errorf(path, "unsupported attribute %q", k),
60 )
61 }
62 }
63 for k := range wantAttrs {
64 if _, exists := givenAttrs[k]; !exists {
65 *errs = append(
66 *errs,
67 errorf(path, "missing required attribute %q", k),
68 )
69 }
70 }
71 }
72
73 path = append(path, nil)
74 pathIdx := len(path) - 1
75
76 for k, wantAttrType := range wantAttrs {
77 if givenAttrType, exists := givenAttrs[k]; exists {
78 path[pathIdx] = GetAttrStep{Name: k}
79 testConformance(givenAttrType, wantAttrType, path, errs)
80 }
81 }
82
83 path = path[0:pathIdx]
84
85 return
86 }
87
88 if given.IsTupleType() && want.IsTupleType() {
89 givenElems := given.TupleElementTypes()
90 wantElems := want.TupleElementTypes()
91
92 if len(givenElems) != len(wantElems) {
93 *errs = append(
94 *errs,
95 errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)),
96 )
97 return
98 }
99
100 path = append(path, nil)
101 pathIdx := len(path) - 1
102
103 for i, wantElemType := range wantElems {
104 givenElemType := givenElems[i]
105 path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))}
106 testConformance(givenElemType, wantElemType, path, errs)
107 }
108
109 path = path[0:pathIdx]
110
111 return
112 }
113
114 if given.IsListType() && want.IsListType() {
115 path = append(path, IndexStep{Key: UnknownVal(Number)})
116 pathIdx := len(path) - 1
117 testConformance(given.ElementType(), want.ElementType(), path, errs)
118 path = path[0:pathIdx]
119 return
120 }
121
122 if given.IsMapType() && want.IsMapType() {
123 path = append(path, IndexStep{Key: UnknownVal(String)})
124 pathIdx := len(path) - 1
125 testConformance(given.ElementType(), want.ElementType(), path, errs)
126 path = path[0:pathIdx]
127 return
128 }
129
130 if given.IsSetType() && want.IsSetType() {
131 path = append(path, IndexStep{Key: UnknownVal(given.ElementType())})
132 pathIdx := len(path) - 1
133 testConformance(given.ElementType(), want.ElementType(), path, errs)
134 path = path[0:pathIdx]
135 return
136 }
137
138 *errs = append(
139 *errs,
140 errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()),
141 )
142}
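
A sketch of conformance checking, not part of the vendored file: DynamicPseudoType in the wanted type acts as a wildcard, while a genuinely mismatched attribute type is reported as an error.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	want := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"tags": cty.DynamicPseudoType, // any type is acceptable here
	})

	ok := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"tags": cty.List(cty.String),
	})
	fmt.Println(ok.TestConformance(want) == nil) // true: conformant

	wrong := cty.Object(map[string]cty.Type{
		"id":   cty.Number,
		"tags": cty.List(cty.String),
	})
	for _, err := range wrong.TestConformance(want) {
		fmt.Println(err) // reports the type mismatch for "id"
	}
}
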
diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go
new file mode 100644
index 0000000..e1e220a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go
@@ -0,0 +1,57 @@
1package cty
2
3import (
4 "encoding/gob"
5 "fmt"
6 "math/big"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty/set"
10)
11
12// InternalTypesToRegister is a slice of values that covers all of the
13// internal types used in the representation of cty.Type and cty.Value
14// across all cty Types.
15//
16// This is intended to be used to register these types with encoding
17// packages that require registration of types used in interfaces, such as
18// encoding/gob, thus allowing cty types and values to be included in streams
19// created from those packages. However, registering with gob is not necessary
20// since that is done automatically as a side-effect of importing this package.
21//
22// Callers should not do anything with the values here except pass them on
23// verbatim to a registration function.
24//
25// If the calling application uses Capsule types that wrap local structs either
26// directly or indirectly, these structs may also need to be registered in
27// order to support encoding and decoding of values of these types. That is the
28// responsibility of the calling application.
29var InternalTypesToRegister []interface{}
30
31func init() {
32 InternalTypesToRegister = []interface{}{
33 primitiveType{},
34 typeList{},
35 typeMap{},
36 typeObject{},
37 typeSet{},
38 setRules{},
39 set.Set{},
40 typeTuple{},
41 big.Float{},
42 capsuleType{},
43 []interface{}(nil),
44 map[string]interface{}(nil),
45 }
46
47 // Register these with gob here, rather than in gob.go, to ensure
48 // that this will always happen after we build the above.
49 for _, tv := range InternalTypesToRegister {
50 typeName := fmt.Sprintf("%T", tv)
51 if strings.HasPrefix(typeName, "cty.") {
52 gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv)
53 } else {
54 gob.Register(tv)
55 }
56 }
57}
diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go
new file mode 100644
index 0000000..9f6fce9
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go
@@ -0,0 +1,79 @@
1package cty
2
3// unknownType is the placeholder type used for the sigil value representing
4// "Unknown", to make it unambigiously distinct from any other possible value.
5type unknownType struct {
6}
7
8// unknown is a special sigil value used internally to mark a Value as unknown.
9var unknown interface{} = &unknownType{}
10
11// UnknownVal returns a Value that represents an unknown value of the given
12// type. Unknown values can be used to represent a value that is
13// not yet known. Its meaning is undefined in cty, but it could be used by
14// a calling application to allow partial evaluation.
15//
16// Unknown values can be created for any type. All operations on
17// Unknown values themselves return Unknown.
18func UnknownVal(t Type) Value {
19 return Value{
20 ty: t,
21 v: unknown,
22 }
23}
24
25func (t unknownType) GoString() string {
26 // This is the stringification of our internal unknown marker. The
27 // stringification of the public representation of unknowns is in
28 // Value.GoString.
29 return "cty.unknown"
30}
31
32type pseudoTypeDynamic struct {
33 typeImplSigil
34}
35
36// DynamicPseudoType represents the dynamic pseudo-type.
37//
38// This type can represent situations where a type is not yet known. Its
39// meaning is undefined in cty, but it could be used by a calling
40// application to allow expression type checking with some types not yet known.
41// For example, the application might optimistically permit any operation on
42// values of this type in type checking, allowing a partial type-check result,
43// and then repeat the check when more information is known to get the
44// final, concrete type.
45//
46// It is a pseudo-type because it is used only as a sigil to the calling
47// application. "Unknown" is the only valid value of this pseudo-type, so
48// operations on values of this type will always short-circuit as per
49// the rules for that special value.
50var DynamicPseudoType Type
51
52func (t pseudoTypeDynamic) Equals(other Type) bool {
53 _, ok := other.typeImpl.(pseudoTypeDynamic)
54 return ok
55}
56
57func (t pseudoTypeDynamic) FriendlyName() string {
58 return "dynamic"
59}
60
61func (t pseudoTypeDynamic) GoString() string {
62 return "cty.DynamicPseudoType"
63}
64
65// DynamicVal is the only valid value of the pseudo-type dynamic.
66// This value can be used as a placeholder where a value or expression's
67// type and value are both unknown, thus allowing partial evaluation. See
68// the docs for DynamicPseudoType for more information.
69var DynamicVal Value
70
71func init() {
72 DynamicPseudoType = Type{
73 pseudoTypeDynamic{},
74 }
75 DynamicVal = Value{
76 ty: DynamicPseudoType,
77 v: unknown,
78 }
79}
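
A short sketch, not part of the vendored file, of the short-circuit behavior described above: operations involving an unknown value produce unknown results rather than definite ones.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	u := cty.UnknownVal(cty.String)
	k := cty.StringVal("hello")

	eq := u.Equals(k)
	fmt.Println(eq.IsKnown()) // false: the comparison result is itself unknown

	fmt.Printf("%#v\n", cty.DynamicVal) // cty.DynamicVal
}
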
diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go
new file mode 100644
index 0000000..80cb8f7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/value.go
@@ -0,0 +1,98 @@
1package cty
2
3// Value represents a value of a particular type, and is the interface by
4// which operations are executed on typed values.
5//
6// Value has two different classes of method. Operation methods stay entirely
7// within the type system (methods accept and return Value instances) and
8// are intended for use in implementing a language in terms of cty, while
9// integration methods either enter or leave the type system, working with
10// native Go values. Operation methods are guaranteed to support all of the
11// expected short-circuit behavior for unknown and dynamic values, while
12// integration methods may not.
13//
14// The philosophy for the operations API is that it's the caller's
15// responsibility to ensure that the given types and values satisfy the
16// specified invariants during a separate type check, so that the caller is
17// able to return errors to its user from the application's own perspective.
18//
19// Consequently the design of these methods assumes such checks have already
20// been done and panics if any invariants turn out not to be satisfied. These
21// panic errors are not intended to be handled, but rather indicate a bug in
22// the calling application that should be fixed with more checks prior to
23// executing operations.
24//
25// A related consequence of this philosophy is that no automatic type
26// conversions are done. If a method specifies that its argument must be
27// number then it's the caller's responsibility to do that conversion before
28// the call, thus allowing the application to have more constrained conversion
29// rules than are offered by the built-in converter where necessary.
30type Value struct {
31 ty Type
32 v interface{}
33}
34
35// Type returns the type of the value.
36func (val Value) Type() Type {
37 return val.ty
38}
39
40// IsKnown returns true if the value is known. That is, if it is not
41// the result of the unknown value constructor UnknownVal(...), and is not
42// the result of an operation on another unknown value.
43//
44// Unknown values are only produced either directly or as a result of
45// operating on other unknown values, and so an application that never
46// introduces Unknown values can be guaranteed to never receive any either.
47func (val Value) IsKnown() bool {
48 return val.v != unknown
49}
50
51// IsNull returns true if the value is null. Values of any type can be
52// null, but any operations on a null value will panic. No operation ever
53// produces null, so an application that never introduces Null values can
54// be guaranteed to never receive any either.
55func (val Value) IsNull() bool {
56 return val.v == nil
57}
58
59// NilVal is an invalid Value that can be used as a placeholder when returning
60// with an error from a function that returns (Value, error).
61//
62// NilVal is *not* a valid Value and so no operations may be performed on it.
63// Any attempt to use it will result in a panic.
64//
65// This should not be confused with the idea of a Null value, as returned by
66// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
67// the cty type system. Null values *do* exist in the cty type system.
68var NilVal = Value{
69 ty: Type{typeImpl: nil},
70 v: nil,
71}
72
73// IsWhollyKnown is an extension of IsKnown that also recursively checks
74// inside collections and structures to see if there are any nested unknown
75// values.
76func (val Value) IsWhollyKnown() bool {
77 if !val.IsKnown() {
78 return false
79 }
80
81 if val.IsNull() {
82 // Can't recurse into a null, so we're done
83 return true
84 }
85
86 switch {
87 case val.CanIterateElements():
88 for it := val.ElementIterator(); it.Next(); {
89 _, ev := it.Element()
90 if !ev.IsWhollyKnown() {
91 return false
92 }
93 }
94 return true
95 default:
96 return true
97 }
98}
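
A sketch, not part of the vendored file, contrasting IsKnown and IsWhollyKnown on a list with one unknown element.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	l := cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.UnknownVal(cty.String),
	})

	fmt.Println(l.IsKnown())       // true: the list itself is known
	fmt.Println(l.IsWhollyKnown()) // false: one of its elements is unknown
}
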
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go
new file mode 100644
index 0000000..495a83e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go
@@ -0,0 +1,276 @@
1package cty
2
3import (
4 "fmt"
5 "math/big"
6 "reflect"
7
8 "golang.org/x/text/unicode/norm"
9
10 "github.com/zclconf/go-cty/cty/set"
11)
12
13// BoolVal returns a Value of type Bool whose internal value is the given
14// bool.
15func BoolVal(v bool) Value {
16 return Value{
17 ty: Bool,
18 v: v,
19 }
20}
21
22// NumberVal returns a Value of type Number whose internal value is the given
23// big.Float. The returned value becomes the owner of the big.Float object,
24// and so it's forbidden for the caller to mutate the object after it's
25// wrapped in this way.
26func NumberVal(v *big.Float) Value {
27 return Value{
28 ty: Number,
29 v: v,
30 }
31}
32
33// NumberIntVal returns a Value of type Number whose internal value is equal
34// to the given integer.
35func NumberIntVal(v int64) Value {
36 return NumberVal(new(big.Float).SetInt64(v))
37}
38
39// NumberUIntVal returns a Value of type Number whose internal value is equal
40// to the given unsigned integer.
41func NumberUIntVal(v uint64) Value {
42 return NumberVal(new(big.Float).SetUint64(v))
43}
44
45// NumberFloatVal returns a Value of type Number whose internal value is
46// equal to the given float.
47func NumberFloatVal(v float64) Value {
48 return NumberVal(new(big.Float).SetFloat64(v))
49}
50
51// StringVal returns a Value of type String whose internal value is the
52// given string.
53//
54// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and
55// they are NFC-normalized on entry into the world of cty values.
56//
57// If the given string is not valid UTF-8 then behavior of string operations
58// is undefined.
59func StringVal(v string) Value {
60 return Value{
61 ty: String,
62 v: NormalizeString(v),
63 }
64}
65
66// NormalizeString applies the same normalization that cty applies when
67// constructing string values.
68//
69// A return value from this function can be meaningfully compared byte-for-byte
70// with a Value.AsString result.
71func NormalizeString(s string) string {
72 return norm.NFC.String(s)
73}
74
75// ObjectVal returns a Value of an object type whose structure is defined
76// by the key names and value types in the given map.
77func ObjectVal(attrs map[string]Value) Value {
78 attrTypes := make(map[string]Type, len(attrs))
79 attrVals := make(map[string]interface{}, len(attrs))
80
81 for attr, val := range attrs {
82 attr = NormalizeString(attr)
83 attrTypes[attr] = val.ty
84 attrVals[attr] = val.v
85 }
86
87 return Value{
88 ty: Object(attrTypes),
89 v: attrVals,
90 }
91}
92
93// TupleVal returns a Value of a tuple type whose element types are
94// defined by the value types in the given slice.
95func TupleVal(elems []Value) Value {
96 elemTypes := make([]Type, len(elems))
97 elemVals := make([]interface{}, len(elems))
98
99 for i, val := range elems {
100 elemTypes[i] = val.ty
101 elemVals[i] = val.v
102 }
103
104 return Value{
105 ty: Tuple(elemTypes),
106 v: elemVals,
107 }
108}
109
110// ListVal returns a Value of list type whose element type is defined by
112// the types of the given values, which must be homogeneous.
112//
113// If the types are not all consistent (aside from elements that are of the
114// dynamic pseudo-type) then this function will panic. It will panic also
115// if the given list is empty, since then the element type cannot be inferred.
116// (See also ListValEmpty.)
117func ListVal(vals []Value) Value {
118 if len(vals) == 0 {
119 panic("must not call ListVal with empty slice")
120 }
121 elementType := DynamicPseudoType
122 rawList := make([]interface{}, len(vals))
123
124 for i, val := range vals {
125 if elementType == DynamicPseudoType {
126 elementType = val.ty
127 } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
128 panic(fmt.Errorf(
129 "inconsistent list element types (%#v then %#v)",
130 elementType, val.ty,
131 ))
132 }
133
134 rawList[i] = val.v
135 }
136
137 return Value{
138 ty: List(elementType),
139 v: rawList,
140 }
141}
142
143// ListValEmpty returns an empty list of the given element type.
144func ListValEmpty(element Type) Value {
145 return Value{
146 ty: List(element),
147 v: []interface{}{},
148 }
149}
150
151// MapVal returns a Value of a map type whose element type is defined by
153// the types of the given values, which must be homogeneous.
153//
154// If the types are not all consistent (aside from elements that are of the
155// dynamic pseudo-type) then this function will panic. It will panic also
156// if the given map is empty, since then the element type cannot be inferred.
157// (See also MapValEmpty.)
158func MapVal(vals map[string]Value) Value {
159 if len(vals) == 0 {
160 panic("must not call MapVal with empty map")
161 }
162 elementType := DynamicPseudoType
163 rawMap := make(map[string]interface{}, len(vals))
164
165 for key, val := range vals {
166 if elementType == DynamicPseudoType {
167 elementType = val.ty
168 } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
169 panic(fmt.Errorf(
170 "inconsistent map element types (%#v then %#v)",
171 elementType, val.ty,
172 ))
173 }
174
175 rawMap[NormalizeString(key)] = val.v
176 }
177
178 return Value{
179 ty: Map(elementType),
180 v: rawMap,
181 }
182}
183
184// MapValEmpty returns an empty map of the given element type.
185func MapValEmpty(element Type) Value {
186 return Value{
187 ty: Map(element),
188 v: map[string]interface{}{},
189 }
190}
191
192// SetVal returns a Value of set type whose element type is defined by
194// the types of the given values, which must be homogeneous.
194//
195// If the types are not all consistent (aside from elements that are of the
196// dynamic pseudo-type) then this function will panic. It will panic also
197// if the given list is empty, since then the element type cannot be inferred.
198// (See also SetValEmpty.)
199func SetVal(vals []Value) Value {
200 if len(vals) == 0 {
201 panic("must not call SetVal with empty slice")
202 }
203 elementType := DynamicPseudoType
204 rawList := make([]interface{}, len(vals))
205
206 for i, val := range vals {
207 if elementType == DynamicPseudoType {
208 elementType = val.ty
209 } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
210 panic(fmt.Errorf(
211 "inconsistent set element types (%#v then %#v)",
212 elementType, val.ty,
213 ))
214 }
215
216 rawList[i] = val.v
217 }
218
219 rawVal := set.NewSetFromSlice(setRules{elementType}, rawList)
220
221 return Value{
222 ty: Set(elementType),
223 v: rawVal,
224 }
225}
226
227// SetValFromValueSet returns a Value of set type based on an already-constructed
228// ValueSet.
229//
230// The element type of the returned value is the element type of the given
231// set.
232func SetValFromValueSet(s ValueSet) Value {
233 ety := s.ElementType()
234 rawVal := s.s.Copy() // copy so caller can't mutate what we wrap
235
236 return Value{
237 ty: Set(ety),
238 v: rawVal,
239 }
240}
241
242// SetValEmpty returns an empty set of the given element type.
243func SetValEmpty(element Type) Value {
244 return Value{
245 ty: Set(element),
246 v: set.NewSet(setRules{element}),
247 }
248}
249
250// CapsuleVal creates a value of the given capsule type using the given
251// wrapVal, which must be a pointer to a value of the capsule type's native
252// type.
253//
254// This function will panic if the given type is not a capsule type, if
255// the given wrapVal is not compatible with the given capsule type, or if
256// wrapVal is not a pointer.
257func CapsuleVal(ty Type, wrapVal interface{}) Value {
258 if !ty.IsCapsuleType() {
259 panic("not a capsule type")
260 }
261
262 wv := reflect.ValueOf(wrapVal)
263 if wv.Kind() != reflect.Ptr {
264 panic("wrapVal is not a pointer")
265 }
266
267 it := ty.typeImpl.(*capsuleType).GoType
268 if !wv.Type().Elem().AssignableTo(it) {
269 panic("wrapVal target is not compatible with the given capsule type")
270 }
271
272 return Value{
273 ty: ty,
274 v: wrapVal,
275 }
276}
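
A few of the constructors above in action, as a sketch that is not part of the vendored file.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	person := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("Ermintrude"),
		"age":  cty.NumberIntVal(32),
	})

	nums := cty.ListVal([]cty.Value{
		cty.NumberIntVal(1),
		cty.NumberIntVal(2),
	})

	empty := cty.MapValEmpty(cty.Bool)

	fmt.Printf("%#v\n", person.Type()) // an object type with "name" and "age"
	fmt.Printf("%#v\n", nums.Type())   // a list of number
	fmt.Printf("%#v\n", empty.Type())  // a map of bool
}
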
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
new file mode 100644
index 0000000..967aa76
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go
@@ -0,0 +1,1071 @@
1package cty
2
3import (
4 "fmt"
5 "math/big"
6
7 "reflect"
8
9 "github.com/zclconf/go-cty/cty/set"
10)
11
12func (val Value) GoString() string {
13 if val == NilVal {
14 return "cty.NilVal"
15 }
16
17 if val.ty == DynamicPseudoType {
18 return "cty.DynamicVal"
19 }
20
21 if !val.IsKnown() {
22 return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
23 }
24 if val.IsNull() {
25 return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
26 }
27
28 // By the time we reach here we've dealt with all of the exceptions around
29 // unknowns and nulls, so we're guaranteed that the values are the
30 // canonical internal representation of the given type.
31
32 switch val.ty {
33 case Bool:
34 if val.v.(bool) {
35 return "cty.True"
36 } else {
37 return "cty.False"
38 }
39 case Number:
40 fv := val.v.(*big.Float)
41 // We'll try to use NumberIntVal or NumberFloatVal if we can, since
42 // the fully-general initializer call is pretty ugly-looking.
43 if fv.IsInt() {
44 return fmt.Sprintf("cty.NumberIntVal(%#v)", fv)
45 }
46 if rfv, accuracy := fv.Float64(); accuracy == big.Exact {
47 return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv)
48 }
49 return fmt.Sprintf("cty.NumberVal(new(big.Float).Parse(\"%#v\", 10))", fv)
50 case String:
51 return fmt.Sprintf("cty.StringVal(%#v)", val.v)
52 }
53
54 switch {
55 case val.ty.IsSetType():
56 vals := val.v.(set.Set).Values()
57 if vals == nil || len(vals) == 0 {
58 return fmt.Sprintf("cty.SetValEmpty()")
59 } else {
60 return fmt.Sprintf("cty.SetVal(%#v)", vals)
61 }
62 case val.ty.IsCapsuleType():
63 return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v)
64 }
65
66 // The default case exposes implementation details, so ideally the cases
67 // above should cover every type for good caller UX.
68 return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v)
69}
70
71// Equals returns True if the receiver and the given other value have the
72// same type and are exactly equal in value.
73//
74// The usual short-circuit rules apply, so the result can be unknown or typed
75// as dynamic if either of the given values are. Use RawEquals to compare
76// if two values are equal *ignoring* the short-circuit rules.
77func (val Value) Equals(other Value) Value {
78 if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
79 return UnknownVal(Bool)
80 }
81
82 if !val.ty.Equals(other.ty) {
83 return BoolVal(false)
84 }
85
86 if !(val.IsKnown() && other.IsKnown()) {
87 return UnknownVal(Bool)
88 }
89
90 if val.IsNull() || other.IsNull() {
91 if val.IsNull() && other.IsNull() {
92 return BoolVal(true)
93 }
94 return BoolVal(false)
95 }
96
97 ty := val.ty
98 result := false
99
100 switch {
101 case ty == Number:
102 result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
103 case ty == Bool:
104 result = val.v.(bool) == other.v.(bool)
105 case ty == String:
106 // Simple equality is safe because we NFC-normalize strings as they
107 // enter our world from StringVal, and so we can assume strings are
108 // always in normal form.
109 result = val.v.(string) == other.v.(string)
110 case ty.IsObjectType():
111 oty := ty.typeImpl.(typeObject)
112 result = true
113 for attr, aty := range oty.AttrTypes {
114 lhs := Value{
115 ty: aty,
116 v: val.v.(map[string]interface{})[attr],
117 }
118 rhs := Value{
119 ty: aty,
120 v: other.v.(map[string]interface{})[attr],
121 }
122 eq := lhs.Equals(rhs)
123 if !eq.IsKnown() {
124 return UnknownVal(Bool)
125 }
126 if eq.False() {
127 result = false
128 break
129 }
130 }
131 case ty.IsTupleType():
132 tty := ty.typeImpl.(typeTuple)
133 result = true
134 for i, ety := range tty.ElemTypes {
135 lhs := Value{
136 ty: ety,
137 v: val.v.([]interface{})[i],
138 }
139 rhs := Value{
140 ty: ety,
141 v: other.v.([]interface{})[i],
142 }
143 eq := lhs.Equals(rhs)
144 if !eq.IsKnown() {
145 return UnknownVal(Bool)
146 }
147 if eq.False() {
148 result = false
149 break
150 }
151 }
152 case ty.IsListType():
153 ety := ty.typeImpl.(typeList).ElementTypeT
154 if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
155 result = true
156 for i := range val.v.([]interface{}) {
157 lhs := Value{
158 ty: ety,
159 v: val.v.([]interface{})[i],
160 }
161 rhs := Value{
162 ty: ety,
163 v: other.v.([]interface{})[i],
164 }
165 eq := lhs.Equals(rhs)
166 if !eq.IsKnown() {
167 return UnknownVal(Bool)
168 }
169 if eq.False() {
170 result = false
171 break
172 }
173 }
174 }
175 case ty.IsSetType():
176 s1 := val.v.(set.Set)
177 s2 := other.v.(set.Set)
178 equal := true
179
180 // Note that by our definition of sets it's never possible for two
181 // sets that contain unknown values (directly or indirectly) to
182 // ever be equal, even if they are otherwise identical.
183
184 // FIXME: iterating both lists and checking each item is not the
185 // ideal implementation here, but it works with the primitives we
186 // have in the set implementation. Perhaps the set implementation
187 // can provide its own equality test later.
188 s1.EachValue(func(v interface{}) {
189 if !s2.Has(v) {
190 equal = false
191 }
192 })
193 s2.EachValue(func(v interface{}) {
194 if !s1.Has(v) {
195 equal = false
196 }
197 })
198
199 result = equal
200 case ty.IsMapType():
201 ety := ty.typeImpl.(typeMap).ElementTypeT
202 if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
203 result = true
204 for k := range val.v.(map[string]interface{}) {
205 if _, ok := other.v.(map[string]interface{})[k]; !ok {
206 result = false
207 break
208 }
209 lhs := Value{
210 ty: ety,
211 v: val.v.(map[string]interface{})[k],
212 }
213 rhs := Value{
214 ty: ety,
215 v: other.v.(map[string]interface{})[k],
216 }
217 eq := lhs.Equals(rhs)
218 if !eq.IsKnown() {
219 return UnknownVal(Bool)
220 }
221 if eq.False() {
222 result = false
223 break
224 }
225 }
226 }
227 case ty.IsCapsuleType():
228 // A capsule type's encapsulated value is a pointer to a value of its
229 // native type, so we can just compare these to get the identity test
230 // we need.
231 return BoolVal(val.v == other.v)
232
233 default:
234 // should never happen
235 panic(fmt.Errorf("unsupported value type %#v in Equals", ty))
236 }
237
238 return BoolVal(result)
239}
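[Editor's illustration, not part of the vendored diff] A small sketch of the short-circuit behaviour described above: a type mismatch is simply "not equal", while an unknown operand makes the whole comparison unknown.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	a := cty.StringVal("hi")

	fmt.Println(a.Equals(cty.StringVal("hi")).True())  // true
	fmt.Println(a.Equals(cty.NumberIntVal(1)).False()) // true: different types are just unequal

	// Comparing against an unknown yields an unknown Bool rather than panicking.
	fmt.Println(a.Equals(cty.UnknownVal(cty.String)).IsKnown()) // false
}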
240
241// NotEqual is a shorthand for Equals followed by Not.
242func (val Value) NotEqual(other Value) Value {
243 return val.Equals(other).Not()
244}
245
246// True returns true if the receiver is True, false if False, and panics if
247// the receiver is not of type Bool.
248//
249// This is a helper function to help write application logic that works with
250// values, rather than a first-class operation. It does not work with unknown
251// or null values. For more robust handling with unknown value
252// short-circuiting, use val.Equals(cty.True).
253func (val Value) True() bool {
254 if val.ty != Bool {
255 panic("not bool")
256 }
257 return val.Equals(True).v.(bool)
258}
259
260// False is the opposite of True.
261func (val Value) False() bool {
262 return !val.True()
263}
264
265// RawEquals returns true if and only if the two given values have the same
266// type and equal value, ignoring the usual short-circuit rules about
267// unknowns and dynamic types.
268//
269// This method is more appropriate for testing than for real use, since it
270// skips over usual semantics around unknowns but as a consequence allows
271// testing the result of another operation that is expected to return unknown.
272// It returns a primitive Go bool rather than a Value to remind us that it
273// is not a first-class value operation.
274func (val Value) RawEquals(other Value) bool {
275 if !val.ty.Equals(other.ty) {
276 return false
277 }
278 if (!val.IsKnown()) && (!other.IsKnown()) {
279 return true
280 }
281 if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) {
282 return false
283 }
284 if val.IsNull() && other.IsNull() {
285 return true
286 }
287 if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) {
288 return false
289 }
290 if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType {
291 return true
292 }
293
294 ty := val.ty
295 switch {
296 case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType:
297 return val.Equals(other).True()
298 case ty.IsObjectType():
299 oty := ty.typeImpl.(typeObject)
300 for attr, aty := range oty.AttrTypes {
301 lhs := Value{
302 ty: aty,
303 v: val.v.(map[string]interface{})[attr],
304 }
305 rhs := Value{
306 ty: aty,
307 v: other.v.(map[string]interface{})[attr],
308 }
309 eq := lhs.RawEquals(rhs)
310 if !eq {
311 return false
312 }
313 }
314 return true
315 case ty.IsTupleType():
316 tty := ty.typeImpl.(typeTuple)
317 for i, ety := range tty.ElemTypes {
318 lhs := Value{
319 ty: ety,
320 v: val.v.([]interface{})[i],
321 }
322 rhs := Value{
323 ty: ety,
324 v: other.v.([]interface{})[i],
325 }
326 eq := lhs.RawEquals(rhs)
327 if !eq {
328 return false
329 }
330 }
331 return true
332 case ty.IsListType():
333 ety := ty.typeImpl.(typeList).ElementTypeT
334 if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
335 for i := range val.v.([]interface{}) {
336 lhs := Value{
337 ty: ety,
338 v: val.v.([]interface{})[i],
339 }
340 rhs := Value{
341 ty: ety,
342 v: other.v.([]interface{})[i],
343 }
344 eq := lhs.RawEquals(rhs)
345 if !eq {
346 return false
347 }
348 }
349 return true
350 }
351 return false
352 case ty.IsSetType():
353 s1 := val.v.(set.Set)
354 s2 := other.v.(set.Set)
355
356 // Since we're intentionally ignoring our rule that two unknowns
357 // are never equal, we can cheat here.
358 // (This isn't 100% right since e.g. it will fail if the set contains
359 // numbers that are infinite, which DeepEqual can't compare properly.
360 // We're accepting that limitation for simplicity here, since this
361 // function is here primarily for testing.)
362 return reflect.DeepEqual(s1, s2)
363
364 case ty.IsMapType():
365 ety := ty.typeImpl.(typeMap).ElementTypeT
366 if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
367 for k := range val.v.(map[string]interface{}) {
368 if _, ok := other.v.(map[string]interface{})[k]; !ok {
369 return false
370 }
371 lhs := Value{
372 ty: ety,
373 v: val.v.(map[string]interface{})[k],
374 }
375 rhs := Value{
376 ty: ety,
377 v: other.v.(map[string]interface{})[k],
378 }
379 eq := lhs.RawEquals(rhs)
380 if !eq {
381 return false
382 }
383 }
384 return true
385 }
386 return false
387 case ty.IsCapsuleType():
388 // A capsule type's encapsulated value is a pointer to a value of its
389 // native type, so we can just compare these to get the identity test
390 // we need.
391 return val.v == other.v
392
393 default:
394 // should never happen
395 panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty))
396 }
397}
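[Editor's illustration, not part of the vendored diff] A quick sketch contrasting RawEquals with Equals, which is the distinction the doc comment above draws for test code.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	u1 := cty.UnknownVal(cty.String)
	u2 := cty.UnknownVal(cty.String)

	// Equals short-circuits: comparing unknowns gives an unknown Bool.
	fmt.Println(u1.Equals(u2).IsKnown()) // false

	// RawEquals ignores the short-circuit rules, which is handy in tests.
	fmt.Println(u1.RawEquals(u2)) // true

	// Known values of the same type compare by value either way.
	fmt.Println(cty.NumberIntVal(2).RawEquals(cty.NumberIntVal(2))) // true
}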
398
399// Add returns the sum of the receiver and the given other value. Both values
400// must be numbers; this method will panic if not.
401func (val Value) Add(other Value) Value {
402 if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
403 shortCircuit = forceShortCircuitType(shortCircuit, Number)
404 return *shortCircuit
405 }
406
407 ret := new(big.Float)
408 ret.Add(val.v.(*big.Float), other.v.(*big.Float))
409 return NumberVal(ret)
410}
411
412// Subtract returns receiver minus the given other value. Both values must be
413// numbers; this method will panic if not.
414func (val Value) Subtract(other Value) Value {
415 if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
416 shortCircuit = forceShortCircuitType(shortCircuit, Number)
417 return *shortCircuit
418 }
419
420 return val.Add(other.Negate())
421}
422
423// Negate returns the numeric negative of the receiver, which must be a number.
424// This method will panic when given a value of any other type.
425func (val Value) Negate() Value {
426 if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
427 shortCircuit = forceShortCircuitType(shortCircuit, Number)
428 return *shortCircuit
429 }
430
431 ret := new(big.Float).Neg(val.v.(*big.Float))
432 return NumberVal(ret)
433}
434
435// Multiply returns the product of the receiver and the given other value.
436// Both values must be numbers; this method will panic if not.
437func (val Value) Multiply(other Value) Value {
438 if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
439 shortCircuit = forceShortCircuitType(shortCircuit, Number)
440 return *shortCircuit
441 }
442
443 ret := new(big.Float)
444 ret.Mul(val.v.(*big.Float), other.v.(*big.Float))
445 return NumberVal(ret)
446}
447
448// Divide returns the quotient of the receiver and the given other value.
449// Both values must be numbers; this method will panic if not.
450//
451// If the "other" value is exactly zero, this operation will return either
452// PositiveInfinity or NegativeInfinity, depending on the sign of the
453// receiver value. For some use-cases the presence of infinities may be
454// undesirable, in which case the caller should check whether the
455// other value equals zero before calling and raise an error instead.
456//
457// If both values are zero or infinity, this function will panic with
458// an instance of big.ErrNaN.
459func (val Value) Divide(other Value) Value {
460 if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
461 shortCircuit = forceShortCircuitType(shortCircuit, Number)
462 return *shortCircuit
463 }
464
465 ret := new(big.Float)
466 ret.Quo(val.v.(*big.Float), other.v.(*big.Float))
467 return NumberVal(ret)
468}
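[Editor's illustration, not part of the vendored diff] A short sketch of the division-by-zero behaviour described in the doc comment above: the result is an infinity rather than an error.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Ordinary division.
	fmt.Println(cty.NumberIntVal(7).Divide(cty.NumberIntVal(2)).AsBigFloat()) // 3.5

	// Dividing a positive number by zero yields positive infinity rather
	// than an error.
	q := cty.NumberIntVal(5).Divide(cty.Zero)
	fmt.Println(q.Equals(cty.PositiveInfinity).True()) // true
}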
469
470// Modulo returns the remainder of an integer division of the receiver and
471// the given other value. Both values must be numbers; this method will panic
472// if not.
473//
474 // Note that, unlike Divide, this implementation returns the receiver value
475 // unchanged when the "other" value is exactly zero, rather than an infinity.
476 // Callers that consider modulo-by-zero to be an error should check whether
477 // the other value equals zero before calling and raise an error in that
478 // case.
479//
480// This operation is primarily here for use with nonzero natural numbers.
481// Modulo with "other" as a non-natural number gets somewhat philosophical,
482// and this function takes a position on what that should mean, but callers
483// may wish to disallow such things outright or implement their own modulo
484// if they disagree with the interpretation used here.
485func (val Value) Modulo(other Value) Value {
486 if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
487 shortCircuit = forceShortCircuitType(shortCircuit, Number)
488 return *shortCircuit
489 }
490
491 // We cheat a bit here with infinities, just abusing the Multiply operation
492 // to get an infinite result of the correct sign.
493 if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity {
494 return val.Multiply(other)
495 }
496
497 if other.RawEquals(Zero) {
498 return val
499 }
500
501 // FIXME: This is a bit clumsy. Should come back later and see if there's a
502 // more straightforward way to do this.
503 rat := val.Divide(other)
504 ratFloorInt := &big.Int{}
505 rat.v.(*big.Float).Int(ratFloorInt)
506 work := (&big.Float{}).SetInt(ratFloorInt)
507 work.Mul(other.v.(*big.Float), work)
508 work.Sub(val.v.(*big.Float), work)
509
510 return NumberVal(work)
511}
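[Editor's illustration, not part of the vendored diff] A small sketch of Modulo on natural numbers, which is the primary use-case named above, plus the modulo-by-zero behaviour of this implementation.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// 10 mod 4 == 2 under the integer-division definition used here.
	fmt.Println(cty.NumberIntVal(10).Modulo(cty.NumberIntVal(4)).AsBigFloat()) // 2

	// Modulo by zero returns the receiver unchanged in this implementation.
	fmt.Println(cty.NumberIntVal(10).Modulo(cty.Zero).AsBigFloat()) // 10
}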
512
513// Absolute returns the absolute (signless) value of the receiver, which must
514// be a number or this method will panic.
515func (val Value) Absolute() Value {
516 if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
517 shortCircuit = forceShortCircuitType(shortCircuit, Number)
518 return *shortCircuit
519 }
520
521 ret := (&big.Float{}).Abs(val.v.(*big.Float))
522 return NumberVal(ret)
523}
524
525// GetAttr returns the value of the given attribute of the receiver, which
526// must be of an object type that has an attribute of the given name.
527// This method will panic if the receiver type is not compatible.
528//
529// The method will also panic if the given attribute name is not defined
530// for the value's type. Use the attribute-related methods on Type to
531// check for the validity of an attribute before trying to use it.
532//
533// This method may be called on a value whose type is DynamicPseudoType,
534// in which case the result will also be DynamicVal.
535func (val Value) GetAttr(name string) Value {
536 if val.ty == DynamicPseudoType {
537 return DynamicVal
538 }
539
540 if !val.ty.IsObjectType() {
541 panic("value is not an object")
542 }
543
544 name = NormalizeString(name)
545 if !val.ty.HasAttribute(name) {
546 panic("value has no attribute of that name")
547 }
548
549 attrType := val.ty.AttributeType(name)
550
551 if !val.IsKnown() {
552 return UnknownVal(attrType)
553 }
554
555 return Value{
556 ty: attrType,
557 v: val.v.(map[string]interface{})[name],
558 }
559}
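[Editor's illustration, not part of the vendored diff] A minimal sketch of reading an object attribute, checking with HasAttribute first since GetAttr panics on undefined names.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("Ada"),
		"age":  cty.NumberIntVal(36),
	})

	// GetAttr panics for undefined attributes, so check first when unsure.
	if obj.Type().HasAttribute("name") {
		fmt.Println(obj.GetAttr("name").AsString()) // Ada
	}
}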
560
561// Index returns the value of an element of the receiver, which must have
562// either a list, map or tuple type. This method will panic if the receiver
563// type is not compatible.
564//
565 // The key value must be the correct type for the receiving collection: a
566 // number if the collection is a list or tuple, or a string if it is a map.
567 // In the case of a list or tuple, the given number must be convertible to int
568// or this method will panic. The key may alternatively be of
569// DynamicPseudoType, in which case the result itself is an unknown of the
570// collection's element type.
571//
572// The result is of the receiver collection's element type, or in the case
573// of a tuple the type of the specific element index requested.
574//
575// This method may be called on a value whose type is DynamicPseudoType,
576 // in which case the result will also be DynamicVal.
577func (val Value) Index(key Value) Value {
578 if val.ty == DynamicPseudoType {
579 return DynamicVal
580 }
581
582 switch {
583 case val.Type().IsListType():
584 elty := val.Type().ElementType()
585 if key.Type() == DynamicPseudoType {
586 return UnknownVal(elty)
587 }
588
589 if key.Type() != Number {
590 panic("element key for list must be number")
591 }
592 if !key.IsKnown() {
593 return UnknownVal(elty)
594 }
595
596 if !val.IsKnown() {
597 return UnknownVal(elty)
598 }
599
600 index, accuracy := key.v.(*big.Float).Int64()
601 if accuracy != big.Exact || index < 0 {
602 panic("element key for list must be non-negative integer")
603 }
604
605 return Value{
606 ty: elty,
607 v: val.v.([]interface{})[index],
608 }
609 case val.Type().IsMapType():
610 elty := val.Type().ElementType()
611 if key.Type() == DynamicPseudoType {
612 return UnknownVal(elty)
613 }
614
615 if key.Type() != String {
616 panic("element key for map must be string")
617 }
618 if !key.IsKnown() {
619 return UnknownVal(elty)
620 }
621
622 if !val.IsKnown() {
623 return UnknownVal(elty)
624 }
625
626 keyStr := key.v.(string)
627
628 return Value{
629 ty: elty,
630 v: val.v.(map[string]interface{})[keyStr],
631 }
632 case val.Type().IsTupleType():
633 if key.Type() == DynamicPseudoType {
634 return DynamicVal
635 }
636
637 if key.Type() != Number {
638 panic("element key for tuple must be number")
639 }
640 if !key.IsKnown() {
641 return DynamicVal
642 }
643
644 index, accuracy := key.v.(*big.Float).Int64()
645 if accuracy != big.Exact || index < 0 {
646 panic("element key for tuple must be non-negative integer")
647 }
648
649 eltys := val.Type().TupleElementTypes()
650
651 if !val.IsKnown() {
652 return UnknownVal(eltys[index])
653 }
654
655 return Value{
656 ty: eltys[index],
657 v: val.v.([]interface{})[index],
658 }
659 default:
660 panic("not a list, map, or tuple type")
661 }
662}
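[Editor's illustration, not part of the vendored diff] A brief sketch of Index with the key types the doc comment above requires: a number for lists and tuples, a string for maps.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
	})
	m := cty.MapVal(map[string]cty.Value{"x": cty.NumberIntVal(1)})

	fmt.Println(list.Index(cty.NumberIntVal(1)).AsString()) // b
	fmt.Println(m.Index(cty.StringVal("x")).AsBigFloat())   // 1
}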
663
664// HasIndex returns True if the receiver (which must be supported for Index)
665// has an element with the given index key, or False if it does not.
666//
667// The result will be UnknownVal(Bool) if either the collection or the
668// key value are unknown.
669//
670// This method will panic if the receiver is not indexable, but does not
671// impose any panic-causing type constraints on the key.
672func (val Value) HasIndex(key Value) Value {
673 if val.ty == DynamicPseudoType {
674 return UnknownVal(Bool)
675 }
676
677 switch {
678 case val.Type().IsListType():
679 if key.Type() == DynamicPseudoType {
680 return UnknownVal(Bool)
681 }
682
683 if key.Type() != Number {
684 return False
685 }
686 if !key.IsKnown() {
687 return UnknownVal(Bool)
688 }
689 if !val.IsKnown() {
690 return UnknownVal(Bool)
691 }
692
693 index, accuracy := key.v.(*big.Float).Int64()
694 if accuracy != big.Exact || index < 0 {
695 return False
696 }
697
698 return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0)
699 case val.Type().IsMapType():
700 if key.Type() == DynamicPseudoType {
701 return UnknownVal(Bool)
702 }
703
704 if key.Type() != String {
705 return False
706 }
707 if !key.IsKnown() {
708 return UnknownVal(Bool)
709 }
710 if !val.IsKnown() {
711 return UnknownVal(Bool)
712 }
713
714 keyStr := key.v.(string)
715 _, exists := val.v.(map[string]interface{})[keyStr]
716
717 return BoolVal(exists)
718 case val.Type().IsTupleType():
719 if key.Type() == DynamicPseudoType {
720 return UnknownVal(Bool)
721 }
722
723 if key.Type() != Number {
724 return False
725 }
726 if !key.IsKnown() {
727 return UnknownVal(Bool)
728 }
729
730 index, accuracy := key.v.(*big.Float).Int64()
731 if accuracy != big.Exact || index < 0 {
732 return False
733 }
734
735 length := val.Type().Length()
736 return BoolVal(int(index) < length && index >= 0)
737 default:
738 panic("not a list, map, or tuple type")
739 }
740}
741
742// HasElement returns True if the receiver (which must be of a set type)
743// has the given value as an element, or False if it does not.
744//
745// The result will be UnknownVal(Bool) if either the set or the
746// given value are unknown.
747//
748// This method will panic if the receiver is not a set, or if it is a null set.
749func (val Value) HasElement(elem Value) Value {
750 ty := val.Type()
751
752 if !ty.IsSetType() {
753 panic("not a set type")
754 }
755 if !val.IsKnown() || !elem.IsKnown() {
756 return UnknownVal(Bool)
757 }
758 if val.IsNull() {
759 panic("can't call HasElement on a null value")
760 }
761 if ty.ElementType() != elem.Type() {
762 return False
763 }
764
765 s := val.v.(set.Set)
766 return BoolVal(s.Has(elem.v))
767}
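[Editor's illustration, not part of the vendored diff] A small sketch of the two membership helpers: HasIndex answers False for an incompatible key rather than panicking, and HasElement answers False for an element of the wrong type.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	list := cty.ListVal([]cty.Value{cty.StringVal("a")})
	s := cty.SetVal([]cty.Value{cty.NumberIntVal(1)})

	fmt.Println(list.HasIndex(cty.NumberIntVal(0)).True())   // true
	fmt.Println(list.HasIndex(cty.StringVal("nope")).True()) // false

	fmt.Println(s.HasElement(cty.NumberIntVal(1)).True()) // true
	fmt.Println(s.HasElement(cty.StringVal("1")).True())  // false
}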
768
769// Length returns the length of the receiver, which must be a collection type
770// or tuple type, as a number value. If the receiver is not a compatible type
771// then this method will panic.
772//
773// If the receiver is unknown then the result is also unknown.
774//
775// If the receiver is null then this function will panic.
776//
777// Note that Length is not supported for strings. To determine the length
778// of a string, call AsString and take the length of the native Go string
779// that is returned.
780func (val Value) Length() Value {
781 if val.Type().IsTupleType() {
782 // For tuples, we can return the length even if the value is not known.
783 return NumberIntVal(int64(val.Type().Length()))
784 }
785
786 if !val.IsKnown() {
787 return UnknownVal(Number)
788 }
789
790 return NumberIntVal(int64(val.LengthInt()))
791}
792
793// LengthInt is like Length except it returns an int. It has the same behavior
794// as Length except that it will panic if the receiver is unknown.
795//
796// This is an integration method provided for the convenience of code bridging
797// into Go's type system.
798func (val Value) LengthInt() int {
799 if val.Type().IsTupleType() {
800 // For tuples, we can return the length even if the value is not known.
801 return val.Type().Length()
802 }
803 if !val.IsKnown() {
804 panic("value is not known")
805 }
806 if val.IsNull() {
807 panic("value is null")
808 }
809
810 switch {
811
812 case val.ty.IsListType():
813 return len(val.v.([]interface{}))
814
815 case val.ty.IsSetType():
816 return val.v.(set.Set).Length()
817
818 case val.ty.IsMapType():
819 return len(val.v.(map[string]interface{}))
820
821 default:
822 panic("value is not a collection")
823 }
824}
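[Editor's illustration, not part of the vendored diff] A short sketch of Length versus LengthInt, including the tuple special case noted above where the length is part of the type and so stays known even for unknown values.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{"a": cty.True, "b": cty.False})

	// LengthInt bridges into Go's type system...
	fmt.Println(m.LengthInt()) // 2

	// ...while Length stays in the value world and can itself be unknown.
	unknownList := cty.UnknownVal(cty.List(cty.String))
	fmt.Println(unknownList.Length().IsKnown()) // false

	// Tuple lengths are part of the type, so they are known regardless.
	unknownTuple := cty.UnknownVal(cty.Tuple([]cty.Type{cty.String}))
	fmt.Println(unknownTuple.Length().AsBigFloat()) // 1
}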
825
826// ElementIterator returns an ElementIterator for iterating the elements
827// of the receiver, which must be a collection type, a tuple type, or an object
828 // type. If called on a value of any other type, this method will panic.
829//
830// The value must be Known and non-Null, or this method will panic.
831//
832// If the receiver is of a list type, the returned keys will be of type Number
833// and the values will be of the list's element type.
834//
835// If the receiver is of a map type, the returned keys will be of type String
836// and the value will be of the map's element type. Elements are passed in
837// ascending lexicographical order by key.
838//
839// If the receiver is of a set type, each element is returned as both the
840// key and the value, since set members are their own identity.
841//
842// If the receiver is of a tuple type, the returned keys will be of type Number
843// and the value will be of the corresponding element's type.
844//
845// If the receiver is of an object type, the returned keys will be of type
846 // String and the value will be of the corresponding attribute's type.
847//
848// ElementIterator is an integration method, so it cannot handle Unknown
849// values. This method will panic if the receiver is Unknown.
850func (val Value) ElementIterator() ElementIterator {
851 if !val.IsKnown() {
852 panic("can't use ElementIterator on unknown value")
853 }
854 if val.IsNull() {
855 panic("can't use ElementIterator on null value")
856 }
857 return elementIterator(val)
858}
859
860// CanIterateElements returns true if the receiver can support the
861// ElementIterator method (and by extension, ForEachElement) without panic.
862func (val Value) CanIterateElements() bool {
863 return canElementIterator(val)
864}
865
866// ForEachElement executes a given callback function for each element of
867// the receiver, which must be a collection type or tuple type, or this method
868// will panic.
869//
870// ForEachElement uses ElementIterator internally, and so the values passed
871// to the callback are as described for ElementIterator.
872//
873// Returns true if the iteration exited early due to the callback function
874// returning true, or false if the loop ran to completion.
875//
876// ForEachElement is an integration method, so it cannot handle Unknown
877// values. This method will panic if the receiver is Unknown.
878func (val Value) ForEachElement(cb ElementCallback) bool {
879 it := val.ElementIterator()
880 for it.Next() {
881 key, val := it.Element()
882 stop := cb(key, val)
883 if stop {
884 return true
885 }
886 }
887 return false
888}
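[Editor's illustration, not part of the vendored diff] A minimal sketch of ForEachElement, showing how returning true from the callback stops the iteration early and how that is reported to the caller.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	list := cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
		cty.StringVal("c"),
	})

	stopped := list.ForEachElement(func(key, elem cty.Value) bool {
		fmt.Println(key.AsBigFloat(), elem.AsString())
		return elem.AsString() == "b" // stop after "b"
	})
	fmt.Println(stopped) // true
}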
889
890// Not returns the logical inverse of the receiver, which must be of type
891// Bool or this method will panic.
892func (val Value) Not() Value {
893 if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
894 shortCircuit = forceShortCircuitType(shortCircuit, Bool)
895 return *shortCircuit
896 }
897
898 return BoolVal(!val.v.(bool))
899}
900
901// And returns the result of logical AND with the receiver and the other given
902// value, which must both be of type Bool or this method will panic.
903func (val Value) And(other Value) Value {
904 if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
905 shortCircuit = forceShortCircuitType(shortCircuit, Bool)
906 return *shortCircuit
907 }
908
909 return BoolVal(val.v.(bool) && other.v.(bool))
910}
911
912// Or returns the result of logical OR with the receiver and the other given
913// value, which must both be of type Bool or this method will panic.
914func (val Value) Or(other Value) Value {
915 if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
916 shortCircuit = forceShortCircuitType(shortCircuit, Bool)
917 return *shortCircuit
918 }
919
920 return BoolVal(val.v.(bool) || other.v.(bool))
921}
922
923// LessThan returns True if the receiver is less than the other given value,
924// which must both be numbers or this method will panic.
925func (val Value) LessThan(other Value) Value {
926 if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
927 shortCircuit = forceShortCircuitType(shortCircuit, Bool)
928 return *shortCircuit
929 }
930
931 return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0)
932}
933
934// GreaterThan returns True if the receiver is greater than the other given
935// value, which must both be numbers or this method will panic.
936func (val Value) GreaterThan(other Value) Value {
937 if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
938 shortCircuit = forceShortCircuitType(shortCircuit, Bool)
939 return *shortCircuit
940 }
941
942 return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0)
943}
944
945// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or.
946func (val Value) LessThanOrEqualTo(other Value) Value {
947 return val.LessThan(other).Or(val.Equals(other))
948}
949
950// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or.
951func (val Value) GreaterThanOrEqualTo(other Value) Value {
952 return val.GreaterThan(other).Or(val.Equals(other))
953}
954
955// AsString returns the native string from a non-null, non-unknown cty.String
956// value, or panics if called on any other value.
957func (val Value) AsString() string {
958 if val.ty != String {
959 panic("not a string")
960 }
961 if val.IsNull() {
962 panic("value is null")
963 }
964 if !val.IsKnown() {
965 panic("value is unknown")
966 }
967
968 return val.v.(string)
969}
970
971// AsBigFloat returns a big.Float representation of a non-null, non-unknown
972// cty.Number value, or panics if called on any other value.
973//
974// For more convenient conversions to other native numeric types, use the
975// "gocty" package.
976func (val Value) AsBigFloat() *big.Float {
977 if val.ty != Number {
978 panic("not a number")
979 }
980 if val.IsNull() {
981 panic("value is null")
982 }
983 if !val.IsKnown() {
984 panic("value is unknown")
985 }
986
987 // Copy the float so that callers can't mutate our internal state
988 ret := *(val.v.(*big.Float))
989
990 return &ret
991}
992
993// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown
994// value of any type that CanIterateElements, or panics if called on
995// any other value.
996//
997// For more convenient conversions to slices of more specific types, use
998// the "gocty" package.
999func (val Value) AsValueSlice() []Value {
1000 l := val.LengthInt()
1001 if l == 0 {
1002 return nil
1003 }
1004
1005 ret := make([]Value, 0, l)
1006 for it := val.ElementIterator(); it.Next(); {
1007 _, v := it.Element()
1008 ret = append(ret, v)
1009 }
1010 return ret
1011}
1012
1013// AsValueMap returns a map[string]cty.Value representation of a non-null,
1014// non-unknown value of any type that CanIterateElements, or panics if called
1015// on any other value.
1016//
1017// For more convenient conversions to maps of more specific types, use
1018// the "gocty" package.
1019func (val Value) AsValueMap() map[string]Value {
1020 l := val.LengthInt()
1021 if l == 0 {
1022 return nil
1023 }
1024
1025 ret := make(map[string]Value, l)
1026 for it := val.ElementIterator(); it.Next(); {
1027 k, v := it.Element()
1028 ret[k.AsString()] = v
1029 }
1030 return ret
1031}
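[Editor's illustration, not part of the vendored diff] A brief sketch of bridging cty collections into ordinary Go slices and maps with AsValueSlice and AsValueMap.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"a": cty.NumberIntVal(1),
		"b": cty.NumberIntVal(2),
	})

	goMap := m.AsValueMap()
	fmt.Println(goMap["b"].AsBigFloat()) // 2

	list := cty.ListVal([]cty.Value{cty.StringVal("x"), cty.StringVal("y")})
	for _, v := range list.AsValueSlice() {
		fmt.Println(v.AsString())
	}
}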
1032
1033// AsValueSet returns a ValueSet representation of a non-null,
1034// non-unknown value of any collection type, or panics if called
1035// on any other value.
1036//
1037// Unlike AsValueSlice and AsValueMap, this method requires specifically a
1038// collection type (list, set or map) and does not allow structural types
1039 // (tuple or object), because the ValueSet type requires homogeneous
1040// element types.
1041//
1042// The returned ValueSet can store only values of the receiver's element type.
1043func (val Value) AsValueSet() ValueSet {
1044 if !val.Type().IsCollectionType() {
1045 panic("not a collection type")
1046 }
1047
1048 // We don't give the caller our own set.Set (assuming we're a cty.Set value)
1049 // because then the caller could mutate our internals, which is forbidden.
1050 // Instead, we will construct a new set and append our elements into it.
1051 ret := NewValueSet(val.Type().ElementType())
1052 for it := val.ElementIterator(); it.Next(); {
1053 _, v := it.Element()
1054 ret.Add(v)
1055 }
1056 return ret
1057}
1058
1059// EncapsulatedValue returns the native value encapsulated in a non-null,
1060// non-unknown capsule-typed value, or panics if called on any other value.
1061//
1062// The result is the same pointer that was passed to CapsuleVal to create
1063// the value. Since cty considers values to be immutable, it is strongly
1064// recommended to treat the encapsulated value itself as immutable too.
1065func (val Value) EncapsulatedValue() interface{} {
1066 if !val.Type().IsCapsuleType() {
1067 panic("not a capsule-typed value")
1068 }
1069
1070 return val.v
1071}
diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go
new file mode 100644
index 0000000..a6943ba
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
1package cty
2
3// Walk visits all of the values in a possibly-complex structure, calling
4// a given function for each value.
5//
6// For example, given a list of strings the callback would first be called
7// with the whole list and then called once for each element of the list.
8//
9// The callback function may prevent recursive visits to child values by
10 // returning false. The callback function may halt the walk altogether by
11// returning a non-nil error. If the returned error is about the element
12// currently being visited, it is recommended to use the provided path
13// value to produce a PathError describing that context.
14//
15// The path passed to the given function may not be used after that function
16// returns, since its backing array is re-used for other calls.
17func Walk(val Value, cb func(Path, Value) (bool, error)) error {
18 var path Path
19 return walk(path, val, cb)
20}
21
22func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
23 deeper, err := cb(path, val)
24 if err != nil {
25 return err
26 }
27 if !deeper {
28 return nil
29 }
30
31 if val.IsNull() || !val.IsKnown() {
32 // Can't recurse into null or unknown values, regardless of type
33 return nil
34 }
35
36 ty := val.Type()
37 switch {
38 case ty.IsObjectType():
39 for it := val.ElementIterator(); it.Next(); {
40 nameVal, av := it.Element()
41 path := append(path, GetAttrStep{
42 Name: nameVal.AsString(),
43 })
44 err := walk(path, av, cb)
45 if err != nil {
46 return err
47 }
48 }
49 case val.CanIterateElements():
50 for it := val.ElementIterator(); it.Next(); {
51 kv, ev := it.Element()
52 path := append(path, IndexStep{
53 Key: kv,
54 })
55 err := walk(path, ev, cb)
56 if err != nil {
57 return err
58 }
59 }
60 }
61 return nil
62}
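[Editor's illustration, not part of the vendored diff] A small sketch of Walk over a nested value, printing each visited value and its nesting depth; returning true from the callback allows recursion into children.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("Ada"),
		"tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	})

	err := cty.Walk(val, func(p cty.Path, v cty.Value) (bool, error) {
		fmt.Printf("depth %d: %#v\n", len(p), v)
		return true, nil // true means "recurse into children"
	})
	if err != nil {
		fmt.Println("walk error:", err)
	}
}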
63
64// Transform visits all of the values in a possibly-complex structure,
65// calling a given function for each value which has an opportunity to
66// replace that value.
67//
68// Unlike Walk, Transform visits child nodes first, so for a list of strings
69// it would first visit the strings and then the _new_ list constructed
70// from the transformed values of the list items.
71//
72// This is useful for creating the effect of being able to make deep mutations
73// to a value even though values are immutable. However, it's the responsibility
74 // of the given function to preserve expected invariants, such as homogeneity of
75// element types in collections; this function can panic if such invariants
76// are violated, just as if new values were constructed directly using the
77// value constructor functions. An easy way to preserve invariants is to
78// ensure that the transform function never changes the value type.
79//
80 // The callback function may halt the walk altogether by
81// returning a non-nil error. If the returned error is about the element
82// currently being visited, it is recommended to use the provided path
83// value to produce a PathError describing that context.
84//
85// The path passed to the given function may not be used after that function
86// returns, since its backing array is re-used for other calls.
87func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) {
88 var path Path
89 return transform(path, val, cb)
90}
91
92func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) {
93 ty := val.Type()
94 var newVal Value
95
96 switch {
97
98 case val.IsNull() || !val.IsKnown():
99 // Can't recurse into null or unknown values, regardless of type
100 newVal = val
101
102 case ty.IsListType() || ty.IsSetType() || ty.IsTupleType():
103 l := val.LengthInt()
104 switch l {
105 case 0:
106 // No deep transform for an empty sequence
107 newVal = val
108 default:
109 elems := make([]Value, 0, l)
110 for it := val.ElementIterator(); it.Next(); {
111 kv, ev := it.Element()
112 path := append(path, IndexStep{
113 Key: kv,
114 })
115 newEv, err := transform(path, ev, cb)
116 if err != nil {
117 return DynamicVal, err
118 }
119 elems = append(elems, newEv)
120 }
121 switch {
122 case ty.IsListType():
123 newVal = ListVal(elems)
124 case ty.IsSetType():
125 newVal = SetVal(elems)
126 case ty.IsTupleType():
127 newVal = TupleVal(elems)
128 default:
129 panic("unknown sequence type") // should never happen because of the case we are in
130 }
131 }
132
133 case ty.IsMapType():
134 l := val.LengthInt()
135 switch l {
136 case 0:
137 // No deep transform for an empty map
138 newVal = val
139 default:
140 elems := make(map[string]Value)
141 for it := val.ElementIterator(); it.Next(); {
142 kv, ev := it.Element()
143 path := append(path, IndexStep{
144 Key: kv,
145 })
146 newEv, err := transform(path, ev, cb)
147 if err != nil {
148 return DynamicVal, err
149 }
150 elems[kv.AsString()] = newEv
151 }
152 newVal = MapVal(elems)
153 }
154
155 case ty.IsObjectType():
156 switch {
157 case ty.Equals(EmptyObject):
158 // No deep transform for an empty object
159 newVal = val
160 default:
161 atys := ty.AttributeTypes()
162 newAVs := make(map[string]Value)
163 for name := range atys {
164 av := val.GetAttr(name)
165 path := append(path, GetAttrStep{
166 Name: name,
167 })
168 newAV, err := transform(path, av, cb)
169 if err != nil {
170 return DynamicVal, err
171 }
172 newAVs[name] = newAV
173 }
174 newVal = ObjectVal(newAVs)
175 }
176
177 default:
178 newVal = val
179 }
180
181 return cb(path, newVal)
182}
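[Editor's illustration, not part of the vendored diff] A small sketch of Transform that upper-cases every string leaf while leaving other values untouched; keeping each value's type unchanged preserves the invariants the doc comment above warns about.

package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	out, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
		if v.Type().Equals(cty.String) && v.IsKnown() && !v.IsNull() {
			return cty.StringVal(strings.ToUpper(v.AsString())), nil
		}
		return v, nil // non-strings (including the rebuilt list) pass through
	})
	if err != nil {
		fmt.Println("transform error:", err)
		return
	}
	fmt.Printf("%#v\n", out) // a list of "A" and "B"
}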